about summary refs log tree commit diff
path: root/sys
diff options
context:
space:
mode:
Diffstat (limited to 'sys')
-rw-r--r--sys/arch/amd64/amd64/trap.S5
-rw-r--r--sys/arch/amd64/amd64/trap.c39
-rw-r--r--sys/include/vm/fault.h38
-rw-r--r--sys/include/vm/map.h4
-rw-r--r--sys/include/vm/obj.h1
-rw-r--r--sys/vm/vm_fault.c114
-rw-r--r--sys/vm/vm_map.c67
7 files changed, 250 insertions, 18 deletions
diff --git a/sys/arch/amd64/amd64/trap.S b/sys/arch/amd64/amd64/trap.S
index 66dd2a9..3cf6888 100644
--- a/sys/arch/amd64/amd64/trap.S
+++ b/sys/arch/amd64/amd64/trap.S
@@ -131,9 +131,8 @@ page_fault:
handle_trap
- /* TODO */
- cli
- hlt
+ pop_trapframe
+ iretq
.globl nmi
nmi:
diff --git a/sys/arch/amd64/amd64/trap.c b/sys/arch/amd64/amd64/trap.c
index 3caaaff..be0493f 100644
--- a/sys/arch/amd64/amd64/trap.c
+++ b/sys/arch/amd64/amd64/trap.c
@@ -35,6 +35,7 @@
#include <sys/signal.h>
#include <sys/proc.h>
#include <sys/sched.h>
+#include <vm/fault.h>
static const char *trap_type[] = {
[TRAP_BREAKPOINT] = "breakpoint",
@@ -53,6 +54,31 @@ static const char *trap_type[] = {
static const int TRAP_COUNT = __ARRAY_COUNT(trap_type);
+
+/*
+ * Fetch the faulting virtual address.
+ *
+ * On x86, the CPU stores the linear address that caused the
+ * page fault in the CR2 control register; read it back so the
+ * fault handler knows which page to service.
+ */
+static inline vaddr_t
+pf_faultaddr(void)
+{
+ uintptr_t cr2;
+ /* "memory" clobber keeps the read from being reordered */
+ __ASMV("mov %%cr2, %0\n" : "=r" (cr2) :: "memory");
+ return cr2;
+}
+
+/*
+ * Translate the page-fault error code pushed by the CPU into
+ * VM protection flags describing the attempted access.
+ *
+ * x86 page-fault error code bits (Intel SDM, vol. 3):
+ *   bit 1 (W)   - the access was a write
+ *   bit 2 (U/S) - the access originated in user mode
+ *   bit 4 (I/D) - the access was an instruction fetch
+ */
+static inline vm_prot_t
+pf_accesstype(struct trapframe *tf)
+{
+ vm_prot_t prot = 0;
+ uint64_t ec = tf->error_code;
+
+ if (__TEST(ec, __BIT(1)))
+ prot |= PROT_WRITE;
+ if (__TEST(ec, __BIT(2)))
+ prot |= PROT_USER;
+ if (__TEST(ec, __BIT(4)))
+ prot |= PROT_EXEC;
+
+ return prot;
+}
+
static void
dbg_errcode(struct trapframe *tf)
{
@@ -87,11 +113,10 @@ trap_print(struct trapframe *tf)
static void
regdump(struct trapframe *tf)
{
- uintptr_t cr3, cr2;
+ uintptr_t cr3, cr2 = pf_faultaddr();
- __ASMV("mov %%cr2, %0\n"
- "mov %%cr3, %1\n"
- : "=r" (cr2), "=r" (cr3)
+ __ASMV("mov %%cr3, %0\n"
+ : "=r" (cr3)
:
: "memory"
);
@@ -121,6 +146,7 @@ void
trap_handler(struct trapframe *tf)
{
struct proc *curtd = this_td();
+ int s;
/*
* XXX: Handle NMIs better. For now we just
@@ -142,6 +168,11 @@ trap_handler(struct trapframe *tf)
case TRAP_ARITH_ERR:
signal_raise(curtd, SIGFPE);
break;
+ case TRAP_PAGEFLT:
+ s = vm_fault(pf_faultaddr(), pf_accesstype(tf));
+ if (s != 0)
+ signal_raise(curtd, SIGSEGV);
+ break;
default:
signal_raise(curtd, SIGSEGV);
break;
diff --git a/sys/include/vm/fault.h b/sys/include/vm/fault.h
new file mode 100644
index 0000000..f0e308c
--- /dev/null
+++ b/sys/include/vm/fault.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2023-2024 Ian Marco Moffett and the Osmora Team.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Hyra nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VM_FAULT_H_
+#define _VM_FAULT_H_
+
+#include <sys/types.h>
+#include <vm/pmap.h>
+
+int vm_fault(vaddr_t va, vm_prot_t access_type);
+
+#endif /* !_VM_FAULT_H_ */
diff --git a/sys/include/vm/map.h b/sys/include/vm/map.h
index f482788..1da8240 100644
--- a/sys/include/vm/map.h
+++ b/sys/include/vm/map.h
@@ -46,10 +46,14 @@
/* Memory map table entry count */
#define MTAB_ENTRIES 32
+struct vm_object;
+
struct vm_mapping {
TAILQ_ENTRY(vm_mapping) link;
struct vm_range range;
+ struct vm_object *vmobj;
paddr_t physmem_base;
+ vm_prot_t prot;
/* Private */
size_t vhash; /* Virtual address hash */
diff --git a/sys/include/vm/obj.h b/sys/include/vm/obj.h
index c1c2f17..ef07a1e 100644
--- a/sys/include/vm/obj.h
+++ b/sys/include/vm/obj.h
@@ -42,6 +42,7 @@ struct vm_object {
struct vm_pagerops *pgops; /* Pager operations */
uint8_t is_anon : 1; /* Is an anonymous mapping */
+ uint8_t demand : 1; /* Only mapped upon access */
int ref; /* Ref count */
struct vnode *vnode; /* Only used if `is_anon` is 0 */
};
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
new file mode 100644
index 0000000..ec733e8
--- /dev/null
+++ b/sys/vm/vm_fault.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2023-2024 Ian Marco Moffett and the Osmora Team.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Hyra nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/sched.h>
+#include <vm/fault.h>
+#include <vm/map.h>
+#include <vm/pmap.h>
+#include <vm/vm.h>
+#include <vm/physseg.h>
+
+/*
+ * Linearly scan a mapping queue for the mapping whose virtual
+ * range contains `addr`.
+ *
+ * Returns the mapping on success, otherwise NULL.
+ */
+static struct vm_mapping *
+vm_mapq_search(vm_mapq_t *mq, vaddr_t addr)
+{
+ struct vm_mapping *mapping;
+ const struct vm_range *range;
+
+ TAILQ_FOREACH(mapping, mq, link) {
+ range = &mapping->range;
+ /* NOTE(review): range->end is treated as inclusive here */
+ if (addr >= range->start && addr <= range->end) {
+ return mapping;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * Look up the mapping that covers `addr` for the current thread.
+ *
+ * Tries the hashed fast path (vm_mapping_fetch) first; on a miss,
+ * falls back to scanning every queue in the map table.
+ *
+ * Returns the mapping on success, otherwise NULL.
+ */
+static struct vm_mapping *
+vm_find_mapping(vaddr_t addr)
+{
+ struct vm_mapping *mapping;
+ struct proc *td;
+ vm_mapq_t *mapq;
+
+ /*
+ * BUGFIX: `td` must be assigned before it is dereferenced.
+ * Previously vm_mapping_fetch(&td->mapspace, ...) ran while
+ * `td` was still uninitialized (undefined behavior).
+ */
+ td = this_td();
+
+ mapping = vm_mapping_fetch(&td->mapspace, addr);
+ if (mapping != NULL)
+ return mapping;
+
+ /* Slow path: need to search the other map queues */
+ for (size_t i = 0; i < MTAB_ENTRIES; ++i) {
+ mapq = &td->mapspace.mtab[i];
+ mapping = vm_mapq_search(mapq, addr);
+ if (mapping != NULL)
+ return mapping;
+ }
+
+ return NULL;
+}
+
+/*
+ * Service a page fault at virtual address `va` (demand paging).
+ *
+ * @va: Faulting virtual address (any address within the page).
+ * @access_type: Protection flags describing the attempted access.
+ *
+ * Finds the mapping covering the fault, validates the access
+ * against the mapping's protection, allocates backing physical
+ * memory if none exists yet, then maps the page in.
+ *
+ * Returns zero on success; nonzero on failure (the trap handler
+ * raises SIGSEGV in that case).
+ */
+int
+vm_fault(vaddr_t va, vm_prot_t access_type)
+{
+ struct proc *td = this_td();
+ struct vm_mapping *mapping;
+ struct vm_object *vmobj;
+
+ size_t granule = vm_get_page_size();
+ /* Page-align downwards; do not mutate `va` itself */
+ vaddr_t va_base = va & ~(granule - 1);
+
+ paddr_t pa_base;
+
+ mapping = vm_find_mapping(va_base);
+ if (mapping == NULL)
+ return -1;
+
+ if ((vmobj = mapping->vmobj) == NULL)
+ /* Virtual memory object non-existent */
+ return -1;
+ if (!vmobj->demand)
+ /* Demand paging not enabled for this object */
+ return -1;
+ if ((access_type & ~mapping->prot) != 0)
+ /* Invalid access type */
+ return -1;
+
+ /* Allocate physical memory if needed */
+ if (mapping->physmem_base == 0) {
+ pa_base = vm_alloc_pageframe(1);
+ if (pa_base == 0)
+ /* BUGFIX: fail instead of mapping physical page 0 on OOM */
+ return -1;
+ mapping->physmem_base = pa_base;
+ } else {
+ pa_base = mapping->physmem_base;
+ }
+
+ return vm_map_create(td->addrsp, va_base, pa_base, access_type, granule);
+}
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 8f7accc..7e67ebd 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -140,7 +140,7 @@ vm_mapspace_insert(struct vm_mapspace *ms, struct vm_mapping *mapping)
}
/*
- * Create an anonymous mapping.
+ * Create a mapping (internal helper)
*
* @addr: Address to map.
* @physmem: Physical address, set to 0 to alloc one here
@@ -149,7 +149,7 @@ vm_mapspace_insert(struct vm_mapspace *ms, struct vm_mapping *mapping)
* Returns zero on failure.
*/
static paddr_t
-vm_anon_map(void *addr, paddr_t physmem, vm_prot_t prot, size_t len)
+vm_map(void *addr, paddr_t physmem, vm_prot_t prot, size_t len)
{
struct proc *td = this_td();
const size_t GRANULE = vm_get_page_size();
@@ -187,7 +187,8 @@ vm_anon_map(void *addr, paddr_t physmem, vm_prot_t prot, size_t len)
* @fd: File descriptor.
*/
static paddr_t
-vm_fd_map(void *addr, vm_prot_t prot, size_t len, off_t off, int fd)
+vm_fd_map(void *addr, vm_prot_t prot, size_t len, off_t off, int fd,
+ struct vm_mapping *mapping)
{
paddr_t physmem = 0;
@@ -216,6 +217,9 @@ vm_fd_map(void *addr, vm_prot_t prot, size_t len, off_t off, int fd)
if (vm_obj_init(&vp->vmobj, vp) != 0)
return 0;
+ mapping->vmobj = vp->vmobj;
+ vm_object_ref(vp->vmobj);
+
/* Try to fetch a physical address */
if (vm_pager_paddr(vp->vmobj, &physmem, prot) != 0) {
vm_obj_destroy(vp->vmobj);
@@ -228,7 +232,7 @@ vm_fd_map(void *addr, vm_prot_t prot, size_t len, off_t off, int fd)
* then connect it to the physical address (creates a shared mapping)
*/
if (physmem != 0) {
- vm_anon_map(addr, physmem, prot, len);
+ vm_map(addr, physmem, prot, len);
return physmem;
}
@@ -238,7 +242,7 @@ vm_fd_map(void *addr, vm_prot_t prot, size_t len, off_t off, int fd)
* anonymous mapping then page-in from whatever filesystem
* (creates a shared mapping)
*/
- physmem = vm_anon_map(addr, 0, prot, len);
+ physmem = vm_map(addr, 0, prot, len);
pg.physaddr = physmem;
if (vm_pager_get(vp->vmobj, off, len, &pg) != 0) {
@@ -255,6 +259,9 @@ munmap(void *addr, size_t len)
struct proc *td = this_td();
struct vm_mapping *mapping;
+ struct vm_object *obj;
+ struct vnode *vp;
+
struct vm_mapspace *ms;
size_t map_len, granule;
vaddr_t map_start, map_end;
@@ -272,6 +279,23 @@ munmap(void *addr, size_t len)
map_end = mapping->range.end;
map_len = map_end - map_start;
+ /* Try to release any virtual memory objects */
+ if ((obj = mapping->vmobj) != NULL) {
+ spinlock_acquire(&obj->lock);
+ /*
+ * Drop our ref and try to cleanup. If the refcount
+ * is > 1, something is still holding it and we can't
+ * do much.
+ */
+ vm_object_unref(obj);
+ vp = obj->vnode;
+ if (vp != NULL && obj->ref == 1) {
+ vp->vmobj = NULL;
+ vm_obj_destroy(obj);
+ }
+ spinlock_release(&obj->lock);
+ }
+
/* Release the mapping */
vm_map_destroy(td->addrsp, map_start, map_len);
vm_free_pageframe(mapping->range.start, map_len / granule);
@@ -292,10 +316,13 @@ mmap(void *addr, size_t len, int prot, int flags, int fildes, off_t off)
struct proc *td = this_td();
struct vm_mapping *mapping = ALLOC_MAPPING();
+ struct vm_object *vmobj;
size_t misalign = ((vaddr_t)addr) & (GRANULE - 1);
paddr_t physmem = 0;
+ mapping->prot = prot | PROT_USER;
+
/* Ensure of valid prot flags */
if ((prot & ~PROT_MASK) != 0)
return MAP_FAILED;
@@ -315,15 +342,33 @@ mmap(void *addr, size_t len, int prot, int flags, int fildes, off_t off)
* this is.
*/
if (__TEST(flags, MAP_ANONYMOUS)) {
- /* Handle an anonymous map request */
- physmem = vm_anon_map(addr, 0, prot, len);
+ /* Try to create a virtual memory object */
+ if (vm_obj_init(&vmobj, NULL) != 0)
+ return 0;
+
+ /*
+ * Enable demand paging for this object if
+ * `addr` is not NULL.
+ */
+ if (addr != NULL) {
+ vmobj->is_anon = 1;
+ vmobj->demand = 1;
+
+ mapping->vmobj = vmobj;
+ mapping->physmem_base = 0;
+ } else {
+ physmem = vm_map(addr, 0, prot, len);
+ }
+
+ /* Did this work? */
+ if (physmem == 0 && addr == NULL)
+ return MAP_FAILED;
} else if (__TEST(flags, MAP_SHARED)) {
- physmem = vm_fd_map(addr, prot, len, off, fildes);
+ physmem = vm_fd_map(addr, prot, len, off, fildes, mapping);
+ if (physmem == 0)
+ return MAP_FAILED;
}
- if (physmem == 0) {
- return MAP_FAILED;
- }
map_start = __ALIGN_DOWN((vaddr_t)addr, GRANULE);
map_end = map_start + __ALIGN_UP(len + misalign, GRANULE);