summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorIan Moffett <ian@osmora.org>2024-04-16 20:10:07 -0400
committerIan Moffett <ian@osmora.org>2024-04-16 20:10:07 -0400
commit0d8da6f5436874c1dd987b53d11a3d3aea468f67 (patch)
tree02bcff6e0a18633d8318269cb23780f775d99727
parent9a26f4d453b1742c6249d66a077a175120e23338 (diff)
kernel: vm_map: Add mmap() and munmap()
Signed-off-by: Ian Moffett <ian@osmora.org>
-rw-r--r--lib/libc/include/sys/syscall.h2
-rw-r--r--sys/include/sys/proc.h3
-rw-r--r--sys/include/sys/syscall.h2
-rw-r--r--sys/include/vm/map.h38
-rw-r--r--sys/kern/kern_sched.c10
-rw-r--r--sys/kern/kern_syscall.c3
-rw-r--r--sys/vm/vm_map.c210
7 files changed, 268 insertions, 0 deletions
diff --git a/lib/libc/include/sys/syscall.h b/lib/libc/include/sys/syscall.h
index 55d7bbf..e0900c7 100644
--- a/lib/libc/include/sys/syscall.h
+++ b/lib/libc/include/sys/syscall.h
@@ -40,6 +40,8 @@
#define SYS_close 4
#define SYS_read 5
#define SYS_lseek 6
+#define SYS_mmap 7
+#define SYS_munmap 8
#if !defined(__ASSEMBLER__)
__attribute__((__always_inline__))
diff --git a/sys/include/sys/proc.h b/sys/include/sys/proc.h
index 09aebf5..f74d1da 100644
--- a/sys/include/sys/proc.h
+++ b/sys/include/sys/proc.h
@@ -38,6 +38,7 @@
#include <machine/frame.h>
#include <machine/pcb.h>
#include <vm/vm.h>
+#include <vm/map.h>
#define PROC_MAX_FDS 256
#define PROC_MAX_ADDR_RANGE 4
@@ -62,6 +63,8 @@ struct proc {
uint8_t is_user;
uint32_t signal;
struct filedesc *fds[PROC_MAX_FDS];
+ struct spinlock mapspace_lock;
+ struct vm_mapspace mapspace;
TAILQ_ENTRY(proc) link;
};
diff --git a/sys/include/sys/syscall.h b/sys/include/sys/syscall.h
index 8ebc2da..59b6036 100644
--- a/sys/include/sys/syscall.h
+++ b/sys/include/sys/syscall.h
@@ -43,6 +43,8 @@ enum {
SYS_close,
SYS_read,
SYS_lseek,
+ SYS_mmap,
+ SYS_munmap,
__MAX_SYSCALLS
};
diff --git a/sys/include/vm/map.h b/sys/include/vm/map.h
index 078a5e8..f482788 100644
--- a/sys/include/vm/map.h
+++ b/sys/include/vm/map.h
@@ -32,11 +32,49 @@
#include <sys/types.h>
#include <sys/cdefs.h>
+#include <sys/queue.h>
+#include <sys/syscall.h>
+#include <sys/spinlock.h>
#include <vm/pmap.h>
+#include <vm/vm.h>
+
+#define MAP_SHARED 0x0001
+#define MAP_PRIVATE 0x0002
+#define MAP_ANONYMOUS 0x0010
+#define MAP_FAILED ((void *)-1)
+
+/* Memory map table entry count */
+#define MTAB_ENTRIES 32
+
+struct vm_mapping {
+ TAILQ_ENTRY(vm_mapping) link; /* Hash-bucket linkage (mtab chain) */
+ struct vm_range range; /* Mapped virtual range, [start, end) page-aligned */
+ paddr_t physmem_base; /* Base of the backing physical frames */
+
+ /* Private */
+ size_t vhash; /* Virtual address hash */
+};
+
+typedef TAILQ_HEAD(, vm_mapping) vm_mapq_t;
+
+struct vm_mapspace {
+ vm_mapq_t mtab[MTAB_ENTRIES]; /* Map table */
+ size_t map_count; /* NOTE(review): not updated by insert/remove yet — confirm intended use */
+};
+
int vm_map_create(struct vas vas, vaddr_t va, paddr_t pa, vm_prot_t prot,
size_t bytes);
int vm_map_destroy(struct vas vas, vaddr_t va, size_t bytes);
+uint64_t sys_mmap(struct syscall_args *args);
+uint64_t sys_munmap(struct syscall_args *args);
+
+/* Mapspace operations */
+void vm_mapspace_insert(struct vm_mapspace *ms, struct vm_mapping *mapping);
+void vm_mapspace_remove(struct vm_mapspace *ms, struct vm_mapping *mapping);
+struct vm_mapping *vm_mapping_fetch(struct vm_mapspace *ms, vaddr_t va);
+void vm_free_mapq(vm_mapq_t *mapq);
+
#endif /* !_VM_MMAP_H_ */
diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c
index 877c55d..214032a 100644
--- a/sys/kern/kern_sched.c
+++ b/sys/kern/kern_sched.c
@@ -265,6 +265,10 @@ sched_create_td(uintptr_t rip, char *argvp[], char *envp[], struct auxval auxv,
td->tf = tf;
td->addrsp = vas;
td->is_user = is_user;
+ for (size_t i = 0; i < MTAB_ENTRIES; ++i) {
+ /* Init the memory mapping table */
+ TAILQ_INIT(&td->mapspace.mtab[i]);
+ }
if (prog_range != NULL) {
memcpy(exec_range, prog_range, sizeof(struct vm_range));
}
@@ -288,6 +292,7 @@ static void
sched_destroy_td(struct proc *td)
{
const struct vm_range *stack_range = &td->addr_range[ADDR_RANGE_STACK];
+ vm_mapq_t *mapq;
processor_free_pcb(td);
@@ -308,6 +313,11 @@ sched_destroy_td(struct proc *td)
fd_close_fdnum(td, i);
}
+ for (size_t i = 0; i < MTAB_ENTRIES; ++i) {
+ mapq = &td->mapspace.mtab[i];
+ vm_free_mapq(mapq);
+ }
+
pmap_free_vas(vm_get_ctx(), td->addrsp);
dynfree(td);
}
diff --git a/sys/kern/kern_syscall.c b/sys/kern/kern_syscall.c
index a630195..b4a86f3 100644
--- a/sys/kern/kern_syscall.c
+++ b/sys/kern/kern_syscall.c
@@ -32,6 +32,7 @@
#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/filedesc.h>
+#include <vm/map.h>
__noreturn static uint64_t
sys_exit(struct syscall_args *args)
@@ -47,4 +48,6 @@ uint64_t(*g_syscall_table[__MAX_SYSCALLS])(struct syscall_args *args) = {
sys_close,
sys_read,
sys_lseek,
+ sys_mmap,
+ sys_munmap
};
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 7d79108..266d696 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -30,11 +30,208 @@
#include <vm/map.h>
#include <vm/vm.h>
#include <vm/pmap.h>
+#include <vm/physseg.h>
+#include <vm/dynalloc.h>
#include <sys/types.h>
#include <sys/cdefs.h>
#include <sys/panic.h>
+#include <sys/sched.h>
#include <lib/assert.h>
+#define ALLOC_MAPPING() dynalloc(sizeof(struct vm_mapping))
+
+static size_t
+vm_hash_vaddr(vaddr_t va) {
+ va = (va ^ (va >> 30)) * (size_t)0xBF58476D1CE4E5B9;
+ va = (va ^ (va >> 27)) * (size_t)0x94D049BB133111EB;
+ va = va ^ (va >> 31);
+ return va;
+}
+
+/*
+ * Destroy a map queue.
+ */
+void
+vm_free_mapq(vm_mapq_t *mapq)
+{
+ struct vm_mapping *map;
+ size_t map_pages, granule;
+
+ granule = vm_get_page_size();
+ TAILQ_FOREACH(map, mapq, link) {
+ map_pages = (map->range.end - map->range.start) / granule;
+ vm_free_pageframe(map->range.start, map_pages);
+ }
+ dynfree(map);
+}
+
+/*
+ * Remove a mapping from a mapspace.
+ *
+ * @ms: Mapspace.
+ * @mapping: Mapping to remove.
+ */
+void
+vm_mapspace_remove(struct vm_mapspace *ms, struct vm_mapping *mapping)
+{
+ size_t vhash;
+ vm_mapq_t *mapq;
+
+ if (ms == NULL)
+ return;
+
+ vhash = vm_hash_vaddr(mapping->range.start);
+ mapq = &ms->mtab[vhash % MTAB_ENTRIES];
+ TAILQ_REMOVE(mapq, mapping, link);
+}
+
+/*
+ * Fetch a mapping from a mapspace.
+ *
+ * @ms: Mapspace.
+ * @va: Virtual address.
+ */
+struct vm_mapping *
+vm_mapping_fetch(struct vm_mapspace *ms, vaddr_t va)
+{
+ size_t vhash;
+ const vm_mapq_t *mapq;
+ struct vm_mapping *map;
+
+ if (ms == NULL)
+ return NULL;
+
+ vhash = vm_hash_vaddr(va);
+ mapq = &ms->mtab[vhash % MTAB_ENTRIES];
+
+ TAILQ_FOREACH(map, mapq, link) {
+ if (map->vhash == vhash) {
+ return map;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * Insert a mapping into a mapspace.
+ *
+ * @ms: Target mapspace.
+ * @mapping: Mapping to insert.
+ */
+void
+vm_mapspace_insert(struct vm_mapspace *ms, struct vm_mapping *mapping)
+{
+ size_t vhash;
+ vm_mapq_t *q;
+
+ if (mapping == NULL || ms == NULL)
+ return;
+
+ vhash = vm_hash_vaddr(mapping->range.start);
+ mapping->vhash = vhash;
+
+ q = &ms->mtab[vhash % MTAB_ENTRIES];
+ TAILQ_INSERT_HEAD(q, mapping, link);
+}
+
+static int
+munmap(void *addr, size_t len)
+{
+ struct proc *td = this_td();
+ struct vm_mapping *mapping;
+
+ struct vm_mapspace *ms;
+ size_t map_len, granule;
+ vaddr_t map_start, map_end;
+
+ spinlock_acquire(&td->mapspace_lock);
+ ms = &td->mapspace;
+
+ granule = vm_get_page_size();
+ mapping = vm_mapping_fetch(ms, (vaddr_t)addr);
+ if (mapping == NULL) {
+ return -1;
+ }
+
+ map_start = mapping->range.start;
+ map_end = mapping->range.end;
+ map_len = map_end - map_start;
+
+ /* Release the mapping */
+ vm_map_destroy(td->addrsp, map_start, map_len);
+ vm_free_pageframe(mapping->range.start, map_len / granule);
+
+ /* Destroy the mapping descriptor */
+ vm_mapspace_remove(ms, mapping);
+ dynfree(mapping);
+ spinlock_release(&td->mapspace_lock);
+ return 0;
+}
+
+static void *
+mmap(void *addr, size_t len, int prot, int flags, int fildes, off_t off)
+{
+ const int PROT_MASK = PROT_WRITE | PROT_EXEC;
+ const size_t GRANULE = vm_get_page_size();
+ uintptr_t map_end, map_start;
+
+ struct proc *td = this_td();
+ struct vm_mapping *mapping = ALLOC_MAPPING();
+
+ size_t misalign = ((vaddr_t)addr) & (GRANULE - 1);
+ int status;
+ paddr_t physmem;
+
+ if ((prot & ~PROT_MASK) != 0)
+ /* Invalid prot */
+ return MAP_FAILED;
+
+ /* Allocate the physical memory */
+ physmem = vm_alloc_pageframe(len / GRANULE);
+ if (physmem == 0)
+ return MAP_FAILED;
+
+ /*
+ * Handle address being NULL.
+ *
+ * FIXME: XXX: We currently identity map physmem which
+ * is probably not ideal.
+ */
+ if (addr == NULL) {
+ addr = (void *)physmem;
+ }
+
+ /* Handle an anonymous map request */
+ if (__TEST(flags, MAP_ANONYMOUS)) {
+ /*
+ * XXX: There is no need to worry about alignment yet
+ * as vm_map_create() handles that internally.
+ */
+ prot |= PROT_USER;
+ status = vm_map_create(td->addrsp, (vaddr_t)addr, physmem, prot, len);
+ if (status != 0) {
+ vm_free_pageframe(physmem, len / GRANULE);
+ return MAP_FAILED;
+ }
+ } else {
+ return MAP_FAILED;
+ }
+
+ map_start = __ALIGN_DOWN((vaddr_t)addr, GRANULE);
+ map_end = map_start + __ALIGN_UP(len + misalign, GRANULE);
+
+ mapping->range.start = map_start;
+ mapping->range.end = map_end;
+ mapping->physmem_base = physmem;
+
+ /* Add to mapspace */
+ spinlock_acquire(&td->mapspace_lock);
+ vm_mapspace_insert(&td->mapspace, mapping);
+ spinlock_release(&td->mapspace_lock);
+ return (void *)addr;
+}
+
/*
* Internal routine for cleaning up.
*
@@ -141,3 +338,16 @@ vm_map_destroy(struct vas vas, vaddr_t va, size_t bytes)
return 0;
}
+
+uint64_t
+sys_mmap(struct syscall_args *args)
+{
+ return (uintptr_t)mmap((void *)args->arg0, args->arg1, args->arg2,
+ args->arg3, args->arg4, args->arg5);
+}
+
+uint64_t
+sys_munmap(struct syscall_args *args)
+{
+ return munmap((void *)args->arg0, args->arg1);
+}