-rw-r--r--  sys/arch/amd64/amd64/pmap.c   12
-rw-r--r--  sys/include/sys/loader.h       4
-rw-r--r--  sys/include/vm/map.h           6
-rw-r--r--  sys/include/vm/pmap.h          4
-rw-r--r--  sys/kern/kern_loader.c        49
-rw-r--r--  sys/vm/vm_map.c               16
6 files changed, 64 insertions(+), 27 deletions(-)
diff --git a/sys/arch/amd64/amd64/pmap.c b/sys/arch/amd64/amd64/pmap.c
index 7b8609a..2760532 100644
--- a/sys/arch/amd64/amd64/pmap.c
+++ b/sys/arch/amd64/amd64/pmap.c
@@ -134,9 +134,8 @@ pmap_extract(uint8_t level, vaddr_t va, uintptr_t *pmap, bool allocate)
* TODO: Ensure operations here are serialized.
*/
static int
-pmap_modify_tbl(struct vm_ctx *ctx, vaddr_t va, size_t val)
+pmap_modify_tbl(struct vm_ctx *ctx, struct vas vas, vaddr_t va, size_t val)
{
- struct vas vas = pmap_read_vas();
uintptr_t *pml4 = PHYS_TO_VIRT(vas.top_level);
uintptr_t *pdpt, *pd, *tbl;
int status = 0;
@@ -167,17 +166,18 @@ done:
}
int
-pmap_map(struct vm_ctx *ctx, vaddr_t va, paddr_t pa, vm_prot_t prot)
+pmap_map(struct vm_ctx *ctx, struct vas vas, vaddr_t va, paddr_t pa,
+ vm_prot_t prot)
{
uint32_t flags = pmap_prot_to_pte(prot);
- return pmap_modify_tbl(ctx, va, (pa | flags));
+ return pmap_modify_tbl(ctx, vas, va, (pa | flags));
}
int
-pmap_unmap(struct vm_ctx *ctx, vaddr_t va)
+pmap_unmap(struct vm_ctx *ctx, struct vas vas, vaddr_t va)
{
- return pmap_modify_tbl(ctx, va, 0);
+ return pmap_modify_tbl(ctx, vas, va, 0);
}
struct vas
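Note on the pmap change above: pmap_modify_tbl() no longer reads the current VAS itself, so every pmap_map()/pmap_unmap() caller now names the address space explicitly. A minimal caller-side sketch, not part of the commit, assuming ctx, va, pa and prot are already set up:

    struct vas vas = pmap_read_vas();   /* what pmap_modify_tbl() used to do internally */

    if (pmap_map(ctx, vas, va, pa, prot) != 0)
        return -1;                      /* nothing was inserted for this page */

    /* ... later, remove the translation from the same address space ... */
    pmap_unmap(ctx, vas, va);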
diff --git a/sys/include/sys/loader.h b/sys/include/sys/loader.h
index 6107da2..c1aa426 100644
--- a/sys/include/sys/loader.h
+++ b/sys/include/sys/loader.h
@@ -31,6 +31,7 @@
#define _SYS_LOADER_H_
#include <sys/types.h>
+#include <vm/pmap.h>
#define AT_NULL 0
#define AT_IGNORE 1
@@ -54,7 +55,8 @@ struct auxval {
#if defined(_KERNEL)
-int loader_load(const void *dataptr, struct auxval *auxv);
+int loader_load(struct vas vas, const void *dataptr, struct auxval *auxv,
+ size_t load_base, char **ld_path);
#endif /* defined(_KERNEL) */
#endif /* !_SYS_LOADER_H_ */
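loader_load() now takes the destination VAS, a load base that is added to every p_vaddr, and an optional out-pointer that receives the PT_INTERP string. A hedged usage sketch; elf_image is a placeholder buffer, not something this header provides:

    struct auxval auxv = {0};
    char *ld_path = NULL;
    int error;

    /* Load a program at its linked addresses (load_base == 0). */
    error = loader_load(pmap_read_vas(), elf_image, &auxv, 0, &ld_path);
    if (error != 0)
        return error;

    /* ld_path is set only if the ELF carries a PT_INTERP segment. */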
diff --git a/sys/include/vm/map.h b/sys/include/vm/map.h
index 1cce8b8..078a5e8 100644
--- a/sys/include/vm/map.h
+++ b/sys/include/vm/map.h
@@ -34,7 +34,9 @@
#include <sys/cdefs.h>
#include <vm/pmap.h>
-int vm_map_create(vaddr_t va, paddr_t pa, vm_prot_t prot, size_t bytes);
-int vm_map_destroy(vaddr_t va, size_t bytes);
+int vm_map_create(struct vas vas, vaddr_t va, paddr_t pa, vm_prot_t prot,
+ size_t bytes);
+
+int vm_map_destroy(struct vas vas, vaddr_t va, size_t bytes);
#endif /* !_VM_MMAP_H_ */
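vm_map_create() and vm_map_destroy() likewise take the address space they operate on instead of implicitly using the current one. A short sketch of the updated calling convention, assuming va, pa and prot are supplied by the caller:

    struct vas vas = pmap_read_vas();
    size_t len = 4 * vm_get_page_size();

    if (vm_map_create(vas, va, pa, prot, len) != 0)
        return -1;

    /* ... */
    vm_map_destroy(vas, va, len);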
diff --git a/sys/include/vm/pmap.h b/sys/include/vm/pmap.h
index 85c6753..ebabd32 100644
--- a/sys/include/vm/pmap.h
+++ b/sys/include/vm/pmap.h
@@ -87,10 +87,10 @@ struct vas pmap_read_vas(void);
/*
* Map a physical address to a virtual address.
*/
-int pmap_map(struct vm_ctx *, vaddr_t, paddr_t, vm_prot_t);
+int pmap_map(struct vm_ctx *, struct vas, vaddr_t, paddr_t, vm_prot_t);
/*
* Unmap a page.
*/
-int pmap_unmap(struct vm_ctx *, vaddr_t);
+int pmap_unmap(struct vm_ctx *, struct vas, vaddr_t);
#endif /* _VM_PMAP_H_ */
diff --git a/sys/kern/kern_loader.c b/sys/kern/kern_loader.c
index cf51c72..73dc8c7 100644
--- a/sys/kern/kern_loader.c
+++ b/sys/kern/kern_loader.c
@@ -32,9 +32,11 @@
#include <sys/elf.h>
#include <sys/types.h>
#include <sys/syslog.h>
+#include <sys/errno.h>
#include <vm/vm.h>
#include <vm/map.h>
#include <vm/physseg.h>
+#include <vm/dynalloc.h>
#include <string.h>
#include <assert.h>
@@ -51,14 +53,15 @@ __KERNEL_META("$Hyra$: kern_loader.c, Ian Marco Moffett, "
#define PHDR(hdrptr, IDX) \
(void *)((uintptr_t)hdr + (hdrptr)->e_phoff + (hdrptr->e_phentsize*IDX))
-int
-loader_load(const void *dataptr, struct auxval *auxv)
+int loader_load(struct vas vas, const void *dataptr, struct auxval *auxv,
+ size_t load_base, char **ld_path)
{
const Elf64_Ehdr *hdr = dataptr;
Elf64_Phdr *phdr;
vm_prot_t prot = 0;
uintptr_t physmem;
+ uintptr_t map_addr;
size_t misalign, page_count;
int status;
@@ -92,20 +95,50 @@ loader_load(const void *dataptr, struct auxval *auxv)
page_count = __DIV_ROUNDUP(phdr->p_memsz + misalign, GRANULE);
physmem = vm_alloc_pageframe(page_count);
- __assert(physmem != 0); /* TODO: Handle better */
- status = vm_map_create(phdr->p_vaddr, physmem, prot,
- page_count * GRANULE);
+ /* Do we not have enough page frames? */
+ if (physmem == 0) {
+ DBG("Failed to allocate physical memory\n");
+ return -ENOMEM;
+ }
+
+ map_addr = phdr->p_vaddr + load_base;
+ status = vm_map_create(vas, map_addr, physmem, prot, page_count*GRANULE);
- __assert(status == 0); /* TODO: Handle better */
+ if (status != 0) {
+ DBG("Failed to map 0x%p - 0x%p\n",
+ phdr->p_vaddr + load_base,
+ (phdr->p_vaddr + load_base) + (page_count * GRANULE));
+
+ return status;
+ }
/* Now we want to copy the data */
tmp_ptr = (void *)((uintptr_t)hdr + phdr->p_offset);
- memcpy((void *)phdr->p_vaddr, tmp_ptr, phdr->p_filesz);
+ memcpy(PHYS_TO_VIRT(physmem), tmp_ptr, phdr->p_filesz);
+ break;
+ case PT_INTERP:
+ if (ld_path == NULL) {
+ break;
+ }
+
+ *ld_path = dynalloc(phdr->p_filesz);
+
+ if (*ld_path == NULL) {
+ DBG("Failed to allocate memory for PT_INTERP path\n");
+ return -ENOMEM;
+ }
+
+ memcpy(*ld_path, (char *)hdr + phdr->p_offset, phdr->p_filesz);
+ break;
+ case PT_PHDR:
+ auxv->at_phdr = phdr->p_vaddr + load_base;
+ break;
}
}
- auxv->at_entry = hdr->e_entry;
+ auxv->at_entry = hdr->e_entry + load_base;
auxv->at_phent = hdr->e_phentsize;
auxv->at_phnum = hdr->e_phnum;
return 0;
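With load_base and ld_path in place, a caller can load a dynamically linked program and then its interpreter at a separate base. A rough sketch, not part of the commit, assuming vas, prog_image and interp_image are set up by the caller, an arbitrary interpreter base, and a dynfree() counterpart to dynalloc():

    struct auxval prog_auxv = {0}, ld_auxv = {0};
    char *ld_path = NULL;
    int error;

    error = loader_load(vas, prog_image, &prog_auxv, 0, &ld_path);
    if (error != 0)
        return error;

    if (ld_path != NULL) {
        /* Place the dynamic linker well above the program image. */
        error = loader_load(vas, interp_image, &ld_auxv, 0x7000000000, NULL);
        dynfree(ld_path);
        if (error != 0)
            return error;
    }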
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index ae93c38..0d27738 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -47,14 +47,14 @@
* be enforced via a panic.
*/
static void
-vm_map_cleanup(struct vm_ctx *ctx, vaddr_t va, size_t bytes_aligned,
- size_t granule)
+vm_map_cleanup(struct vas vas, struct vm_ctx *ctx, vaddr_t va,
+ size_t bytes_aligned, size_t granule)
{
__assert(bytes_aligned != 0);
__assert((bytes_aligned & (granule - 1)) == 0);
for (size_t i = 0; i < bytes_aligned; i += 0x1000) {
- if (pmap_unmap(ctx, va + i) != 0) {
+ if (pmap_unmap(ctx, vas, va + i) != 0) {
/*
* XXX: This shouldn't happen... If it somehow does,
* then this should be handled.
@@ -75,7 +75,7 @@ vm_map_cleanup(struct vm_ctx *ctx, vaddr_t va, size_t bytes_aligned,
* machine's page granule, typically a 4k boundary.
*/
int
-vm_map_create(vaddr_t va, paddr_t pa, vm_prot_t prot, size_t bytes)
+vm_map_create(struct vas vas, vaddr_t va, paddr_t pa, vm_prot_t prot, size_t bytes)
{
size_t granule = vm_get_page_size();
int s;
@@ -95,10 +95,10 @@ vm_map_create(vaddr_t va, paddr_t pa, vm_prot_t prot, size_t bytes)
}
for (uintptr_t i = 0; i < bytes; i += granule) {
- s = pmap_map(ctx, va + i, pa + i, prot);
+ s = pmap_map(ctx, vas, va + i, pa + i, prot);
if (s != 0) {
/* Something went a bit wrong here, cleanup */
- vm_map_cleanup(ctx, va, i, bytes);
+ vm_map_cleanup(vas, ctx, va, i, bytes);
return -1;
}
}
@@ -111,7 +111,7 @@ vm_map_create(vaddr_t va, paddr_t pa, vm_prot_t prot, size_t bytes)
* address space.
*/
int
-vm_map_destroy(vaddr_t va, size_t bytes)
+vm_map_destroy(struct vas vas, vaddr_t va, size_t bytes)
{
struct vm_ctx *ctx = vm_get_ctx();
size_t granule = vm_get_page_size();
@@ -128,7 +128,7 @@ vm_map_destroy(vaddr_t va, size_t bytes)
}
for (uintptr_t i = 0; i < bytes; i += granule) {
- s = pmap_unmap(ctx, va + i);
+ s = pmap_unmap(ctx, vas, va + i);
if (s != 0) {
return -1;
}
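From the caller's point of view, a failed vm_map_create() leaves no partial mappings behind: vm_map_cleanup() unmaps whatever was mapped before the failing page. A short caller-side sketch, not part of the commit:

    if (vm_map_create(vas, va, pa, prot, len) != 0) {
        /* No vm_map_destroy() needed; the partial range was already torn down. */
        return -1;
    }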