author     Ian Moffett <ian@osmora.org>    2024-03-17 20:59:31 -0400
committer  Ian Moffett <ian@osmora.org>    2024-03-17 20:59:31 -0400
commit     0776264c266e7c1619b8b8b84d2da5384979bb3c (patch)
tree       80b6ded012336e24c72cc2c16e4c3ce952b7b739 /sys
parent     7895aaff402a021a1b04645ca8d251f760e5662e (diff)
parent     2896f4126de2ee0fd1bab4b960bfb2213c359f18 (diff)
Merge branch 'user' into dev
Diffstat (limited to 'sys')
31 files changed, 948 insertions, 90 deletions
diff --git a/sys/arch/amd64/amd64/machdep.c b/sys/arch/amd64/amd64/machdep.c index 6342aab..47d6dd0 100644 --- a/sys/arch/amd64/amd64/machdep.c +++ b/sys/arch/amd64/amd64/machdep.c @@ -40,6 +40,7 @@ #include <machine/spectre.h> #include <machine/cpu.h> #include <machine/uart.h> +#include <machine/cpuid.h> #include <vm/vm.h> #include <vm/dynalloc.h> #include <vm/physseg.h> @@ -56,6 +57,8 @@ __KERNEL_META("$Hyra$: machdep.c, Ian Marco Moffett, " #define INIT_FLAG_IOAPIC 0x00000001U #define INIT_FLAG_ACPI 0x00000002U +void syscall_isr(void); + static inline void init_tss(struct cpu_info *cur_cpu) { @@ -78,18 +81,28 @@ interrupts_init(void) idt_set_desc(0x8, IDT_TRAP_GATE_FLAGS, ISR(double_fault), 0); idt_set_desc(0xA, IDT_TRAP_GATE_FLAGS, ISR(invl_tss), 0); idt_set_desc(0xB, IDT_TRAP_GATE_FLAGS, ISR(segnp), 0); + idt_set_desc(0xC, IDT_TRAP_GATE_FLAGS, ISR(ss_fault), 0); idt_set_desc(0xD, IDT_TRAP_GATE_FLAGS, ISR(general_prot), 0); idt_set_desc(0xE, IDT_TRAP_GATE_FLAGS, ISR(page_fault), 0); + idt_set_desc(0x80, IDT_INT_GATE_USER, ISR(syscall_isr), 0); idt_load(); } +static bool +is_sse_supported(void) +{ + uint32_t edx, unused; + + __CPUID(0x00000001, unused, unused, unused, edx); + return __TEST(edx, __BIT(25)) && __TEST(edx, __BIT(26)); +} + void processor_halt(void) { __ASMV("cli; hlt"); } - /* * Send char to serial for debugging purposes. */ @@ -144,11 +157,63 @@ intr_unmask(void) __ASMV("sti"); } +int +processor_init_pcb(struct proc *proc) +{ + struct pcb *pcb = &proc->pcb; + const uint16_t FPU_FCW = 0x33F; + const uint32_t SSE_MXCSR = 0x1F80; + + /* Allocate FPU save area, aligned on a 16 byte boundary */ + pcb->fpu_state = PHYS_TO_VIRT(vm_alloc_pageframe(1)); + if (pcb->fpu_state == NULL) { + return -1; + } + + /* + * Setup x87 FPU control word and SSE MXCSR bits + * as per the sysv ABI + */ + __ASMV("fldcw %0\n" + "ldmxcsr %1" + :: "m" (FPU_FCW), + "m" (SSE_MXCSR) : "memory"); + + amd64_fxsave(pcb->fpu_state); + return 0; +} + +int +processor_free_pcb(struct proc *proc) +{ + struct pcb *pcb = &proc->pcb; + + if (pcb->fpu_state == NULL) { + return -1; + } + + vm_free_pageframe(VIRT_TO_PHYS(pcb->fpu_state), 1); + return 0; +} + +void +processor_switch_to(struct proc *old_td, struct proc *new_td) +{ + struct pcb *old_pcb = (old_td != NULL) ? &old_td->pcb : NULL; + struct pcb *new_pcb = &new_td->pcb; + + if (old_pcb != NULL) { + amd64_fxsave(old_pcb->fpu_state); + } + amd64_fxrstor(new_pcb->fpu_state); +} + void processor_init(void) { /* Indicates what doesn't need to be init anymore */ static uint8_t init_flags = 0; + static uint64_t reg_tmp; struct cpu_info *cur_cpu; /* Create our cpu_info structure */ @@ -159,6 +224,21 @@ processor_init(void) /* Set %GS to cpu_info */ amd64_write_gs_base((uintptr_t)cur_cpu); + if (is_sse_supported()) { + /* Enable SSE/SSE2 */ + reg_tmp = amd64_read_cr0(); + reg_tmp &= ~(__BIT(2)); + reg_tmp |= __BIT(1); + amd64_write_cr0(reg_tmp); + + /* Enable FXSAVE/FXRSTOR */ + reg_tmp = amd64_read_cr4(); + reg_tmp |= 3 << 9; + amd64_write_cr4(reg_tmp); + } else { + panic("SSE/SSE2 not supported!\n"); + } + CPU_INFO_LOCK(cur_cpu); init_tss(cur_cpu); diff --git a/sys/arch/amd64/amd64/pmap.c b/sys/arch/amd64/amd64/pmap.c index 2760532..54d4ca3 100644 --- a/sys/arch/amd64/amd64/pmap.c +++ b/sys/arch/amd64/amd64/pmap.c @@ -228,6 +228,19 @@ pmap_switch_vas(struct vm_ctx *ctx, struct vas vas) : "memory"); } +/* + * TODO: During the mapping of a virtual address, a level + * may be allocated. This function does not handle the + * freeing of allocated levels. 
We should keep track + * of levels allocated and free them here. + */ +int +pmap_free_vas(struct vm_ctx *ctx, struct vas vas) +{ + vm_free_pageframe(vas.top_level, 1); + return 0; +} + struct vas pmap_read_vas(void) { diff --git a/sys/arch/amd64/amd64/spectre.c b/sys/arch/amd64/amd64/spectre.c index 1247607..05aa557 100644 --- a/sys/arch/amd64/amd64/spectre.c +++ b/sys/arch/amd64/amd64/spectre.c @@ -62,13 +62,18 @@ __weak int try_spectre_mitigate(void) { uint64_t tmp; + static bool should_log = true; if (!__can_mitigate_spectre()) { KINFO("IBRS not supported; spectre mitigation NOT enabled\n"); return EXIT_FAILURE; } - KINFO("IBRS supported; spectre mitigation enabled\n"); + /* This is called per processor, only log once */ + if (should_log) { + KINFO("IBRS supported; spectre mitigation enabled\n"); + should_log = false; + } tmp = rdmsr(IA32_SPEC_CTL); tmp |= __BIT(0); /* IBRS */ diff --git a/sys/arch/amd64/amd64/syscall.S b/sys/arch/amd64/amd64/syscall.S new file mode 100644 index 0000000..fe70523 --- /dev/null +++ b/sys/arch/amd64/amd64/syscall.S @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2023-2024 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + #include <sys/cdefs.h> + #include <machine/frameasm.h> + +__KERNEL_META "$Hyra$: syscall.S, Ian Marco Moffett, \ + Syscall ISR code" + +.text +.globl syscall_isr +syscall_isr: + push_trapframe $0 + mov %rsp, %rdi + call __syscall + pop_trapframe + iretq diff --git a/sys/arch/amd64/amd64/syscall.c b/sys/arch/amd64/amd64/syscall.c new file mode 100644 index 0000000..68235d5 --- /dev/null +++ b/sys/arch/amd64/amd64/syscall.c @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2023-2024 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/syscall.h> + +void +__syscall(struct trapframe *tf) +{ + struct syscall_args args = { + .code = tf->rax, + .arg0 = tf->rdi, + .arg1 = tf->rsi, + .arg2 = tf->rcx, + .arg3 = tf->r8, + .arg4 = tf->r9, + .sp = tf->rsp + }; + + if (args.code < __MAX_SYSCALLS && args.code > 0) { + args.code -= 1; + tf->rax = g_syscall_table[args.code](&args); + } +} diff --git a/sys/arch/amd64/amd64/trap.S b/sys/arch/amd64/amd64/trap.S index 5a77955..66dd2a9 100644 --- a/sys/arch/amd64/amd64/trap.S +++ b/sys/arch/amd64/amd64/trap.S @@ -144,3 +144,13 @@ nmi: /* TODO */ cli hlt + +.globl ss_fault +ss_fault: + push_trapframe_ec $TRAP_SS + + handle_trap + + /* TODO */ + cli + hlt diff --git a/sys/arch/amd64/amd64/trap.c b/sys/arch/amd64/amd64/trap.c index b73048d..f1e58f1 100644 --- a/sys/arch/amd64/amd64/trap.c +++ b/sys/arch/amd64/amd64/trap.c @@ -44,7 +44,8 @@ static const char *trap_type[] = { [TRAP_SEGNP] = "segment not present", [TRAP_PROTFLT] = "general protection", [TRAP_PAGEFLT] = "page fault", - [TRAP_NMI] = "non-maskable interrupt" + [TRAP_NMI] = "non-maskable interrupt", + [TRAP_SS] = "stack-segment fault" }; static const int TRAP_COUNT = __ARRAY_COUNT(trap_type); @@ -54,12 +55,17 @@ dbg_errcode(struct trapframe *tf) { uint64_t ec = tf->error_code; - if (tf->trapno == TRAP_PAGEFLT) { + switch (tf->trapno) { + case TRAP_PAGEFLT: kprintf("bits (pwui): %c%c%c%c\n", __TEST(ec, __BIT(0)) ? 'p' : '-', __TEST(ec, __BIT(1)) ? 'w' : '-', __TEST(ec, __BIT(2)) ? 'u' : '-', __TEST(ec, __BIT(4)) ? 
'i' : '-'); + break; + case TRAP_SS: + kprintf("ss: 0x%x\n", ec); + break; } } @@ -115,5 +121,5 @@ trap_handler(struct trapframe *tf) } regdump(tf); - panic("Caught pre-sched exception\n"); + panic("Halted\n"); } diff --git a/sys/fs/initramfs.c b/sys/fs/initramfs.c index c0eaa0c..6da0929 100644 --- a/sys/fs/initramfs.c +++ b/sys/fs/initramfs.c @@ -103,7 +103,7 @@ vop_vget(struct vnode *parent, const char *name, struct vnode **vp) return -ENOENT; } - if (hdr->type != TAR_TYPEFLAG_DIR) { + if (hdr->type == TAR_TYPEFLAG_DIR) { vtype = VDIR; } @@ -149,8 +149,8 @@ static char * get_module(const char *path, uint64_t *size) { for (uint64_t i = 0; i < mod_req.response->module_count; ++i) { if (strcmp(mod_req.response->modules[i]->path, path) == 0) { - *size = mod_req.response->modules[i]->size; - return mod_req.response->modules[i]->address; + *size = mod_req.response->modules[i]->size; + return mod_req.response->modules[i]->address; } } @@ -173,6 +173,7 @@ static int initramfs_init(struct fs_info *info) { initramfs = get_module("/boot/initramfs.tar", &initramfs_size); + info->caps = FSCAP_FULLPATH; if (initramfs == NULL) { panic("Failed to load initramfs\n"); @@ -221,7 +222,7 @@ initramfs_open(const char *path) } hdr = initramfs_from_path((void *)initramfs, path); - return hdr_to_contents(hdr); + return (hdr == NULL) ? NULL : hdr_to_contents(hdr); } struct vfsops g_initramfs_ops = { diff --git a/sys/include/arch/amd64/cpu.h b/sys/include/arch/amd64/cpu.h index e2ed851..ee9bad7 100644 --- a/sys/include/arch/amd64/cpu.h +++ b/sys/include/arch/amd64/cpu.h @@ -110,6 +110,46 @@ amd64_read_gs_base(void) return rdmsr(IA32_KERNEL_GS_BASE); } +static inline uint64_t +amd64_read_cr0(void) +{ + uint64_t cr0; + __ASMV("mov %%cr0, %0" : "=r" (cr0) :: "memory"); + return cr0; +} + +static inline void +amd64_write_cr0(uint64_t val) +{ + __ASMV("mov %0, %%cr0" :: "r" (val) : "memory"); +} + +static inline uint64_t +amd64_read_cr4(void) +{ + uint64_t cr4; + __ASMV("mov %%cr4, %0" : "=r" (cr4) :: "memory"); + return cr4; +} + +static inline void +amd64_write_cr4(uint64_t val) +{ + __ASMV("mov %0, %%cr4" :: "r" (val) : "memory"); +} + +static inline void +amd64_fxsave(void *area) +{ + __ASMV("fxsave (%0)" :: "r" (area) : "memory"); +} + +static inline void +amd64_fxrstor(void *area) +{ + __ASMV("fxrstor (%0)" :: "r" (area) : "memory"); +} + struct cpu_info *amd64_this_cpu(void); #endif /* !_AMD64_CPU_H_ */ diff --git a/sys/include/arch/amd64/frame.h b/sys/include/arch/amd64/frame.h index a2e1a05..298a836 100644 --- a/sys/include/arch/amd64/frame.h +++ b/sys/include/arch/amd64/frame.h @@ -67,5 +67,12 @@ struct trapframe { (FRAME)->rsp = SP; \ (FRAME)->ss = 0x10; \ +#define init_frame_user(FRAME, IP, SP) \ + (FRAME)->rip = IP; \ + (FRAME)->cs = 0x18 | 3; \ + (FRAME)->rflags = 0x202; \ + (FRAME)->rsp = SP; \ + (FRAME)->ss = 0x20 | 3; \ + #endif /* !defined(__ASSEMBLER__) */ #endif /* !_AMD64_FRAME_H_ */ diff --git a/sys/include/arch/amd64/pcb.h b/sys/include/arch/amd64/pcb.h new file mode 100644 index 0000000..0e0aab8 --- /dev/null +++ b/sys/include/arch/amd64/pcb.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2023-2024 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _AMD64_PCB_H_ +#define _AMD64_PCB_H_ + +#include <sys/types.h> + +struct pcb { + uint8_t *fpu_state; +}; + +#endif /* !_AMD64_PCB_H_ */ diff --git a/sys/include/arch/amd64/trap.h b/sys/include/arch/amd64/trap.h index 1019999..c75fa28 100644 --- a/sys/include/arch/amd64/trap.h +++ b/sys/include/arch/amd64/trap.h @@ -47,6 +47,7 @@ #define TRAP_PROTFLT 9 /* General protection */ #define TRAP_PAGEFLT 10 /* Page fault */ #define TRAP_NMI 11 /* Non-maskable interrupt */ +#define TRAP_SS 12 /* Stack-segment fault */ /* Trap is coming from user mode */ #define TRAP_USER 0x100 @@ -65,6 +66,7 @@ void segnp(void *sf); void general_prot(void *sf); void page_fault(void *sf); void nmi(void *sf); +void ss_fault(void *sf); void trap_handler(struct trapframe *tf); #else .macro handle_trap diff --git a/sys/include/sys/filedesc.h b/sys/include/sys/filedesc.h new file mode 100644 index 0000000..ef74fb1 --- /dev/null +++ b/sys/include/sys/filedesc.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2023-2024 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _SYS_FILEDESC_H_ +#define _SYS_FILEDESC_H_ + +#include <sys/vnode.h> +#include <sys/spinlock.h> +#include <sys/types.h> + +struct proc; + +struct filedesc { + int fdno; + off_t offset; + bool is_dir; + struct vnode *vnode; + struct spinlock lock; +}; + +#if defined(_KERNEL) +struct filedesc *fd_alloc(struct proc *td); +struct filedesc *fd_from_fdnum(const struct proc *td, int fdno); +void fd_close_fdnum(struct proc *td, int fdno); +#endif + +#endif diff --git a/sys/include/sys/loader.h b/sys/include/sys/loader.h index c1aa426..74a325c 100644 --- a/sys/include/sys/loader.h +++ b/sys/include/sys/loader.h @@ -33,17 +33,22 @@ #include <sys/types.h> #include <vm/pmap.h> -#define AT_NULL 0 -#define AT_IGNORE 1 -#define AT_EXECFD 2 -#define AT_PHDR 3 -#define AT_PHENT 4 -#define AT_PHNUM 5 -#define AT_PAGESZ 6 -#define AT_BASE 7 -#define AT_FLAGS 8 -#define AT_ENTRY 9 -#define AT_SECURE 10 +/* DANGER!: DO NOT CHANGE THESE DEFINES */ +#define AT_NULL 0 +#define AT_ENTRY 1 +#define AT_PHDR 2 +#define AT_PHENT 3 +#define AT_PHNUM 4 +#define AT_EXECPATH 5 +#define AT_SECURE 6 +#define AT_RANDOM 7 +#define AT_EXECFN 8 + +#define STACK_PUSH(ptr, val) *(--ptr) = val +#define AUXVAL(ptr, tag, val) __extension__ ({ \ + STACK_PUSH(ptr, val); \ + STACK_PUSH(ptr, tag); \ +}); /* Auxiliary Vector */ struct auxval { diff --git a/sys/include/sys/machdep.h b/sys/include/sys/machdep.h index 713b7db..0c06374 100644 --- a/sys/include/sys/machdep.h +++ b/sys/include/sys/machdep.h @@ -32,11 +32,15 @@ #include <sys/types.h> #include <sys/cdefs.h> +#include <sys/proc.h> #if defined(_KERNEL) #define MAXCPUS 32 +int processor_init_pcb(struct proc *proc); +int processor_free_pcb(struct proc *proc); +void processor_switch_to(struct proc *old_td, struct proc *new_td); void processor_init(void); void processor_halt(void); void intr_mask(void); diff --git a/sys/include/sys/mount.h b/sys/include/sys/mount.h index 3ac7ec7..209fa3e 100644 --- a/sys/include/sys/mount.h +++ b/sys/include/sys/mount.h @@ -33,6 +33,7 @@ #include <sys/types.h> #include <sys/queue.h> #include <sys/vnode.h> +#include <sys/cdefs.h> #define FS_NAME_MAX 16 /* Max length of FS type name including nul */ @@ -54,9 +55,15 @@ struct fs_info { char name[FS_NAME_MAX]; /* Filesystem type name */ struct vfsops *vfsops; /* Filesystem operations */ struct mount *mp_root; + uint16_t caps; }; /* + * Filesystem capabilities + */ +#define FSCAP_FULLPATH __BIT(0) /* Requires full path per lookup */ + +/* * Mount flags */ #define MNT_RDONLY 0x00000001 diff --git a/sys/include/sys/proc.h b/sys/include/sys/proc.h index f45e4c6..c6046d7 100644 --- a/sys/include/sys/proc.h +++ b/sys/include/sys/proc.h @@ -32,8 +32,13 @@ #include <sys/types.h> #include <sys/queue.h> +#include <sys/filedesc.h> #include <machine/cpu.h> #include <machine/frame.h> +#include <machine/pcb.h> +#include <vm/vm.h> + +#define PROC_MAX_FDS 256 /* * A task running on the CPU e.g., a process or @@ -43,6 +48,11 @@ struct proc { pid_t pid; struct cpu_info *cpu; 
struct trapframe *tf; + struct pcb pcb; + struct vas addrsp; + uintptr_t stack_base; + uint8_t is_user; + struct filedesc *fds[PROC_MAX_FDS]; TAILQ_ENTRY(proc) link; }; diff --git a/sys/include/sys/sched.h b/sys/include/sys/sched.h index d803df0..1fa947e 100644 --- a/sys/include/sys/sched.h +++ b/sys/include/sys/sched.h @@ -37,7 +37,9 @@ #include <machine/cpu.h> #include <machine/frame.h> +struct proc *this_td(void); void sched_init(void); +void sched_exit(void); void sched_context_switch(struct trapframe *tf); __noreturn diff --git a/sys/include/sys/syscall.h b/sys/include/sys/syscall.h new file mode 100644 index 0000000..66dc5f3 --- /dev/null +++ b/sys/include/sys/syscall.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2023-2024 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _SYS_SYSCALL_H_ +#define _SYS_SYSCALL_H_ + +#include <sys/types.h> +#if defined(_KERNEL) +#include <machine/frame.h> +#endif + +/* Do not reorder */ +enum { + SYS_exit = 1, + __MAX_SYSCALLS +}; + +struct syscall_args { + uint64_t code; + uint64_t arg0, arg1, arg2, arg3, arg4; + uint64_t ip; + uint64_t sp; +}; + +#if defined(_KERNEL) +extern uint64_t(*g_syscall_table[__MAX_SYSCALLS])(struct syscall_args *args); +void __syscall(struct trapframe *tf); +#endif + +#endif diff --git a/sys/include/sys/vfs.h b/sys/include/sys/vfs.h index a684c3f..c1bef53 100644 --- a/sys/include/sys/vfs.h +++ b/sys/include/sys/vfs.h @@ -42,7 +42,7 @@ void vfs_init(void); struct fs_info *vfs_byname(const char *name); int vfs_vget(struct vnode *parent, const char *name, struct vnode **vp); -struct vnode *vfs_path_to_node(const char *path); +int vfs_path_to_node(const char *path, struct vnode **vp); char *vfs_get_fname_at(const char *path, size_t idx); int vfs_rootname(const char *path, char **new_path); bool vfs_is_valid_path(const char *path); diff --git a/sys/include/sys/vnode.h b/sys/include/sys/vnode.h index f584356..545e38f 100644 --- a/sys/include/sys/vnode.h +++ b/sys/include/sys/vnode.h @@ -47,6 +47,7 @@ struct vnode { struct mount *mp; /* Ptr to vfs vnode is in */ struct vops *vops; struct vnode *parent; + struct fs_info *fs; /* Filesystem this vnode belongs to, can be NULL */ void *data; /* Filesystem specific data */ }; diff --git a/sys/include/vm/pmap.h b/sys/include/vm/pmap.h index ebabd32..3380199 100644 --- a/sys/include/vm/pmap.h +++ b/sys/include/vm/pmap.h @@ -90,6 +90,12 @@ struct vas pmap_read_vas(void); int pmap_map(struct vm_ctx *, struct vas, vaddr_t, paddr_t, vm_prot_t); /* + * Get rid of a virtual address space and free + * resources. + */ +int pmap_free_vas(struct vm_ctx *, struct vas); + +/* * Unmap a page. */ int pmap_unmap(struct vm_ctx *, struct vas, vaddr_t); diff --git a/sys/include/vm/vm.h b/sys/include/vm/vm.h index 2a24d76..48e1b8f 100644 --- a/sys/include/vm/vm.h +++ b/sys/include/vm/vm.h @@ -61,5 +61,6 @@ vm_get_page_size(void) void vm_init(void); struct vm_ctx *vm_get_ctx(void); +struct vas vm_get_kvas(void); #endif /* !_VM_H_ */ diff --git a/sys/kern/kern_filedesc.c b/sys/kern/kern_filedesc.c new file mode 100644 index 0000000..a943714 --- /dev/null +++ b/sys/kern/kern_filedesc.c @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2023-2024 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/filedesc.h> +#include <sys/proc.h> +#include <sys/sched.h> +#include <vm/dynalloc.h> +#include <assert.h> +#include <string.h> + +/* + * Allocate a file descriptor. + * + * @td: Thread to allocate from, NULL for current thread. + */ +struct filedesc * +fd_alloc(struct proc *td) +{ + struct filedesc *fd; + + if (td == NULL) { + td = this_td(); + __assert(td != NULL); + } + + for (size_t i = 0; i < PROC_MAX_FDS; ++i) { + if (td->fds[i] != NULL) { + continue; + } + + fd = dynalloc(sizeof(struct filedesc)); + memset(fd, 0, sizeof(struct filedesc)); + + if (fd == NULL) { + return NULL; + } + + fd->fdno = i; + td->fds[i] = fd; + return fd; + } + + return NULL; +} + +/* + * Fetch a file descriptor from a file descriptor + * number. + * + * @td: Thread to fetch from, NULL for current thread. + * @fdno: File descriptor to fetch + */ +struct filedesc * +fd_from_fdnum(const struct proc *td, int fdno) +{ + if (td == NULL) { + td = this_td(); + __assert(td != NULL); + } + + if (fdno < 0 || fdno > PROC_MAX_FDS) { + return NULL; + } + + for (size_t i = 0; i < PROC_MAX_FDS; ++i) { + if (i == fdno && td->fds[i] != NULL) { + return td->fds[i]; + } + } + + return NULL; +} + +/* + * Close a file descriptor from its fd number. + * + * @td: Thread to fetch from, NULL for current thread. + * @fdno: File descriptor number to close. + */ +void +fd_close_fdnum(struct proc *td, int fdno) +{ + struct filedesc *fd; + + if (td == NULL) { + td = this_td(); + __assert(td != NULL); + } + + fd = fd_from_fdnum(td, fdno); + if (fd == NULL) { + return; + } + + dynfree(fd); + td->fds[fdno] = NULL; +} diff --git a/sys/kern/kern_loader.c b/sys/kern/kern_loader.c index 53ba8c2..23249ea 100644 --- a/sys/kern/kern_loader.c +++ b/sys/kern/kern_loader.c @@ -61,12 +61,10 @@ int loader_load(struct vas vas, const void *dataptr, struct auxval *auxv, vm_prot_t prot = PROT_USER; uintptr_t physmem; - uintptr_t max_addr, map_addr; - size_t misalign, page_count; - + size_t misalign, page_count, map_len; int status; - const size_t GRANULE = vm_get_page_size(); + const size_t GRANULE = vm_get_page_size(); void *tmp_ptr; if (auxv == NULL) { @@ -93,26 +91,8 @@ int loader_load(struct vas vas, const void *dataptr, struct auxval *auxv, misalign = phdr->p_vaddr & (GRANULE - 1); page_count = __DIV_ROUNDUP(phdr->p_memsz + misalign, GRANULE); - max_addr = phdr->p_vaddr + (GRANULE * page_count); - - /* - * We are assuming this is a user program that we are loading. - * All user programs should be on the lower half of the address - * space. We will check that before we begin doing anything here. - * - * We are also going to check if the virtual address the program - * header refers to overflows into the higher half. If anything - * goes into the higher half, we won't simply drop the phdr, - * we'll instead assume caller error and return -EINVAL. 
- */ - if (phdr->p_vaddr >= VM_HIGHER_HALF) { - return -EINVAL; - } else if (max_addr >= VM_HIGHER_HALF) { - /* Overflows into higher half */ - return -EINVAL; - } - physmem = vm_alloc_pageframe(page_count); + map_len = page_count * GRANULE; /* Do we not have enough page frames? */ if (physmem == 0) { @@ -121,14 +101,9 @@ int loader_load(struct vas vas, const void *dataptr, struct auxval *auxv, return -ENOMEM; } - map_addr = phdr->p_vaddr + load_base; - status = vm_map_create(vas, map_addr, physmem, prot, page_count*GRANULE); + status = vm_map_create(vas, phdr->p_vaddr + load_base, physmem, prot, map_len); if (status != 0) { - DBG("Failed to map 0x%p - 0x%p\n", - phdr->p_vaddr + load_base, - (phdr->p_vaddr + load_base) + (page_count * GRANULE)); - return status; } diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c index f61c592..7b3776d 100644 --- a/sys/kern/kern_sched.c +++ b/sys/kern/kern_sched.c @@ -34,10 +34,36 @@ #include <sys/timer.h> #include <sys/cdefs.h> #include <sys/spinlock.h> +#include <sys/loader.h> +#include <sys/panic.h> +#include <sys/machdep.h> +#include <sys/filedesc.h> +#include <fs/initramfs.h> #include <vm/dynalloc.h> +#include <vm/physseg.h> +#include <vm/pmap.h> +#include <vm/map.h> +#include <vm/vm.h> #include <assert.h> #include <string.h> +#define STACK_PAGES 8 +#define STACK_SIZE (STACK_PAGES*vm_get_page_size()) + +/* + * The PHYS_TO_VIRT/VIRT_TO_PHYS macros convert + * addresses to lower and higher half addresses. + * Userspace addresses are on the lower half, + * therefore, we can just wrap over these to + * keep things simple. + * + * XXX: TODO: This won't work when not identity mapping + * lowerhalf addresses. Once that is updated, + * get rid of this. + */ +#define USER_TO_KERN(user) PHYS_TO_VIRT(user) +#define KERN_TO_USER(kern) VIRT_TO_PHYS(kern) + /* * Thread ready queue - all threads ready to be * scheduled should be added to this queue. 
@@ -51,9 +77,6 @@ static size_t nthread = 0; */ static struct spinlock tdq_lock = {0}; -/* In sys/<machine>/<machine>/switch.S */ -void __sched_switch_to(struct trapframe *tf); - static inline void sched_oneshot(void) { @@ -97,13 +120,11 @@ sched_dequeue_td(void) spinlock_acquire(&tdq_lock); - if (TAILQ_EMPTY(&td_queue)) { - goto done; + if (!TAILQ_EMPTY(&td_queue)) { + td = TAILQ_FIRST(&td_queue); + TAILQ_REMOVE(&td_queue, td, link); } - td = TAILQ_FIRST(&td_queue); - TAILQ_REMOVE(&td_queue, td, link); -done: spinlock_release(&tdq_lock); return td; } @@ -121,12 +142,101 @@ sched_enter(void) } } +static uintptr_t +sched_init_stack(void *stack_top, char *argvp[], char *envp[], struct auxval auxv) +{ + uintptr_t *sp = stack_top; + void *env_ptr = NULL, *argv_ptr = NULL; + size_t argc, envc, len; + + /* Copy argument and environment strings */ + for (envc = 0; envp[envc] != NULL; ++envc) { + len = strlen(envp[envc]); + sp -= len - 1; + memcpy(sp, envp[envc], len); + } + + __assert(envc >= 1); + env_ptr = sp; + + for (argc = 0; argvp[argc] != NULL; ++argc) { + len = strlen(argvp[argc]); + sp -= len - 1; + memcpy(sp, argvp[argc], len); + } + + __assert(argc >= 1); + argv_ptr = sp; + + /* Ensure the stack is aligned */ + sp = (void *)__ALIGN_DOWN((uintptr_t)sp, 16); + if (((argc + envc + 1) & 1) != 0) + --sp; + + AUXVAL(sp, AT_NULL, 0x0); + AUXVAL(sp, AT_SECURE, 0x0); + AUXVAL(sp, AT_ENTRY, auxv.at_entry); + AUXVAL(sp, AT_PHDR, auxv.at_phdr); + AUXVAL(sp, AT_PHNUM, auxv.at_phnum); + STACK_PUSH(sp, 0); + + /* Push environment string pointers */ + for (int i = 0; i < envc; ++i) { + len = strlen(env_ptr); + sp -= len; + + *sp = (uintptr_t)KERN_TO_USER((uintptr_t)env_ptr); + env_ptr = (char *)env_ptr + len; + } + + /* Push argument string pointers */ + STACK_PUSH(sp, 0); + for (int i = 0; i < argc; ++i) { + len = strlen(argv_ptr); + sp -= len; + + *sp = (uintptr_t)KERN_TO_USER((uintptr_t)argv_ptr); + argv_ptr = (char *)argv_ptr + len; + } + + STACK_PUSH(sp, argc); + + return (uintptr_t)sp; +} + +static uintptr_t +sched_create_stack(struct vas vas, bool user, char *argvp[], + char *envp[], struct auxval auxv, struct proc *td) +{ + int status; + uintptr_t stack; + const vm_prot_t USER_STACK_PROT = PROT_WRITE | PROT_USER; + + if (!user) { + stack = (uintptr_t)dynalloc(STACK_SIZE); + td->stack_base = (uintptr_t)stack; + return sched_init_stack((void *)(stack + STACK_SIZE), argvp, envp, auxv); + } + + stack = vm_alloc_pageframe(STACK_PAGES); + td->stack_base = stack; + status = vm_map_create(vas, stack, stack, USER_STACK_PROT, STACK_SIZE); + + if (status != 0) { + return 0; + } + + memset(USER_TO_KERN(stack), 0, STACK_SIZE); + stack = sched_init_stack((void *)USER_TO_KERN(stack + STACK_SIZE), argvp, envp, auxv); + return stack; +} + static struct proc * -sched_create_td(uintptr_t rip) +sched_create_td(uintptr_t rip, char *argvp[], char *envp[], struct auxval auxv, + struct vas vas, bool is_user) { - const size_t STACK_SIZE = 0x100000; /* 1 MiB */ struct proc *td; - void *stack; + uintptr_t stack; struct trapframe *tf; tf = dynalloc(sizeof(struct trapframe)); @@ -134,32 +244,103 @@ sched_create_td(uintptr_t rip) return NULL; } - stack = dynalloc(STACK_SIZE); - if (stack == NULL) { + td = dynalloc(sizeof(struct proc)); + if (td == NULL) { + /* TODO: Free stack */ dynfree(tf); return NULL; } - td = dynalloc(sizeof(struct proc)); - if (td == NULL) { + stack = sched_create_stack(vas, is_user, argvp, envp, auxv, td); + if (stack == 0) { dynfree(tf); - dynfree(stack); + dynfree(td); return NULL; } memset(tf, 0, 
sizeof(struct trapframe)); - memset(stack, 0, STACK_SIZE); + memset(td, 0, sizeof(struct proc)); /* Setup process itself */ td->pid = 0; /* Don't assign PID until enqueued */ td->cpu = NULL; /* Not yet assigned a core */ td->tf = tf; + td->addrsp = vas; + td->is_user = is_user; + processor_init_pcb(td); + + /* Allocate standard file descriptors */ + __assert(fd_alloc(td) != NULL); /* STDIN */ + __assert(fd_alloc(td) != NULL); /* STDOUT */ + __assert(fd_alloc(td) != NULL); /* STDERR */ /* Setup trapframe */ - init_frame(tf, rip, (uintptr_t)stack + STACK_SIZE - 1); + if (!is_user) { + init_frame(tf, rip, (uintptr_t)stack); + } else { + init_frame_user(tf, rip, KERN_TO_USER(stack)); + } return td; } +static void +sched_destroy_td(struct proc *td) +{ + processor_free_pcb(td); + + /* + * User stacks are allocated with vm_alloc_pageframe(), + * while kernel stacks are allocated with dynalloc(). + * We want to check if we are a user program or kernel + * program to perform the proper deallocation method. + */ + if (td->is_user) { + vm_free_pageframe(td->stack_base, STACK_PAGES); + } else { + dynfree((void *)td->stack_base); + } + + /* Close all of the file descriptors */ + for (size_t i = 0; i < PROC_MAX_FDS; ++i) { + fd_close_fdnum(td, i); + } + + pmap_free_vas(vm_get_ctx(), td->addrsp); + dynfree(td); +} + +void +sched_exit(void) +{ + struct proc *td; + struct vas kvas = vm_get_kvas(); + + intr_mask(); + + td = this_td(); + + /* Switch back to the kernel address space and destroy ourself */ + pmap_switch_vas(vm_get_ctx(), kvas); + sched_destroy_td(td); + + intr_unmask(); + sched_enter(); +} + +/* + * Get the current running thread. + */ +struct proc * +this_td(void) +{ + struct sched_state *state; + struct cpu_info *ci; + + ci = this_cpu(); + state = &ci->sched_state; + return state->td; +} + /* * Thread context switch routine */ @@ -174,43 +355,73 @@ sched_context_switch(struct trapframe *tf) * If we have no threads, we should not * preempt at all. */ - if (nthread == 0) { - goto done; - } else if ((next_td = sched_dequeue_td()) == NULL) { - /* Empty */ - goto done; + if (nthread == 0 || (next_td = sched_dequeue_td()) == NULL) { + sched_oneshot(); + return; } + /* + * If we have a thread currently running and we are switching + * to another, we shall save our current register state + * by copying the trapframe. 
+ */ if (state->td != NULL) { - /* Save our trapframe */ td = state->td; memcpy(td->tf, tf, sizeof(struct trapframe)); } - /* Copy to stack */ + /* Copy over the next thread's register state to us */ memcpy(tf, next_td->tf, sizeof(struct trapframe)); td = state->td; state->td = next_td; + /* Re-enqueue the previous thread if it exists */ if (td != NULL) { sched_enqueue_td(td); } -done: + + /* Do architecture specific context switch logic */ + processor_switch_to(td, next_td); + + /* Done, switch out our vas and oneshot */ + pmap_switch_vas(vm_get_ctx(), next_td->addrsp); sched_oneshot(); } void sched_init(void) { + struct proc *init; + struct auxval auxv = {0}, ld_auxv = {0}; + struct vas vas = pmap_create_vas(vm_get_ctx()); + const char *init_bin, *ld_bin; + + char *ld_path; + char *argv[] = {"/usr/sbin/init", NULL}; + char *envp[] = {"", NULL}; + TAILQ_INIT(&td_queue); - /* - * TODO: Create init with sched_create_td() - * and enqueue with sched_enqueue_td() - */ - (void)sched_create_td; - (void)sched_enqueue_td; + if ((init_bin = initramfs_open("/usr/sbin/init")) == NULL) { + panic("Could not open /usr/boot/init\n"); + } + if (loader_load(vas, init_bin, &auxv, 0, &ld_path) != 0) { + panic("Could not load init\n"); + } + if ((ld_bin = initramfs_open(ld_path)) == NULL) { + panic("Could not open %s\n", ld_path); + } + if (loader_load(vas, ld_bin, &ld_auxv, 0x00, NULL) != 0) { + panic("Could not load %s\n", ld_path); + } + + init = sched_create_td((uintptr_t)ld_auxv.at_entry, argv, envp, ld_auxv, vas, true); + if (init == NULL) { + panic("Failed to create thread for init\n"); + } + + sched_enqueue_td(init); } /* diff --git a/sys/kern/kern_syscall.c b/sys/kern/kern_syscall.c new file mode 100644 index 0000000..b6e31d1 --- /dev/null +++ b/sys/kern/kern_syscall.c @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2023-2024 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <sys/syscall.h> +#include <sys/sched.h> +#include <sys/cdefs.h> +#include <sys/types.h> + +__noreturn static uint64_t +sys_exit(struct syscall_args *args) +{ + sched_exit(); + __builtin_unreachable(); +} + +uint64_t(*g_syscall_table[__MAX_SYSCALLS])(struct syscall_args *args) = { + sys_exit, +}; diff --git a/sys/kern/vfs_init.c b/sys/kern/vfs_init.c index 18ab3ac..0d8d194 100644 --- a/sys/kern/vfs_init.c +++ b/sys/kern/vfs_init.c @@ -40,8 +40,10 @@ __MODULE_NAME("vfs"); __KERNEL_META("$Hyra$: vfs.c, Ian Marco Moffett, " "Hyra Virtual File System"); +#define INITRAMFS_ID 0 + static struct fs_info filesystems[] = { - { "initramfs", &g_initramfs_ops } + [INITRAMFS_ID] = { "initramfs", &g_initramfs_ops } }; struct vnode *g_root_vnode = NULL; @@ -76,4 +78,5 @@ vfs_init(void) } g_root_vnode->vops = &g_initramfs_vops; + g_root_vnode->fs = &filesystems[INITRAMFS_ID]; } diff --git a/sys/kern/vfs_lookup.c b/sys/kern/vfs_lookup.c index 748d5e2..1398964 100644 --- a/sys/kern/vfs_lookup.c +++ b/sys/kern/vfs_lookup.c @@ -29,11 +29,13 @@ #include <sys/types.h> #include <sys/vfs.h> +#include <sys/mount.h> +#include <sys/errno.h> #include <vm/dynalloc.h> #include <string.h> /* - * Fetches the filename within a past at + * Fetches the filename within a path at * the nth index denoted by `idx' * * Returns memory allocated by dynalloc() @@ -115,9 +117,58 @@ vfs_get_fname_at(const char *path, size_t idx) return ret; } -struct vnode * -vfs_path_to_node(const char *path) +/* + * Fetches a vnode from a path. + * + * @path: Path to fetch vnode from. + * @vp: Output var for fetched vnode. + * + * Returns 0 on success. + */ +int +vfs_path_to_node(const char *path, struct vnode **vp) { - /* TODO */ - return NULL; + struct vnode *vnode = g_root_vnode; + struct fs_info *fs; + char *name; + int s = 0, fs_caps = 0; + + if (strcmp(path, "/") == 0 || !vfs_is_valid_path(path)) { + return -1; + } else if (*path != '/') { + return -1; + } + + /* Fetch filesystem capabilities if we can */ + if (vnode->fs != NULL) { + fs = vnode->fs; + fs_caps = fs->caps; + } + + /* + * If the filesystem requires full-path lookups, we can try + * throwing the full path at the filesystem to see if + * it'll give us a vnode. + */ + if (__TEST(fs_caps, FSCAP_FULLPATH)) { + s = vfs_vget(g_root_vnode, path, &vnode); + goto done; + } + + for (size_t i = 0;; ++i) { + name = vfs_get_fname_at(path, i); + if (name == NULL) break; + + s = vfs_vget(vnode, name, &vnode); + dynfree(name); + + if (s != 0) break; + } + +done: + if (vp != NULL && s == 0) { + *vp = vnode; + } + + return s; } diff --git a/sys/vm/vm_init.c b/sys/vm/vm_init.c index f15cb0b..6096059 100644 --- a/sys/vm/vm_init.c +++ b/sys/vm/vm_init.c @@ -59,6 +59,15 @@ vm_get_ctx(void) return &bsp_vm_ctx; } +/* + * Return the kernel VAS. 
+ */
+struct vas
+vm_get_kvas(void)
+{
+    return kernel_vas;
+}
+
 void
 vm_init(void)
 {
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 0d27738..f65bad2 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -78,12 +78,13 @@ int
 vm_map_create(struct vas vas, vaddr_t va, paddr_t pa, vm_prot_t prot,
     size_t bytes)
 {
     size_t granule = vm_get_page_size();
+    size_t misalign = va & (granule - 1);
     int s;
     struct vm_ctx *ctx = vm_get_ctx();
 
     /* We want bytes to be aligned by the granule */
-    bytes = __ALIGN_UP(bytes, granule);
+    bytes = __ALIGN_UP(bytes + misalign, granule);
 
     /* Align VA/PA by granule */
     va = __ALIGN_DOWN(va, granule);
@@ -115,10 +116,11 @@ vm_map_destroy(struct vas vas, vaddr_t va, size_t bytes)
 {
     struct vm_ctx *ctx = vm_get_ctx();
     size_t granule = vm_get_page_size();
+    size_t misalign = va & (granule - 1);
     int s;
 
     /* We want bytes to be aligned by the granule */
-    bytes = __ALIGN_UP(bytes, granule);
+    bytes = __ALIGN_UP(bytes + misalign, granule);
 
     /* Align VA by granule */
     va = __ALIGN_DOWN(va, granule);
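
For context on how the userland pieces above fit together: the merge installs IDT vector 0x80 with IDT_INT_GATE_USER pointing at syscall_isr, which pushes a trapframe and calls __syscall(); __syscall() takes the syscall code from rax and the first argument from rdi, and sys/syscall.h defines SYS_exit as 1. The sketch below is illustrative only and is not part of this commit; the wrapper names hyra_syscall1 and hyra_exit are hypothetical.

#include <stdint.h>

#define SYS_exit 1    /* from sys/syscall.h in this merge */

/*
 * Sketch: trap into the kernel through the int $0x80 gate installed
 * above. __syscall() reads the code from %rax and arg0 from %rdi.
 */
static inline uint64_t
hyra_syscall1(uint64_t code, uint64_t arg0)
{
    uint64_t ret;

    __asm__ __volatile__("int $0x80"
                         : "=a" (ret)
                         : "a" (code), "D" (arg0)
                         : "memory");
    return ret;
}

/* Hypothetical wrapper: terminate the calling process via sys_exit(). */
static inline void
hyra_exit(uint64_t status)
{
    hyra_syscall1(SYS_exit, status);    /* does not return */
}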