From 451631e73f59f0383425bcf19479814771e68879 Mon Sep 17 00:00:00 2001 From: Ian Moffett Date: Tue, 15 Apr 2025 23:13:10 -0400 Subject: kernel: sched: Run oneshots forever Signed-off-by: Ian Moffett --- sys/kern/kern_sched.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'sys/kern') diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c index 16daae2..ca5bfbe 100644 --- a/sys/kern/kern_sched.c +++ b/sys/kern/kern_sched.c @@ -237,8 +237,10 @@ sched_switch(struct trapframe *tf) void sched_enter(void) { - sched_oneshot(false); - for (;;); + for (;;) { + sched_oneshot(false); + md_pause(); + } } void -- cgit v1.2.3 From b1642ad065b04f452227bff58e951f67fb4cec47 Mon Sep 17 00:00:00 2001 From: Ian Moffett Date: Thu, 17 Apr 2025 00:42:57 -0400 Subject: kernel: synch: Add lazy spinlock acquire Signed-off-by: Ian Moffett --- sys/include/sys/spinlock.h | 4 +++- sys/kern/kern_synch.c | 26 ++++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) (limited to 'sys/kern') diff --git a/sys/include/sys/spinlock.h b/sys/include/sys/spinlock.h index c136e05..b416152 100644 --- a/sys/include/sys/spinlock.h +++ b/sys/include/sys/spinlock.h @@ -33,13 +33,15 @@ #include struct spinlock { - volatile bool lock; + volatile int lock; }; #if defined(_KERNEL) void spinlock_acquire(struct spinlock *lock); void spinlock_release(struct spinlock *lock); + +int spinlock_try_acquire(struct spinlock *lock); int spinlock_usleep(struct spinlock *lock, size_t usec_max); #endif diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c index 2011c61..2b64673 100644 --- a/sys/kern/kern_synch.c +++ b/sys/kern/kern_synch.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -79,6 +80,31 @@ spinlock_acquire(struct spinlock *lock) while (__atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE)); } +/* + * Lazy acquire a spinlock + * + * spinlock_try_acquire() may only spin one thread + * at a time, threads that want to spin too must + 
* explicitly do it on their own. + * + * This function returns 1 (a value that may be + * spun on) when the lock is acquired and a + * thread is already spinning on it. + */ +int +spinlock_try_acquire(struct spinlock *lock) +{ + volatile int locked; + + locked = atomic_load_int(&lock->lock); + if (locked != 0) { + return 1; + } + + while (__atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE)); + return 0; +} + void spinlock_release(struct spinlock *lock) { -- cgit v1.2.3 From be63b6e102a617a048160c42c84ef46fa38e6aad Mon Sep 17 00:00:00 2001 From: Ian Moffett Date: Thu, 17 Apr 2025 00:45:10 -0400 Subject: kernel: sched: Returns as soon as result Signed-off-by: Ian Moffett --- sys/kern/kern_sched.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'sys/kern') diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c index ca5bfbe..c1eb3d8 100644 --- a/sys/kern/kern_sched.c +++ b/sys/kern/kern_sched.c @@ -105,12 +105,14 @@ sched_dequeue_td(void) if (!TAILQ_EMPTY(&queue->q)) { td = TAILQ_FIRST(&queue->q); TAILQ_REMOVE(&queue->q, td, link); - break; + spinlock_release(&tdq_lock); + return td; } } + /* We got nothing */ spinlock_release(&tdq_lock); - return td; + return NULL; } /* -- cgit v1.2.3 From 5f13039023890e9c634912704464601f199c672d Mon Sep 17 00:00:00 2001 From: Ian Moffett Date: Thu, 17 Apr 2025 02:53:00 -0400 Subject: kernel: Enable interrupts upon sched entry Signed-off-by: Ian Moffett --- sys/arch/amd64/amd64/machdep.c | 11 ++++++++++- sys/include/arch/amd64/cdefs.h | 1 + sys/include/arch/amd64/sync.h | 35 +++++++++++++++++++++++++++++++++++ sys/kern/kern_sched.c | 14 ++++++++++++++ 4 files changed, 60 insertions(+), 1 deletion(-) create mode 100644 sys/include/arch/amd64/sync.h (limited to 'sys/kern') diff --git a/sys/arch/amd64/amd64/machdep.c b/sys/arch/amd64/amd64/machdep.c index a5fb4bf..d54ea22 100644 --- a/sys/arch/amd64/amd64/machdep.c +++ b/sys/arch/amd64/amd64/machdep.c @@ -207,6 +207,16 @@ this_cpu(void) return ci; } +/* + * 
Sync all system operation + */ +int +md_sync_all(void) +{ + lapic_eoi(); + return 0; +} + void cpu_startup(struct cpu_info *ci) { @@ -220,6 +230,5 @@ cpu_startup(struct cpu_info *ci) init_tss(ci); try_mitigate_spectre(); - __ASMV("sti"); /* Unmask interrupts */ lapic_init(); } diff --git a/sys/include/arch/amd64/cdefs.h b/sys/include/arch/amd64/cdefs.h index 98d3f0b..29a8841 100644 --- a/sys/include/arch/amd64/cdefs.h +++ b/sys/include/arch/amd64/cdefs.h @@ -31,6 +31,7 @@ #define _AMD64_CDEFS_H_ #include +#include #define md_pause() __ASMV("rep; nop") diff --git a/sys/include/arch/amd64/sync.h b/sys/include/arch/amd64/sync.h new file mode 100644 index 0000000..f331f43 --- /dev/null +++ b/sys/include/arch/amd64/sync.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MACHINE_SYNC_H_ +#define _MACHINE_SYNC_H_ + +int md_sync_all(void); + +#endif /* !_MACHINE_SYNC_H_ */ diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c index c1eb3d8..386406e 100644 --- a/sys/kern/kern_sched.c +++ b/sys/kern/kern_sched.c @@ -33,8 +33,10 @@ #include #include #include +#include #include #include +#include #include #include #include @@ -239,6 +241,18 @@ sched_switch(struct trapframe *tf) void sched_enter(void) { + static int nenter = 0; + + /* + * Enable interrupts for all processors and + * sync on first entry. + */ + md_inton(); + if (nenter == 0) { + md_sync_all(); + atomic_inc_int(&nenter); + } + for (;;) { sched_oneshot(false); md_pause(); -- cgit v1.2.3 From e30fcbe3822ab740cef6ff2584dd0a1429ab10a5 Mon Sep 17 00:00:00 2001 From: Ian Moffett Date: Thu, 17 Apr 2025 02:58:20 -0400 Subject: kernel: synch: Add system-wide locking Add system-wide locking for critical code sections. 
Signed-off-by: Ian Moffett --- sys/include/sys/spinlock.h | 3 +++ sys/kern/kern_synch.c | 26 ++++++++++++++++++++++++++ 2 files changed, 29 insertions(+) (limited to 'sys/kern') diff --git a/sys/include/sys/spinlock.h b/sys/include/sys/spinlock.h index b416152..140addc 100644 --- a/sys/include/sys/spinlock.h +++ b/sys/include/sys/spinlock.h @@ -44,6 +44,9 @@ void spinlock_release(struct spinlock *lock); int spinlock_try_acquire(struct spinlock *lock); int spinlock_usleep(struct spinlock *lock, size_t usec_max); +/* System-wide locking (be careful!!) */ +int syslock(void); +void sysrel(void); #endif #endif /* !_SYS_SPINLOCK_H_ */ diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c index 2b64673..57b27d0 100644 --- a/sys/kern/kern_synch.c +++ b/sys/kern/kern_synch.c @@ -38,6 +38,9 @@ #define pr_trace(fmt, ...) kprintf("synch: " fmt, ##__VA_ARGS__) #define pr_error(...) pr_trace(__VA_ARGS__) +/* XXX: Be very careful with this */ +static struct spinlock __syslock; + /* * Returns 0 on success, returns non-zero value * on timeout/failure. @@ -110,3 +113,26 @@ spinlock_release(struct spinlock *lock) { __atomic_clear(&lock->lock, __ATOMIC_RELEASE); } + +/* + * Attempt to hold the system-wide lock, returns 1 + * if already held. + * + * XXX: Only use for CRITICAL code sections. + */ +int +syslock(void) +{ + return spinlock_try_acquire(&__syslock); +} + +/* + * Release the system-wide lock + * + * XXX: Only use for CRITICAL code sections. 
+ */ +void +sysrel(void) +{ + spinlock_release(&__syslock); +} -- cgit v1.2.3 From 27d771013d7fc923b8079b6a0246ba3bc4e83d58 Mon Sep 17 00:00:00 2001 From: Ian Moffett Date: Thu, 17 Apr 2025 03:01:30 -0400 Subject: kernel: sched: Default to MLFQ Signed-off-by: Ian Moffett --- sys/kern/kern_sched.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'sys/kern') diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c index 386406e..35a1af7 100644 --- a/sys/kern/kern_sched.c +++ b/sys/kern/kern_sched.c @@ -46,7 +46,7 @@ void sched_switch(struct trapframe *tf); -static sched_policy_t policy = SCHED_POLICY_RR; +static sched_policy_t policy = SCHED_POLICY_MLFQ; /* * Thread ready queues - all threads ready to be -- cgit v1.2.3 From 866fbbb96dca022394bc8e0abbfb52293ad2a3b5 Mon Sep 17 00:00:00 2001 From: Ian Moffett Date: Thu, 17 Apr 2025 03:09:15 -0400 Subject: kernel: Reconsider the idea of init systems Signed-off-by: Ian Moffett --- sys/kern/init_main.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'sys/kern') diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c index f3f88d7..20514b8 100644 --- a/sys/kern/init_main.c +++ b/sys/kern/init_main.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include @@ -46,6 +47,7 @@ static struct proc proc0; static void start_init(void) { +#if 0 struct proc *td = this_td(); struct execve_args execve_args; char *argv[] = { "/usr/sbin/init", NULL }; @@ -56,6 +58,8 @@ start_init(void) execve_args.envp = envp; if (execve(td, &execve_args) != 0) panic("failed to load init\n"); +#endif + for (;;); } int @@ -81,19 +85,22 @@ main(void) /* Init the virtual file system */ vfs_init(); - DRIVERS_INIT(); - /* Expose the console to devfs */ cons_expose(); /* Start scheduler and bootstrap APs */ + md_intoff(); sched_init(); - mp_bootstrap_aps(&g_bsp_ci); - /* Startup init */ + /* Startup pid 1 */ memset(&proc0, 0, sizeof(proc0.tf)); fork1(&proc0, 0, start_init, NULL); + /* Load 
all drivers */ + DRIVERS_INIT(); + + /* Bootstrap APs and here we go! */ + mp_bootstrap_aps(&g_bsp_ci); sched_enter(); __builtin_unreachable(); } -- cgit v1.2.3 From b71e2c4eb42dd4f0279b4b4c1986e4d0983a78e5 Mon Sep 17 00:00:00 2001 From: Ian Moffett Date: Thu, 17 Apr 2025 03:50:59 -0400 Subject: kernel: Add copyright Signed-off-by: Ian Moffett --- sys/kern/init_main.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'sys/kern') diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c index 20514b8..aa78e8d 100644 --- a/sys/kern/init_main.c +++ b/sys/kern/init_main.c @@ -44,6 +44,13 @@ static struct proc proc0; +static void +copyright(void) +{ + kprintf(OMIT_TIMESTAMP + "Copyright (c) 2023-2025 Ian Marco Moffett and the OSMORA team\n"); +} + static void start_init(void) { @@ -70,6 +77,7 @@ main(void) /* Startup the console */ cons_init(); + copyright(); kprintf("Starting Hyra/%s v%s: %s\n", HYRA_ARCH, HYRA_VERSION, HYRA_BUILDDATE); -- cgit v1.2.3 From 25837e0c189af9e90dbe6c249edf155ef8eccc43 Mon Sep 17 00:00:00 2001 From: Ian Moffett Date: Fri, 18 Apr 2025 21:38:05 -0400 Subject: kernel: Initialize vm subsystem earlier Signed-off-by: Ian Moffett --- sys/kern/init_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'sys/kern') diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c index aa78e8d..799d352 100644 --- a/sys/kern/init_main.c +++ b/sys/kern/init_main.c @@ -75,15 +75,15 @@ main(void) /* Setup serial driver */ serial_init(); + /* Init the virtual memory subsystem */ + vm_init(); + /* Startup the console */ cons_init(); copyright(); kprintf("Starting Hyra/%s v%s: %s\n", HYRA_ARCH, HYRA_VERSION, HYRA_BUILDDATE); - /* Init the virtual memory subsystem */ - vm_init(); - /* Start the ACPI subsystem */ acpi_init(); -- cgit v1.2.3 From ba4a5da7ec6c43fd965753f699fc08357e597291 Mon Sep 17 00:00:00 2001 From: Ian Moffett Date: Fri, 18 Apr 2025 21:38:45 -0400 Subject: kernel: descrip: Fix copyin/copyout + rw operation Use ssize_t to 
avoid unsigned integer underflows and only call copyout() during file reads Signed-off-by: Ian Moffett --- sys/kern/kern_descrip.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'sys/kern') diff --git a/sys/kern/kern_descrip.c b/sys/kern/kern_descrip.c index 201db3e..d122e89 100644 --- a/sys/kern/kern_descrip.c +++ b/sys/kern/kern_descrip.c @@ -148,6 +148,7 @@ static int fd_rw(unsigned int fd, void *buf, size_t count, uint8_t write) { char *kbuf = NULL; + ssize_t n; struct filedesc *filedes; struct sio_txn sio; scret_t retval = 0; @@ -194,22 +195,21 @@ fd_rw(unsigned int fd, void *buf, size_t count, uint8_t write) } /* Call VFS write hook */ - if ((count = vfs_vop_write(filedes->vp, &sio)) < 0) { - retval = -EIO; + if ((n = vfs_vop_write(filedes->vp, &sio)) < 0) { + retval = n; goto done; } } else { - if ((count = vfs_vop_read(filedes->vp, &sio)) < 0) { - retval = -EIO; + if ((n = vfs_vop_read(filedes->vp, &sio)) < 0) { + retval = n; goto done; } - } - if (copyout(kbuf, buf, count) < 0) { - retval = -EFAULT; - goto done; + if (copyout(kbuf, buf, count) < 0) { + retval = -EFAULT; + goto done; + } } - retval = count; done: if (kbuf != NULL) { -- cgit v1.2.3 From e4648e44a871e4d570acb7e149797759ff4be7fc Mon Sep 17 00:00:00 2001 From: Ian Moffett Date: Fri, 18 Apr 2025 21:40:30 -0400 Subject: kernel: sched: Always sync on sched entry Signed-off-by: Ian Moffett --- sys/kern/kern_sched.c | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) (limited to 'sys/kern') diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c index 35a1af7..4bbe5a0 100644 --- a/sys/kern/kern_sched.c +++ b/sys/kern/kern_sched.c @@ -241,18 +241,8 @@ sched_switch(struct trapframe *tf) void sched_enter(void) { - static int nenter = 0; - - /* - * Enable interrupts for all processors and - * sync on first entry. 
- */ md_inton(); - if (nenter == 0) { - md_sync_all(); - atomic_inc_int(&nenter); - } - + md_sync_all(); for (;;) { sched_oneshot(false); md_pause(); -- cgit v1.2.3 From 6afe6a3384d6b11cc1c895d8631c54959d2a7556 Mon Sep 17 00:00:00 2001 From: Ian Moffett Date: Fri, 18 Apr 2025 21:48:50 -0400 Subject: kernel: exec_elf64: Better logging Signed-off-by: Ian Moffett --- sys/kern/exec_elf64.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'sys/kern') diff --git a/sys/kern/exec_elf64.c b/sys/kern/exec_elf64.c index c9040dd..3767b0b 100644 --- a/sys/kern/exec_elf64.c +++ b/sys/kern/exec_elf64.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -42,6 +43,9 @@ #include #include +#define pr_trace(fmt, ...) kprintf("elf64: " fmt, ##__VA_ARGS__) +#define pr_error(...) pr_trace(__VA_ARGS__) + #define PHDR(HDRP, IDX) \ (void *)((uintptr_t)HDRP + (HDRP)->e_phoff + (HDRP->e_phentsize * IDX)) @@ -209,6 +213,7 @@ elf64_load(const char *pathname, struct proc *td, struct exec_prog *prog) /* Try to allocate page frames */ physmem = vm_alloc_frame(page_count); if (physmem == 0) { + pr_error("out of physical memory\n"); status = -ENOMEM; break; } -- cgit v1.2.3