author    Ian Moffett <ian@osmora.org>    2025-04-18 21:57:44 -0400
committer Ian Moffett <ian@osmora.org>    2025-04-18 21:57:44 -0400
commit    9906547712a88cf4dc012a6f6bd6e2ad04c5e3f3 (patch)
tree      41b40ec97f5082793b08a495f6a935bc3c1ed25f /sys/kern
parent    0b5adaff02190dad76d845381a41b998696d9e97 (diff)
parent    92d4f9dae64ab5325feca1f39e5955415e8275b9 (diff)
Merge branch 'expt' into aarch64
Signed-off-by: Ian Moffett <ian@osmora.org>
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/exec_elf64.c    5
-rw-r--r--  sys/kern/init_main.c    29
-rw-r--r--  sys/kern/kern_descrip.c 18
-rw-r--r--  sys/kern/kern_sched.c   18
-rw-r--r--  sys/kern/kern_synch.c   52
5 files changed, 101 insertions(+), 21 deletions(-)
diff --git a/sys/kern/exec_elf64.c b/sys/kern/exec_elf64.c
index c9040dd..3767b0b 100644
--- a/sys/kern/exec_elf64.c
+++ b/sys/kern/exec_elf64.c
@@ -30,6 +30,7 @@
#include <sys/elf.h>
#include <sys/exec.h>
#include <sys/param.h>
+#include <sys/syslog.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vnode.h>
@@ -42,6 +43,9 @@
#include <string.h>
#include <machine/pcb.h>
+#define pr_trace(fmt, ...) kprintf("elf64: " fmt, ##__VA_ARGS__)
+#define pr_error(...) pr_trace(__VA_ARGS__)
+
#define PHDR(HDRP, IDX) \
(void *)((uintptr_t)HDRP + (HDRP)->e_phoff + (HDRP->e_phentsize * IDX))
@@ -209,6 +213,7 @@ elf64_load(const char *pathname, struct proc *td, struct exec_prog *prog)
/* Try to allocate page frames */
physmem = vm_alloc_frame(page_count);
if (physmem == 0) {
+ pr_error("out of physical memory\n");
status = -ENOMEM;
break;
}
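The pr_trace()/pr_error() macros added above follow the subsystem-prefix logging convention used elsewhere in the tree (see kern_synch.c below): every message is stamped with "elf64: ", and pr_error() is just an alias for pr_trace(). A minimal userspace sketch of the same pattern, with printf() standing in for kprintf():

#include <stdio.h>

#define pr_trace(fmt, ...) printf("elf64: " fmt, ##__VA_ARGS__)
#define pr_error(...) pr_trace(__VA_ARGS__)

int
main(void)
{
        /* prints "elf64: out of physical memory" */
        pr_error("out of physical memory\n");
        return 0;
}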
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index f3f88d7..799d352 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -38,14 +38,23 @@
#include <dev/cons/cons.h>
#include <dev/acpi/acpi.h>
#include <machine/cpu.h>
+#include <machine/cdefs.h>
#include <vm/vm.h>
#include <string.h>
static struct proc proc0;
static void
+copyright(void)
+{
+ kprintf(OMIT_TIMESTAMP
+ "Copyright (c) 2023-2025 Ian Marco Moffett and the OSMORA team\n");
+}
+
+static void
start_init(void)
{
+#if 0
struct proc *td = this_td();
struct execve_args execve_args;
char *argv[] = { "/usr/sbin/init", NULL };
@@ -56,6 +65,8 @@ start_init(void)
execve_args.envp = envp;
if (execve(td, &execve_args) != 0)
panic("failed to load init\n");
+#endif
+ for (;;);
}
int
@@ -64,14 +75,15 @@ main(void)
/* Setup serial driver */
serial_init();
+ /* Init the virtual memory subsystem */
+ vm_init();
+
/* Startup the console */
cons_init();
+ copyright();
kprintf("Starting Hyra/%s v%s: %s\n", HYRA_ARCH, HYRA_VERSION,
HYRA_BUILDDATE);
- /* Init the virtual memory subsystem */
- vm_init();
-
/* Start the ACPI subsystem */
acpi_init();
@@ -81,19 +93,22 @@ main(void)
/* Init the virtual file system */
vfs_init();
- DRIVERS_INIT();
-
/* Expose the console to devfs */
cons_expose();
/* Start scheduler and bootstrap APs */
+ md_intoff();
sched_init();
- mp_bootstrap_aps(&g_bsp_ci);
- /* Startup init */
+ /* Startup pid 1 */
memset(&proc0, 0, sizeof(proc0.tf));
fork1(&proc0, 0, start_init, NULL);
+ /* Load all drivers */
+ DRIVERS_INIT();
+
+ /* Bootstrap APs and here we go! */
+ mp_bootstrap_aps(&g_bsp_ci);
sched_enter();
__builtin_unreachable();
}
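In the reordered bring-up above, vm_init() now runs before cons_init() (presumably so the console can allocate memory), interrupts are masked with md_intoff() before the scheduler is set up, and the APs are bootstrapped only after pid 1 is forked and the drivers are loaded. One line worth a second look is memset(&proc0, 0, sizeof(proc0.tf)): it zeroes only the first sizeof(proc0.tf) bytes of proc0, which clears the trapframe only if tf happens to be the first member. A sketch of the more explicit form, with a hypothetical struct layout inferred from that sizeof expression:

#include <string.h>

/* hypothetical layout, inferred from the sizeof() expression above */
struct trapframe {
        unsigned long regs[32];
};

struct proc {
        struct trapframe tf;
        /* ... other fields ... */
};

static struct proc proc0;

static void
proc0_clear_tf(void)
{
        /* clears exactly the embedded trapframe, wherever it sits */
        memset(&proc0.tf, 0, sizeof(proc0.tf));
}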
diff --git a/sys/kern/kern_descrip.c b/sys/kern/kern_descrip.c
index 201db3e..d122e89 100644
--- a/sys/kern/kern_descrip.c
+++ b/sys/kern/kern_descrip.c
@@ -148,6 +148,7 @@ static int
fd_rw(unsigned int fd, void *buf, size_t count, uint8_t write)
{
char *kbuf = NULL;
+ ssize_t n;
struct filedesc *filedes;
struct sio_txn sio;
scret_t retval = 0;
@@ -194,22 +195,21 @@ fd_rw(unsigned int fd, void *buf, size_t count, uint8_t write)
}
/* Call VFS write hook */
- if ((count = vfs_vop_write(filedes->vp, &sio)) < 0) {
- retval = -EIO;
+ if ((n = vfs_vop_write(filedes->vp, &sio)) < 0) {
+ retval = n;
goto done;
}
} else {
- if ((count = vfs_vop_read(filedes->vp, &sio)) < 0) {
- retval = -EIO;
+ if ((n = vfs_vop_read(filedes->vp, &sio)) < 0) {
+ retval = n;
goto done;
}
- }
- if (copyout(kbuf, buf, count) < 0) {
- retval = -EFAULT;
- goto done;
+ if (copyout(kbuf, buf, count) < 0) {
+ retval = -EFAULT;
+ goto done;
+ }
}
-
retval = count;
done:
if (kbuf != NULL) {
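The fd_rw() change above fixes a classic signed/unsigned bug: count is a size_t, so the old check (count = vfs_vop_write(...)) < 0 could never be true and the error path was unreachable. Capturing the return value in a ssize_t makes the check work, and returning n instead of a hard-coded -EIO propagates the real errno from the VFS layer. The restructure also moves copyout() into the read branch, so a write no longer copies the kernel buffer back out to userspace. A self-contained illustration of the pitfall:

#include <stdio.h>
#include <sys/types.h>

/* stand-in for a failing vfs_vop_read()/vfs_vop_write() */
static ssize_t
vop_stub(void)
{
        return -5;              /* e.g. -EIO */
}

int
main(void)
{
        size_t count;           /* the old receiver type */
        ssize_t n;              /* the new receiver type */

        if ((count = vop_stub()) < 0)   /* always false: size_t is unsigned */
                printf("never printed\n");
        if ((n = vop_stub()) < 0)       /* fires as intended */
                printf("error propagated: %zd\n", n);
        return 0;
}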
diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c
index 16daae2..4bbe5a0 100644
--- a/sys/kern/kern_sched.c
+++ b/sys/kern/kern_sched.c
@@ -33,8 +33,10 @@
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/syslog.h>
+#include <sys/atomic.h>
#include <machine/frame.h>
#include <machine/cpu.h>
+#include <machine/cdefs.h>
#include <vm/pmap.h>
#include <dev/timer.h>
#include <assert.h>
@@ -44,7 +46,7 @@
void sched_switch(struct trapframe *tf);
-static sched_policy_t policy = SCHED_POLICY_RR;
+static sched_policy_t policy = SCHED_POLICY_MLFQ;
/*
* Thread ready queues - all threads ready to be
@@ -105,12 +107,14 @@ sched_dequeue_td(void)
if (!TAILQ_EMPTY(&queue->q)) {
td = TAILQ_FIRST(&queue->q);
TAILQ_REMOVE(&queue->q, td, link);
- break;
+ spinlock_release(&tdq_lock);
+ return td;
}
}
+ /* We got nothing */
spinlock_release(&tdq_lock);
- return td;
+ return NULL;
}
/*
@@ -237,8 +241,12 @@ sched_switch(struct trapframe *tf)
void
sched_enter(void)
{
- sched_oneshot(false);
- for (;;);
+ md_inton();
+ md_sync_all();
+ for (;;) {
+ sched_oneshot(false);
+ md_pause();
+ }
}
void
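Two behavioral changes land in the scheduler here. sched_dequeue_td() now releases tdq_lock and returns as soon as it finds a runnable thread, making the locking on every exit path explicit, and the default policy flips from round-robin to MLFQ. sched_enter() also becomes a proper idle loop: interrupts are unmasked, the oneshot timer is re-armed on every iteration instead of once, and the CPU is given a spin hint. A hedged, x86-flavored sketch of that idle pattern, with local stubs standing in for the md_*() wrappers and sched_oneshot():

/* stand-ins for md_inton()/md_pause() and sched_oneshot() */
static inline void
cpu_inton(void)
{
        __asm__ volatile ("sti");
}

static inline void
cpu_pause(void)
{
        __asm__ volatile ("pause");
}

static void
arm_oneshot(void)
{
        /* program the next timer tick here */
}

static void
idle_enter(void)
{
        cpu_inton();            /* unmask interrupts so the timer can fire */
        for (;;) {
                arm_oneshot();  /* re-arm each pass; a oneshot fires once */
                cpu_pause();    /* hint to the CPU that this is a spin */
        }
}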
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 2011c61..57b27d0 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -30,6 +30,7 @@
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/errno.h>
+#include <sys/atomic.h>
#include <sys/syslog.h>
#include <sys/spinlock.h>
#include <dev/timer.h>
@@ -37,6 +38,9 @@
#define pr_trace(fmt, ...) kprintf("synch: " fmt, ##__VA_ARGS__)
#define pr_error(...) pr_trace(__VA_ARGS__)
+/* XXX: Be very careful with this */
+static struct spinlock __syslock;
+
/*
* Returns 0 on success, returns non-zero value
* on timeout/failure.
@@ -79,8 +83,56 @@ spinlock_acquire(struct spinlock *lock)
while (__atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE));
}
+/*
+ * Lazily acquire a spinlock
+ *
+ * spinlock_try_acquire() spins at most one thread
+ * at a time; callers that also want to spin must
+ * do so explicitly on their own.
+ *
+ * Returns 1 (a value that may be spun on) if the
+ * lock is already held by another thread, and 0
+ * once the lock has been acquired.
+ */
+int
+spinlock_try_acquire(struct spinlock *lock)
+{
+ volatile int locked;
+
+ locked = atomic_load_int(&lock->lock);
+ if (locked != 0) {
+ return 1;
+ }
+
+ while (__atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE));
+ return 0;
+}
+
void
spinlock_release(struct spinlock *lock)
{
__atomic_clear(&lock->lock, __ATOMIC_RELEASE);
}
+
+/*
+ * Attempt to hold the system-wide lock; returns 1
+ * if it is already held by another thread.
+ *
+ * XXX: Only use for CRITICAL code sections.
+ */
+int
+syslock(void)
+{
+ return spinlock_try_acquire(&__syslock);
+}
+
+/*
+ * Release the system-wide lock
+ *
+ * XXX: Only use for CRITICAL code sections.
+ */
+void
+sysrel(void)
+{
+ spinlock_release(&__syslock);
+}