author    Ian Moffett <ian@osmora.org>    2024-02-23 17:38:36 -0500
committer Ian Moffett <ian@osmora.org>    2024-02-23 17:38:36 -0500
commit    839034c1309bc331e4a44c8e0153f013a93ba5b5 (patch)
tree      1fff6d3b251bcf7cdf1c87ecd68dfb87a1e1e136
parent    4c00208b3a50be0bc6dd240e59d6d891ef18d8b3 (diff)
kernel: Add initial scheduler implementation
Signed-off-by: Ian Moffett <ian@osmora.org>
-rw-r--r--  sys/arch/amd64/amd64/local_intr.S |  19
-rw-r--r--  sys/arch/amd64/amd64/switch.S     |  50
-rw-r--r--  sys/include/sys/proc.h            |   2
-rw-r--r--  sys/include/sys/sched.h           |   4
-rw-r--r--  sys/include/sys/sched_state.h     |   6
-rw-r--r--  sys/kern/init_main.c              |   5
-rw-r--r--  sys/kern/kern_sched.c             | 197
7 files changed, 246 insertions(+), 37 deletions(-)
diff --git a/sys/arch/amd64/amd64/local_intr.S b/sys/arch/amd64/amd64/local_intr.S
index abfd9e0..b7b6345 100644
--- a/sys/arch/amd64/amd64/local_intr.S
+++ b/sys/arch/amd64/amd64/local_intr.S
@@ -27,14 +27,19 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include <machine/frameasm.h>
+#include <machine/trap.h>
+#include <sys/cdefs.h>
+
+__KERNEL_META "$Hyra$: local_intr.S, Ian Marco Moffett, \
+ Routines for handling Local Interrupts"
+
.text
.globl lapic_tmr_isr
lapic_tmr_isr:
- lea stub_msg(%rip), %rdi
- call kprintf
- cli
- hlt
-
-.section .rodata
-stub_msg: .ascii "**LAPIC TIMER ISR IS A STUB; HALTING**\n\0"
+ push_trapframe $0
+ mov %rsp, %rdi
+ call sched_context_switch
+ pop_trapframe
+ iretq
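
The push_trapframe and pop_trapframe macros come from machine/frameasm.h,
which is not part of this diff. As a rough sketch of what such macros
typically expand to on amd64 (register order and the trap-number slot here
are assumptions, not Hyra's actual definitions):

    /* Illustrative frameasm.h-style macros; Hyra's real ones may differ. */
    .macro push_trapframe trapno
        push \trapno        /* trap number */
        push %r15
        push %r14
        push %r13
        push %r12
        push %r11
        push %r10
        push %r9
        push %r8
        push %rbp
        push %rdi
        push %rsi
        push %rdx
        push %rcx
        push %rbx
        push %rax
    .endm

    .macro pop_trapframe
        pop %rax
        pop %rbx
        pop %rcx
        pop %rdx
        pop %rsi
        pop %rdi
        pop %rbp
        pop %r8
        pop %r9
        pop %r10
        pop %r11
        pop %r12
        pop %r13
        pop %r14
        pop %r15
        add $8, %rsp        /* discard trap number */
    .endm

With a layout like this, the `mov %rsp, %rdi' above hands the saved
register block (the trapframe) to sched_context_switch as its first
argument, and pop_trapframe/iretq then resume whatever context that
handler left behind on the stack.
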
diff --git a/sys/arch/amd64/amd64/switch.S b/sys/arch/amd64/amd64/switch.S
new file mode 100644
index 0000000..156012e
--- /dev/null
+++ b/sys/arch/amd64/amd64/switch.S
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2023-2024 Ian Marco Moffett and the Osmora Team.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Hyra nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#include <machine/frameasm.h>
+
+__KERNEL_META "$Hyra$: switch.S, Ian Marco Moffett, \
+ Low level context switch code"
+
+.text
+.globl __sched_switch_to
+
+/* void __sched_switch_to(struct trapframe *tf) */
+__sched_switch_to:
+ /* Sanity check */
+ or %rdi, %rdi
+ jz fail
+
+ /* Pop regs and switch */
+ mov %rdi, %rsp
+ pop_trapframe
+ iretq
+fail:
+ retq
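
__sched_switch_to never returns to its caller on success: it points %rsp
at the thread's saved trapframe, pops the register state, and iretq's
straight into the thread. For that to work, struct trapframe must mirror
the pop order and end with the five-quadword frame that iretq consumes.
machine/frame.h is not shown in this diff; a compatible layout might look
like the following (field order is an assumption, consistent with the
macro sketch above):

    #include <stdint.h>

    /* Hypothetical layout; must match push_trapframe/pop_trapframe. */
    struct trapframe {
        uint64_t rax, rbx, rcx, rdx, rsi, rdi, rbp;
        uint64_t r8, r9, r10, r11, r12, r13, r14, r15;
        uint64_t trapno;
        /* Pushed by the CPU on interrupt entry; consumed by iretq */
        uint64_t rip, cs, rflags, rsp, ss;
    };

The jz after `or %rdi, %rdi' is a cheap null check: a null trapframe
pointer would otherwise be loaded into %rsp and crash on the first pop.
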
diff --git a/sys/include/sys/proc.h b/sys/include/sys/proc.h
index 5106bf3..f45e4c6 100644
--- a/sys/include/sys/proc.h
+++ b/sys/include/sys/proc.h
@@ -33,6 +33,7 @@
#include <sys/types.h>
#include <sys/queue.h>
#include <machine/cpu.h>
+#include <machine/frame.h>
/*
* A task running on the CPU e.g., a process or
@@ -41,6 +42,7 @@
struct proc {
pid_t pid;
struct cpu_info *cpu;
+ struct trapframe *tf;
TAILQ_ENTRY(proc) link;
};
diff --git a/sys/include/sys/sched.h b/sys/include/sys/sched.h
index 4fdf065..d803df0 100644
--- a/sys/include/sys/sched.h
+++ b/sys/include/sys/sched.h
@@ -35,6 +35,10 @@
#include <sys/types.h>
#include <sys/spinlock.h>
#include <machine/cpu.h>
+#include <machine/frame.h>
+
+void sched_init(void);
+void sched_context_switch(struct trapframe *tf);
__noreturn
void sched_init_processor(struct cpu_info *ci);
diff --git a/sys/include/sys/sched_state.h b/sys/include/sys/sched_state.h
index 03a55dd..52d6c56 100644
--- a/sys/include/sys/sched_state.h
+++ b/sys/include/sys/sched_state.h
@@ -30,15 +30,13 @@
#ifndef _SYS_SCHED_STATE_H_
#define _SYS_SCHED_STATE_H_
-#include <sys/types.h>
-#include <sys/queue.h>
+#include <sys/proc.h>
/*
* Scheduler state, per CPU.
*/
struct sched_state {
- TAILQ_HEAD(, proc) queue;
- size_t queue_nrun; /* Number of processes in the run queue */
+ struct proc *td; /* Current thread */
};
#endif /* !_SYS_SCHED_STATE_H_ */
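
This change shrinks the per-CPU scheduler state to a single pointer; the
run queue itself moves to the global td_queue in kern_sched.c below. Under
the new layout, a core finds its current thread through its cpu_info, e.g.
(sched_current_td is a hypothetical helper, not part of this commit):

    /* Hypothetical convenience helper built on the new sched_state. */
    static inline struct proc *
    sched_current_td(void)
    {
        struct cpu_info *ci = this_cpu();
        return ci->sched_state.td;
    }
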
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index 2666b5e..6a60e2b 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -91,9 +91,12 @@ main(void)
processor_init();
list_timers();
+ sched_init();
ci = this_cpu();
- __TRY_CALL(ap_bootstrap, ci);
+ __TRY_CALL(ap_bootstrap, ci);
sched_init_processor(ci);
+
+ while (1);
__builtin_unreachable();
}
diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c
index 4f17ddf..81dd3a6 100644
--- a/sys/kern/kern_sched.c
+++ b/sys/kern/kern_sched.c
@@ -30,62 +30,209 @@
#include <sys/sched.h>
#include <sys/sched_state.h>
#include <sys/types.h>
+#include <sys/timer.h>
#include <sys/cdefs.h>
#include <sys/spinlock.h>
-#include <machine/cpu.h>
+#include <vm/dynalloc.h>
#include <assert.h>
+#include <string.h>
+
+#define DEFAULT_TIMESLICE_USEC 100000000
/*
- * This is the processor ready list, processors
- * (cores) that have no task assigned live here.
- *
- * Assigning a task to a core is done by popping from
- * this list. However, it must be done carefully and
- * must be serialized. You *must* acquire ci_ready_lock
- * before performing *any* operations on ci_ready_list!!!
+ * Thread ready queue - all threads ready to be
+ * scheduled should be added to this queue.
*/
-static TAILQ_HEAD(, cpu_info) ci_ready_list;
-static struct spinlock ci_ready_lock = {0};
+static TAILQ_HEAD(, proc) td_queue;
/*
- * Push a processor into the ready list.
+ * Thread queue lock - all operations on `td_queue'
+ * must be done with this lock acquired.
+ */
+static struct spinlock tdq_lock = {0};
+
+/* In sys/<machine>/<machine>/switch.S */
+void __sched_switch_to(struct trapframe *tf);
+
+static inline void
+sched_oneshot(void)
+{
+ struct timer timer;
+ tmrr_status_t tmr_status;
+
+ tmr_status = req_timer(TIMER_SCHED, &timer);
+ __assert(tmr_status == TMRR_SUCCESS);
+
+ timer.oneshot_us(DEFAULT_TIMESLICE_USEC);
+}
+
+/*
+ * Push a thread into the thread ready queue,
+ * allowing it to be eventually dequeued
+ * and run.
*/
static void
-sched_enqueue_ci(struct cpu_info *ci)
+sched_enqueue_td(struct proc *td)
+{
+ /* Sanity check */
+ if (td == NULL)
+ return;
+
+ spinlock_acquire(&tdq_lock);
+
+ td->pid = TAILQ_NELEM(&td_queue);
+ TAILQ_INSERT_TAIL(&td_queue, td, link);
+
+ spinlock_release(&tdq_lock);
+}
+
+/*
+ * Dequeue the first thread in the thread ready
+ * queue.
+ */
+static struct proc *
+sched_dequeue_td(void)
{
- spinlock_acquire(&ci_ready_lock);
- TAILQ_INSERT_TAIL(&ci_ready_list, ci, link);
- spinlock_release(&ci_ready_lock);
+ struct proc *td = NULL;
+
+ spinlock_acquire(&tdq_lock);
+
+ if (TAILQ_EMPTY(&td_queue)) {
+ goto done;
+ }
+
+ td = TAILQ_FIRST(&td_queue);
+ TAILQ_REMOVE(&td_queue, td, link);
+done:
+ spinlock_release(&tdq_lock);
+ return td;
}
+
/*
* Processor awaiting tasks to be assigned will be here spinning.
*/
__noreturn static void
sched_enter(void)
{
+ struct proc *td;
+ struct cpu_info *ci = this_cpu();
+ struct sched_state *state = &ci->sched_state;
+
for (;;) {
+ if ((td = sched_dequeue_td()) != NULL) {
+ state->td = td;
+ sched_oneshot();
+ __sched_switch_to(td->tf);
+ }
+
hint_spinwait();
}
}
+static struct proc *
+sched_create_td(uintptr_t rip)
+{
+ const size_t STACK_SIZE = 0x100000; /* 1 MiB */
+ struct proc *td;
+ void *stack;
+ struct trapframe *tf;
+
+ tf = dynalloc(sizeof(struct trapframe));
+ if (tf == NULL) {
+ return NULL;
+ }
+
+ stack = dynalloc(STACK_SIZE);
+ if (stack == NULL) {
+ dynfree(tf);
+ return NULL;
+ }
+
+ td = dynalloc(sizeof(struct proc));
+ if (td == NULL) {
+ dynfree(tf);
+ dynfree(stack);
+ return NULL;
+ }
+
+ memset(tf, 0, sizeof(struct trapframe));
+ memset(stack, 0, STACK_SIZE);
+
+ /* Setup process itself */
+ td->pid = 0; /* Don't assign PID until enqueued */
+ td->cpu = NULL; /* Not yet assigned a core */
+ td->tf = tf;
+
+ /* Setup trapframe */
+ init_frame(tf, rip, (uintptr_t)stack + STACK_SIZE - 1);
+ return td;
+}
+
/*
- * Setup scheduler related things and enqueue AP.
+ * Thread context switch routine
*/
void
-sched_init_processor(struct cpu_info *ci)
+sched_context_switch(struct trapframe *tf)
{
- struct sched_state *sched_state = &ci->sched_state;
- static bool is_init = true;
+ struct cpu_info *ci = this_cpu();
+ struct sched_state *state = &ci->sched_state;
+ struct proc *td, *next_td;
+
+ spinlock_acquire(&tdq_lock);
+ td = state->td;
+
+ /*
+ * If we have no current thread or the queue is empty,
+ * preempting would be bad because there is nothing to
+ * switch to. And if we only have one thread, there is
+ * no point in preempting.
+ */
+ if (td == NULL || TAILQ_NELEM(&td_queue) == 1) {
+ goto done;
+ } else if ((next_td = sched_dequeue_td()) == NULL) {
+ /* Empty */
+ goto done;
+ }
+
- if (is_init) {
- /* Setup ready list if first call */
- TAILQ_INIT(&ci_ready_list);
- is_init = false;
+ /* Save our trapframe */
+ memcpy(td->tf, tf, sizeof(struct trapframe));
+
+ if ((next_td = TAILQ_NEXT(td, link)) == NULL) {
+ /* We need to wrap to the first thread */
+ next_td = TAILQ_FIRST(&td_queue);
}
- TAILQ_INIT(&sched_state->queue);
- sched_enqueue_ci(ci);
+ /* Copy to stack */
+ memcpy(tf, next_td->tf, sizeof(struct trapframe));
+ state->td = next_td;
+done:
+ spinlock_release(&tdq_lock);
+ sched_oneshot();
+}
+
+void
+sched_init(void)
+{
+ TAILQ_INIT(&td_queue);
+
+ /*
+ * TODO: Create init with sched_create_td()
+ * and enqueue with sched_enqueue_td()
+ */
+ (void)sched_create_td;
+ (void)sched_enqueue_td;
+}
+
+/*
+ * Setup scheduler related things and enqueue AP.
+ */
+void
+sched_init_processor(struct cpu_info *ci)
+{
+ struct sched_state *sched_state = &ci->sched_state;
+ (void)sched_state; /* TODO */
+ sched_enter();
+}
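
sched_init leaves its TODO open, so nothing is ever enqueued and
sched_enter spins forever. Once an entry point exists, the TODO would be
filled in roughly like this (init_entry is a hypothetical placeholder, not
something this commit defines):

    /* Hypothetical: a real entry point would replace init_entry. */
    extern void init_entry(void);

    void
    sched_init(void)
    {
        struct proc *init_td;

        TAILQ_INIT(&td_queue);

        init_td = sched_create_td((uintptr_t)init_entry);
        if (init_td != NULL) {
            sched_enqueue_td(init_td);
        }
    }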