diff options
author | Ian Moffett <ian@osmora.org> | 2024-06-23 22:25:13 -0400 |
---|---|---|
committer | Ian Moffett <ian@osmora.org> | 2024-06-23 22:30:35 -0400 |
commit | 00245135a7df4028df60f62f4041c1302e5b3381 (patch) | |
tree | 5a3ae4e1421f3463fc4b90774ed3fc322adac66f /sys/kern | |
parent | aa8d940e7e1cadf651054a58d6953fee03149587 (diff) |
kernel: sched: Add PCB and context switching
Signed-off-by: Ian Moffett <ian@osmora.org>
Diffstat (limited to 'sys/kern')
-rw-r--r-- | sys/kern/kern_sched.c | 83 |
1 file changed, 80 insertions, 3 deletions
diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c index b79d682..c370311 100644 --- a/sys/kern/kern_sched.c +++ b/sys/kern/kern_sched.c @@ -33,8 +33,11 @@ #include <sys/cdefs.h> #include <sys/syslog.h> #include <machine/frame.h> +#include <machine/cpu.h> +#include <vm/pmap.h> #include <dev/timer.h> #include <assert.h> +#include <string.h> #define pr_trace(fmt, ...) kprintf("ksched: " fmt, ##__VA_ARGS__) @@ -49,6 +52,12 @@ static sched_policy_t policy = SCHED_POLICY_RR; static struct sched_queue qlist[SCHED_NQUEUE]; /* + * Thread queue lock - all operations to `qlist' + * must be done with this lock acquired. + */ +__cacheline_aligned static struct spinlock tdq_lock = {0}; + +/* * Perform timer oneshot */ static inline void @@ -64,6 +73,54 @@ sched_oneshot(bool now) timer.oneshot_us(usec); } +static struct proc * +sched_dequeue_td(void) +{ + struct sched_queue *queue; + struct proc *td = NULL; + + spinlock_acquire(&tdq_lock); + + for (size_t i = 0; i < SCHED_NQUEUE; ++i) { + queue = &qlist[i]; + if (!TAILQ_EMPTY(&queue->q)) { + td = TAILQ_FIRST(&queue->q); + TAILQ_REMOVE(&queue->q, td, link); + break; + } + } + + spinlock_release(&tdq_lock); + return td; +} + +/* + * Add a thread to the scheduler. + */ +void +sched_enqueue_td(struct proc *td) +{ + struct sched_queue *queue; + + spinlock_acquire(&tdq_lock); + queue = &qlist[td->priority]; + + TAILQ_INSERT_TAIL(&queue->q, td, link); + spinlock_release(&tdq_lock); +} + +/* + * Return the currently running thread. + */ +struct proc * +this_td(void) +{ + struct cpu_info *ci; + + ci = this_cpu(); + return ci->curtd; +} + /* * Perform a context switch. * @@ -72,10 +129,30 @@ sched_oneshot(bool now) void sched_switch(struct trapframe *tf) { - static struct spinlock lock = {0}; + struct cpu_info *ci; + struct pcb *pcbp; + struct proc *next_td, *td; + + ci = this_cpu(); + td = ci->curtd; + + /* Do we have threads to switch to? 
*/ + if ((next_td = sched_dequeue_td()) == NULL) { + sched_oneshot(false); + return; + } + + /* Re-enqueue the old thread */ + if (td != NULL) { + memcpy(&td->tf, tf, sizeof(td->tf)); + sched_enqueue_td(td); + } + + memcpy(tf, &next_td->tf, sizeof(*tf)); + ci->curtd = next_td; + pcbp = &next_td->pcb; - spinlock_acquire(&lock); - spinlock_release(&lock); + pmap_switch_vas(pcbp->addrsp); sched_oneshot(false); } |