author      Ian Moffett <ian@osmora.org>    2024-06-24 23:00:41 -0400
committer   Ian Moffett <ian@osmora.org>    2024-06-24 23:00:41 -0400
commit      963478ff0ad358fc3a44ff5dba0c64ddc889f296 (patch)
tree        a14c3ea6feb7d1b783364737fcefabce249473ff /sys/kern/kern_sched.c
parent      236963e7563be3e3f8220dac7bb4af446928e194 (diff)
parent      6f6a36d1e8b3dd50cb4d394fa1de4888663b4ea5 (diff)
Import hyra expt
Diffstat (limited to 'sys/kern/kern_sched.c')
-rw-r--r--    sys/kern/kern_sched.c    179
1 file changed, 179 insertions, 0 deletions
diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c
new file mode 100644
index 0000000..c370311
--- /dev/null
+++ b/sys/kern/kern_sched.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2023-2024 Ian Marco Moffett and the Osmora Team.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Hyra nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/sched.h>
+#include <sys/schedvar.h>
+#include <sys/cdefs.h>
+#include <sys/syslog.h>
+#include <machine/frame.h>
+#include <machine/cpu.h>
+#include <vm/pmap.h>
+#include <dev/timer.h>
+#include <assert.h>
+#include <string.h>
+
+#define pr_trace(fmt, ...) kprintf("ksched: " fmt, ##__VA_ARGS__)
+
+void sched_switch(struct trapframe *tf);
+
+static sched_policy_t policy = SCHED_POLICY_RR;
+
+/*
+ * Thread ready queues - all threads ready to be
+ * scheduled should be added to the toplevel queue.
+ */
+static struct sched_queue qlist[SCHED_NQUEUE];
+
+/*
+ * Thread queue lock - all operations to `qlist'
+ * must be done with this lock acquired.
+ */
+__cacheline_aligned static struct spinlock tdq_lock = {0};
+
+/*
+ * Perform timer oneshot
+ */
+static inline void
+sched_oneshot(bool now)
+{
+    struct timer timer;
+    size_t usec = now ? SHORT_TIMESLICE_USEC : DEFAULT_TIMESLICE_USEC;
+    tmrr_status_t tmr_status;
+
+    tmr_status = req_timer(TIMER_SCHED, &timer);
+    __assert(tmr_status == TMRR_SUCCESS);
+
+    timer.oneshot_us(usec);
+}
+
+static struct proc *
+sched_dequeue_td(void)
+{
+    struct sched_queue *queue;
+    struct proc *td = NULL;
+
+    spinlock_acquire(&tdq_lock);
+
+    for (size_t i = 0; i < SCHED_NQUEUE; ++i) {
+        queue = &qlist[i];
+        if (!TAILQ_EMPTY(&queue->q)) {
+            td = TAILQ_FIRST(&queue->q);
+            TAILQ_REMOVE(&queue->q, td, link);
+            break;
+        }
+    }
+
+    spinlock_release(&tdq_lock);
+    return td;
+}
+
+/*
+ * Add a thread to the scheduler.
+ */
+void
+sched_enqueue_td(struct proc *td)
+{
+    struct sched_queue *queue;
+
+    spinlock_acquire(&tdq_lock);
+    queue = &qlist[td->priority];
+
+    TAILQ_INSERT_TAIL(&queue->q, td, link);
+    spinlock_release(&tdq_lock);
+}
+
+/*
+ * Return the currently running thread.
+ */
+struct proc *
+this_td(void)
+{
+    struct cpu_info *ci;
+
+    ci = this_cpu();
+    return ci->curtd;
+}
+
+/*
+ * Perform a context switch.
+ *
+ * TODO
+ */
+void
+sched_switch(struct trapframe *tf)
+{
+    struct cpu_info *ci;
+    struct pcb *pcbp;
+    struct proc *next_td, *td;
+
+    ci = this_cpu();
+    td = ci->curtd;
+
+    /* Do we have threads to switch to? */
+    if ((next_td = sched_dequeue_td()) == NULL) {
+        sched_oneshot(false);
+        return;
+    }
+
+    /* Re-enqueue the old thread */
+    if (td != NULL) {
+        memcpy(&td->tf, tf, sizeof(td->tf));
+        sched_enqueue_td(td);
+    }
+
+    memcpy(tf, &next_td->tf, sizeof(*tf));
+    ci->curtd = next_td;
+    pcbp = &next_td->pcb;
+
+    pmap_switch_vas(pcbp->addrsp);
+    sched_oneshot(false);
+}
+
+/*
+ * Main scheduler loop
+ */
+void
+sched_enter(void)
+{
+    sched_oneshot(false);
+    for (;;);
+}
+
+void
+sched_init(void)
+{
+    /* Setup the queues */
+    for (int i = 0; i < SCHED_NQUEUE; ++i) {
+        TAILQ_INIT(&qlist[i].q);
+    }
+
+    pr_trace("Prepared %d queues (policy=0x%x)\n",
+        SCHED_NQUEUE, policy);
+}
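Taken together, the file implies a simple flow: sched_init() prepares the per-priority ready queues, sched_enter() arms the first timer oneshot and spins until the scheduler interrupt fires, and the timer's interrupt path hands its trapframe to sched_switch(), which rewrites the frame in place rather than swapping stacks. Below is a minimal sketch of that wiring; sched_timer_isr() and spawn_kthread() are hypothetical names for illustration and are not part of this commit.

/*
 * Illustrative glue only: sched_timer_isr() and spawn_kthread() do not
 * appear in this diff; the real interrupt plumbing lives elsewhere.
 */
#include <sys/sched.h>
#include <machine/frame.h>

/* Assumed handler for the TIMER_SCHED oneshot interrupt. */
void
sched_timer_isr(struct trapframe *tf)
{
    /*
     * sched_switch() saves *tf into the outgoing thread, copies the
     * incoming thread's saved frame back over *tf, switches address
     * spaces, and re-arms the oneshot. Returning from the interrupt
     * then resumes whatever *tf now describes.
     */
    sched_switch(tf);
}

/* Boot-time usage sketch: mark a thread runnable before sched_enter(). */
static void
spawn_kthread(struct proc *td)
{
    /* td->priority indexes qlist[], so it must be < SCHED_NQUEUE. */
    sched_enqueue_td(td);
}

Because the switch copies frames over *tf in place, simply returning from the interrupt resumes the next thread; a thread that is never re-enqueued stops being scheduled, which is also why sched_switch() re-arms the oneshot before bailing out when the queues are empty.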