/*
 * Copyright (c) 2023-2024 Ian Marco Moffett and the Osmora Team.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Hyra nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/sched.h>
#include <sys/machdep.h>
#include <sys/timer.h>
#include <sys/cdefs.h>
#include <sys/spinlock.h>
#include <sys/queue.h>
#include <vm/dynalloc.h>
#include <assert.h>
#include <string.h>

#define DEFAULT_TIMESLICE_USEC 100000000

/*
 * Thread ready queue - all threads ready to be
 * scheduled should be added to this queue.
 */
static TAILQ_HEAD(, proc) td_queue;

/*
 * Thread queue lock - all operations to `td_queue'
 * must be done with this lock acquired.
 */
static struct spinlock tdq_lock = {0};

/* In sys///switch.S */
void __sched_switch_to(struct trapframe *tf);

/*
 * Arm a oneshot timer so the scheduler is re-entered
 * after the current timeslice expires.
 */
static inline void
sched_oneshot(void)
{
    struct timer timer;
    tmrr_status_t tmr_status;

    tmr_status = req_timer(TIMER_SCHED, &timer);
    __assert(tmr_status == TMRR_SUCCESS);

    timer.oneshot_us(DEFAULT_TIMESLICE_USEC);
}

/*
 * Push a thread onto the thread ready queue,
 * allowing it to eventually be dequeued and run.
 */
static void
sched_enqueue_td(struct proc *td)
{
    /* Sanity check */
    if (td == NULL)
        return;

    spinlock_acquire(&tdq_lock);

    td->pid = TAILQ_NELEM(&td_queue);
    TAILQ_INSERT_TAIL(&td_queue, td, link);

    spinlock_release(&tdq_lock);
}

/*
 * Dequeue the first thread in the thread ready
 * queue.
 */
static struct proc *
sched_dequeue_td(void)
{
    struct proc *td = NULL;

    spinlock_acquire(&tdq_lock);

    if (TAILQ_EMPTY(&td_queue)) {
        goto done;
    }

    td = TAILQ_FIRST(&td_queue);
    TAILQ_REMOVE(&td_queue, td, link);
done:
    spinlock_release(&tdq_lock);
    return td;
}

/*
 * A processor with no task assigned spins here until
 * a thread can be dequeued and run.
 */
__noreturn static void
sched_enter(void)
{
    struct proc *td;
    struct cpu_info *ci = this_cpu();
    struct sched_state *state = &ci->sched_state;

    for (;;) {
        if ((td = sched_dequeue_td()) != NULL) {
            state->td = td;
            sched_oneshot();
            __sched_switch_to(td->tf);
        }

        hint_spinwait();
    }
}

static struct proc *
sched_create_td(uintptr_t rip)
{
    const size_t STACK_SIZE = 0x100000;     /* 1 MiB */
    struct proc *td;
    void *stack;
    struct trapframe *tf;

    tf = dynalloc(sizeof(struct trapframe));
    if (tf == NULL) {
        return NULL;
    }

    stack = dynalloc(STACK_SIZE);
    if (stack == NULL) {
        dynfree(tf);
        return NULL;
    }

    td = dynalloc(sizeof(struct proc));
    if (td == NULL) {
        dynfree(tf);
        dynfree(stack);
        return NULL;
    }

    memset(tf, 0, sizeof(struct trapframe));
    memset(stack, 0, STACK_SIZE);

    /* Setup process itself */
    td->pid = 0;            /* Don't assign PID until enqueued */
    td->cpu = NULL;         /* Not yet assigned a core */
    td->tf = tf;

    /* Setup trapframe */
    init_frame(tf, rip, (uintptr_t)stack + STACK_SIZE - 1);
    return td;
}

/*
 * Thread context switch routine - saves the current thread's
 * register state, puts it back on the ready queue and switches
 * to the next ready thread.
 */
void
sched_context_switch(struct trapframe *tf)
{
    struct cpu_info *ci = this_cpu();
    struct sched_state *state = &ci->sched_state;
    struct proc *td, *next_td;

    td = state->td;

    /*
     * If we have no current thread, or no other thread is
     * ready to run, preempting would be pointless because
     * there is nothing to switch to.
     */
    if (td == NULL || (next_td = sched_dequeue_td()) == NULL) {
        sched_oneshot();
        return;
    }

    /* Save our trapframe and put us back on the ready queue */
    memcpy(td->tf, tf, sizeof(struct trapframe));
    spinlock_acquire(&tdq_lock);
    TAILQ_INSERT_TAIL(&td_queue, td, link);
    spinlock_release(&tdq_lock);

    /* Copy the next thread's trapframe to the stack */
    memcpy(tf, next_td->tf, sizeof(struct trapframe));
    state->td = next_td;

    sched_oneshot();
}

void
sched_init(void)
{
    TAILQ_INIT(&td_queue);

    /*
     * TODO: Create init with sched_create_td()
     *       and enqueue with sched_enqueue_td()
     */
    (void)sched_create_td;
    (void)sched_enqueue_td;
}

/*
 * Setup scheduler-related things and enqueue AP.
 */
void
sched_init_processor(struct cpu_info *ci)
{
    struct sched_state *sched_state = &ci->sched_state;
    (void)sched_state;      /* TODO */

    sched_enter();

    __builtin_unreachable();
}
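
/*
 * A minimal sketch of how the TODO in sched_init() above could be
 * completed, using the helpers defined in this file. The `init_main'
 * entry point is a hypothetical symbol assumed for illustration; it
 * is not defined anywhere in this file:
 *
 *  void
 *  sched_init(void)
 *  {
 *      struct proc *init_td;
 *
 *      TAILQ_INIT(&td_queue);
 *
 *      init_td = sched_create_td((uintptr_t)init_main);
 *      __assert(init_td != NULL);
 *      sched_enqueue_td(init_td);
 *  }
 */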