author     Ian Moffett <ian@osmora.org>    2024-04-26 14:29:07 -0400
committer  Ian Moffett <ian@osmora.org>    2024-04-26 14:31:13 -0400
commit     583fc1fa5a3612f3ab73bf19b93df23bd9fa1a64 (patch)
tree       363368f41e9ae4c2a23f5538cc2859557ddb98bf
parent     5bc15a222694d30f62721eecb3d99b7be006f971 (diff)
kernel: sched: Add support for early preemption
Signed-off-by: Ian Moffett <ian@osmora.org>
-rw-r--r--  sys/include/sys/sched.h     |  1
-rw-r--r--  sys/include/sys/schedvar.h  |  1
-rw-r--r--  sys/kern/kern_sched.c       | 26
3 files changed, 23 insertions(+), 5 deletions(-)
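In short: sched_oneshot() gains a 'now' argument that selects the 10 usec SHORT_TIMESLICE_USEC instead of the default 3000 usec slice, and the new sched_rest() arms that short oneshot so the running thread is preempted almost immediately and the next thread gets the CPU. A minimal sketch of a hypothetical caller follows; the polling helper and its flag are illustrative only and not part of this patch:

#include <sys/sched.h>

/*
 * Hypothetical example: a thread stuck polling a flag can ask the
 * scheduler to preempt it early instead of burning the rest of its
 * 3000 usec timeslice.  sched_rest() simply re-arms the scheduler
 * timer with the short (10 usec) oneshot.
 */
static void
poll_until_ready(volatile int *ready)
{
    while (!*ready) {
        sched_rest();    /* request early preemption of this thread */
    }
}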
diff --git a/sys/include/sys/sched.h b/sys/include/sys/sched.h
index 1fa947e..6689e3b 100644
--- a/sys/include/sys/sched.h
+++ b/sys/include/sys/sched.h
@@ -41,6 +41,7 @@ struct proc *this_td(void);
void sched_init(void);
void sched_exit(void);
void sched_context_switch(struct trapframe *tf);
+void sched_rest(void);
__noreturn
void sched_init_processor(struct cpu_info *ci);
diff --git a/sys/include/sys/schedvar.h b/sys/include/sys/schedvar.h
index f1759b7..431a93e 100644
--- a/sys/include/sys/schedvar.h
+++ b/sys/include/sys/schedvar.h
@@ -31,5 +31,6 @@
#define _SYS_SCHEDVAR_H_
#define DEFAULT_TIMESLICE_USEC 3000
+#define SHORT_TIMESLICE_USEC 10
#endif /* !_SYS_SCHEDVAR_H_ */
diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c
index 79480ec..1f302d9 100644
--- a/sys/kern/kern_sched.c
+++ b/sys/kern/kern_sched.c
@@ -78,16 +78,22 @@ static size_t nthread = 0;
*/
static struct spinlock tdq_lock = {0};
+/*
+ * Perform timer oneshot
+ *
+ * @now: True for shortest timeslice.
+ */
static inline void
-sched_oneshot(void)
+sched_oneshot(bool now)
{
    struct timer timer;
+    size_t usec = (now) ? SHORT_TIMESLICE_USEC : DEFAULT_TIMESLICE_USEC;
    tmrr_status_t tmr_status;
    tmr_status = req_timer(TIMER_SCHED, &timer);
    __assert(tmr_status == TMRR_SUCCESS);
-    timer.oneshot_us(DEFAULT_TIMESLICE_USEC);
+    timer.oneshot_us(usec);
}
/*
@@ -144,7 +150,7 @@ sched_idle(void)
__noreturn static void
sched_enter(void)
{
-    sched_oneshot();
+    sched_oneshot(false);
    sched_idle();
    __builtin_unreachable();
}
@@ -354,6 +360,16 @@ sched_make_idletd(void)
    sched_enqueue_td(td);
}
+/*
+ * Cause an early preemption and let
+ * the next thread run.
+ */
+void
+sched_rest(void)
+{
+    sched_oneshot(true);
+}
+
void
sched_exit(void)
{
@@ -406,7 +422,7 @@ sched_context_switch(struct trapframe *tf)
     * preempt at all.
     */
    if (nthread == 0 || (next_td = sched_dequeue_td()) == NULL) {
-        sched_oneshot();
+        sched_oneshot(false);
        return;
    }
@@ -436,7 +452,7 @@ sched_context_switch(struct trapframe *tf)
    /* Done, switch out our vas and oneshot */
    pmap_switch_vas(vm_get_ctx(), next_td->addrsp);
-    sched_oneshot();
+    sched_oneshot(false);
}
void
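For reference, this is roughly how sched_oneshot() reads with the patch applied (assembled from the hunks above; blank lines and exact indentation are approximate):

static inline void
sched_oneshot(bool now)
{
    struct timer timer;
    size_t usec = (now) ? SHORT_TIMESLICE_USEC : DEFAULT_TIMESLICE_USEC;
    tmrr_status_t tmr_status;

    tmr_status = req_timer(TIMER_SCHED, &timer);
    __assert(tmr_status == TMRR_SUCCESS);

    timer.oneshot_us(usec);
}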