author     Ian Moffett <ian@osmora.org>    2025-07-16 20:44:54 -0400
committer  Ian Moffett <ian@osmora.org>    2025-07-16 20:44:54 -0400
commit     f6900b5c3b4832f48359fd761b80f8693a6735e8 (patch)
tree       4a9b10e1935f5c2dc004ebe34038c2ef6738b841
parent     a2a7a6c1137e38e1443b69c09296644b00ba4bd2 (diff)
kernel: Sort out MI/MD switch logic
Certain operations dealing with context switches are better off implemented as machine-dependent routines. This gives us flexibility in what we can do, with the extra benefit of requiring fewer workarounds to keep things MI.

Signed-off-by: Ian Moffett <ian@osmora.org>
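For reference, the sketch below shows the call flow this split ends up with, in a simplified standalone form: the machine-dependent entry point owns the trapframe and the address-space switch, while the machine-independent half only does generic bookkeeping. The types and helpers here (pick_next(), cpu_curtd, the trapframe layout) are illustrative stand-ins, not the actual Hyra interfaces.

/*
 * mi_md_split_sketch.c -- illustrative only, not the Hyra sources.
 * Simplified model of the MD/MI scheduler split described above.
 */
#include <stddef.h>
#include <string.h>

struct trapframe { unsigned long regs[16]; };   /* stand-in register frame */
struct proc {
    int pid;
    struct trapframe tf;                        /* saved register state */
};

static struct proc *cpu_curtd;                  /* stand-in for ci->curtd */

/* Stand-in for sched_dequeue_td(): pick the next runnable thread. */
static struct proc *
pick_next(void)
{
    return NULL;
}

/* MI half: generic bookkeeping only (signals, priority, console detach). */
static void
mi_sched_switch(struct proc *from)
{
    if (from == NULL || from->pid == 0)
        return;
    /* dispatch_signals(from); td_pri_update(from); ... */
}

/* MD half: timer ISR entry point; owns the trapframe and address space. */
static void
md_sched_switch(struct trapframe *tf)
{
    struct proc *td = cpu_curtd, *next;

    mi_sched_switch(td);                        /* MI work happens first */

    if (td != NULL && td->pid != 0)
        memcpy(&td->tf, tf, sizeof(*tf));       /* save outgoing register state */

    if ((next = pick_next()) == NULL)
        return;                                 /* nothing runnable; rearm timer */

    memcpy(tf, &next->tf, sizeof(*tf));         /* load incoming register state */
    cpu_curtd = next;                           /* pmap_switch_vas() would go here */
}

int
main(void)
{
    struct trapframe tf = {0};

    md_sched_switch(&tf);                       /* simulate one timer tick */
    return 0;
}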
-rw-r--r--  sys/arch/amd64/amd64/lapic_intr.S     2
-rw-r--r--  sys/arch/amd64/amd64/proc_machdep.c  66
-rw-r--r--  sys/include/sys/sched.h               3
-rw-r--r--  sys/include/sys/schedvar.h            6
-rw-r--r--  sys/kern/kern_sched.c                70
5 files changed, 85 insertions, 62 deletions
diff --git a/sys/arch/amd64/amd64/lapic_intr.S b/sys/arch/amd64/amd64/lapic_intr.S
index 5ae8f39..1413660 100644
--- a/sys/arch/amd64/amd64/lapic_intr.S
+++ b/sys/arch/amd64/amd64/lapic_intr.S
@@ -33,6 +33,6 @@
.globl lapic_tmr_isr
INTRENTRY(lapic_tmr_isr, handle_lapic_tmr)
handle_lapic_tmr:
- call sched_switch // Context switch per every timer IRQ
+ call md_sched_switch // Context switch per every timer IRQ
call lapic_eoi // Done! Signal that we finished to the Local APIC
retq
diff --git a/sys/arch/amd64/amd64/proc_machdep.c b/sys/arch/amd64/amd64/proc_machdep.c
index 63604a4..72c2b56 100644
--- a/sys/arch/amd64/amd64/proc_machdep.c
+++ b/sys/arch/amd64/amd64/proc_machdep.c
@@ -32,6 +32,8 @@
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/exec.h>
+#include <sys/sched.h>
+#include <sys/schedvar.h>
#include <machine/frame.h>
#include <machine/gdt.h>
#include <machine/cpu.h>
@@ -220,3 +222,67 @@ md_spawn(struct proc *p, struct proc *parent, uintptr_t ip)
tfp->rsp = ALIGN_DOWN((stack_base + PROC_STACK_SIZE) - 1, 16);
return 0;
}
+
+/*
+ * Save thread state and enqueue it back into one
+ * of the ready queues.
+ */
+static void
+sched_save_td(struct proc *td, struct trapframe *tf)
+{
+ /*
+ * Save trapframe to process structure only
+ * if PROC_EXEC is not set.
+ */
+ if (!ISSET(td->flags, PROC_EXEC)) {
+ memcpy(&td->tf, tf, sizeof(td->tf));
+ }
+
+ sched_enqueue_td(td);
+}
+
+static void
+sched_switch_to(struct trapframe *tf, struct proc *td)
+{
+ struct cpu_info *ci;
+ struct pcb *pcbp;
+
+ ci = this_cpu();
+
+ if (tf != NULL) {
+ memcpy(tf, &td->tf, sizeof(*tf));
+ }
+
+ ci->curtd = td;
+ pcbp = &td->pcb;
+ pmap_switch_vas(pcbp->addrsp);
+}
+
+/*
+ * Perform a context switch.
+ */
+void
+md_sched_switch(struct trapframe *tf)
+{
+ struct proc *next_td, *td;
+ struct cpu_info *ci;
+
+ ci = this_cpu();
+ td = ci->curtd;
+ mi_sched_switch(td);
+
+ if (td != NULL) {
+ if (td->pid == 0)
+ return;
+
+ sched_save_td(td, tf);
+ }
+
+ if ((next_td = sched_dequeue_td()) == NULL) {
+ sched_oneshot(false);
+ return;
+ }
+
+ sched_switch_to(tf, next_td);
+ sched_oneshot(false);
+}
diff --git a/sys/include/sys/sched.h b/sys/include/sys/sched.h
index f4d99f9..d80483a 100644
--- a/sys/include/sys/sched.h
+++ b/sys/include/sys/sched.h
@@ -50,9 +50,8 @@ struct sched_stat {
void sched_stat(struct sched_stat *statp);
void sched_init(void);
-void sched_yield(void);
-void sched_switch_to(struct trapframe *tf, struct proc *td);
+void sched_yield(void);
void sched_detach(struct proc *td);
__dead void sched_enter(void);
diff --git a/sys/include/sys/schedvar.h b/sys/include/sys/schedvar.h
index 5ed9f5f..017fcb7 100644
--- a/sys/include/sys/schedvar.h
+++ b/sys/include/sys/schedvar.h
@@ -60,5 +60,11 @@ struct sched_queue {
size_t nthread;
};
+struct proc *sched_dequeue_td(void);
+void mi_sched_switch(struct proc *from);
+
+void md_sched_switch(struct trapframe *tf);
+void sched_oneshot(bool now);
+
#endif /* _KERNEL */
#endif /* !_SYS_SCHEDVAR_H_ */
diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c
index 02d19df..e259a2c 100644
--- a/sys/kern/kern_sched.c
+++ b/sys/kern/kern_sched.c
@@ -45,7 +45,7 @@
#define pr_trace(fmt, ...) kprintf("ksched: " fmt, ##__VA_ARGS__)
-void sched_switch(struct trapframe *tf);
+void md_sched_switch(struct trapframe *tf);
void sched_accnt_init(void);
static sched_policy_t policy = SCHED_POLICY_MLFQ;
@@ -65,7 +65,7 @@ __cacheline_aligned static struct spinlock tdq_lock = {0};
/*
* Perform timer oneshot
*/
-static inline void
+void
sched_oneshot(bool now)
{
struct timer timer;
@@ -78,25 +78,7 @@ sched_oneshot(bool now)
timer.oneshot_us(usec);
}
-/*
- * Save thread state and enqueue it back into one
- * of the ready queues.
- */
-static void
-sched_save_td(struct proc *td, struct trapframe *tf)
-{
- /*
- * Save trapframe to process structure only
- * if PROC_EXEC is not set.
- */
- if (!ISSET(td->flags, PROC_EXEC)) {
- memcpy(&td->tf, tf, sizeof(td->tf));
- }
-
- sched_enqueue_td(td);
-}
-
-static struct proc *
+struct proc *
sched_dequeue_td(void)
{
struct sched_queue *queue;
@@ -198,52 +180,22 @@ td_pri_update(struct proc *td)
}
}
-void
-sched_switch_to(struct trapframe *tf, struct proc *td)
-{
- struct cpu_info *ci;
- struct pcb *pcbp;
-
- ci = this_cpu();
-
- if (tf != NULL) {
- memcpy(tf, &td->tf, sizeof(*tf));
- }
-
- ci->curtd = td;
- pcbp = &td->pcb;
- pmap_switch_vas(pcbp->addrsp);
-}
-
/*
- * Perform a context switch.
+ * MI work to be done during a context
+ * switch. Called by md_sched_switch()
*/
void
-sched_switch(struct trapframe *tf)
+mi_sched_switch(struct proc *from)
{
- struct proc *next_td, *td;
- struct cpu_info *ci;
-
- ci = this_cpu();
- td = ci->curtd;
- cons_detach();
-
- if (td != NULL) {
- if (td->pid == 0)
+ if (from != NULL) {
+ if (from->pid == 0)
return;
- dispatch_signals(td);
- td_pri_update(td);
- sched_save_td(td, tf);
+ dispatch_signals(from);
+ td_pri_update(from);
}
- if ((next_td = sched_dequeue_td()) == NULL) {
- sched_oneshot(false);
- return;
- }
-
- sched_switch_to(tf, next_td);
- sched_oneshot(false);
+ cons_detach();
}
/*