author     Ian Moffett <ian@osmora.org>    2025-08-21 20:11:31 -0400
committer  Ian Moffett <ian@osmora.org>    2025-08-21 20:11:31 -0400
commit     6bd8fc75891cf2c08aaf4584f0110ca1cbbf84db (patch)
tree       c4cc4cdda2d307ccd10c7396a10708e365b85958 /sys/arch
parent     6da57a82e1b40f6f1f105aa990475b451d4958e9 (diff)
kernel/amd64: ipi: Greatly simplify IPI framework
The previous IPI framework design was quite an overengineered mess
thanks to our friend, Φ of the body.

- Use a flat array instead of a weird bitmap
- Only use one ISR and chain the functions

Signed-off-by: Ian Moffett <ian@osmora.org>
Diffstat (limited to 'sys/arch')
-rw-r--r--  sys/arch/amd64/amd64/ipi.c      271
-rw-r--r--  sys/arch/amd64/amd64/machdep.c   16
-rw-r--r--  sys/arch/amd64/amd64/vector.S    24

3 files changed, 59 insertions(+), 252 deletions(-)
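The core of the change is easier to see with the diff markers stripped away. Below is a condensed sketch of the new flat-array allocator, assembled from the ipi.c hunks that follow; the real definition of struct cpu_ipi lives in machine/ipi.h, so the field types here are approximations, not the committed layout.

/*
 * Sketch only: field types approximated from usage in the diff below.
 * -EAGAIN comes from the kernel's sys/errno.h.
 */
#include <stdint.h>

#define COOKIE     0x7E0A
#define IPI_VECTOR 0x21     /* single IDT vector shared by all IPIs */
#define MAX_IPI    32

struct cpu_ipi {
    uint32_t cookie;                    /* COOKIE once the slot is live */
    uint8_t id;                         /* index into ipi_list[] */
    void (*handler)(struct cpu_ipi *);  /* chained from the one ISR */
};

static struct cpu_ipi ipi_list[MAX_IPI];   /* flat array, no bitmap */
static uint8_t ipi_count = 0;

/* Core allocator: hand out the next free slot and return its id. */
static int
__ipi_alloc(struct cpu_ipi **res)
{
    struct cpu_ipi *ipip;

    if (ipi_count >= MAX_IPI)
        return -EAGAIN;

    ipip = &ipi_list[ipi_count];
    ipip->cookie = COOKIE;
    ipip->id = ipi_count++;
    ipip->handler = NULL;
    *res = ipip;
    return ipip->id;
}

Allocation is now a bump of ipi_count instead of a bitmap walk, and the id doubles as both the array index and the bit set in a CPU's pending mask.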
diff --git a/sys/arch/amd64/amd64/ipi.c b/sys/arch/amd64/amd64/ipi.c
index ffa291f..e367552 100644
--- a/sys/arch/amd64/amd64/ipi.c
+++ b/sys/arch/amd64/amd64/ipi.c
@@ -37,69 +37,28 @@
#include <machine/idt.h>
#include <machine/ipi.h>
#include <machine/lapic.h>
+#include <vm/dynalloc.h>
#include <string.h>
-void ipi_isr0(void);
-void ipi_isr1(void);
-void ipi_isr2(void);
-void ipi_isr3(void);
+void ipi_isr(void);
void __ipi_handle_common(void);
#define pr_trace(fmt, ...) kprintf("ipi: " fmt, ##__VA_ARGS__)
#define pr_error(...) pr_trace(__VA_ARGS__)
-#define BASE_VECTOR 0x21
#define COOKIE 0x7E0A
+#define IPI_VECTOR 0x21
+#define MAX_IPI 32
/* For the global state of the subsystem */
static uint32_t cookie = 0;
-/*
- * The next vector that will be used for an IPI to
- * be allocated. It starts at 0x21 because interrupt
- * vector 0x20 is used for the Hyra scheduler and `N_IPIVEC'
- * vectors up are reserved for inter-processor interrupts.
- *
- * XXX: This must not go beyond N_IPIVEC !!
- */
-static uint8_t next_vec = BASE_VECTOR;
-static uint8_t vec_entries = 0;
-
-/*
- * In order to get an index into the 'vectors' array,
- * one can pass an `ipi_bitmap' bit index into the
- * ipi_vector() function. The index into the `ipi`
- * field within may be acquired with the ipi_index()
- * function.
- */
-static uint64_t ipi_bitmap = 0;
-static struct ipi_vector vectors[N_IPIVEC];
+static struct cpu_ipi ipi_list[MAX_IPI];
+static uint8_t ipi_count = 0;
static struct spinlock lock;
/*
- * Allocate a bit from the `ipi_bitmap' and
- * return the index.
- *
- * Returns a less than zero value upon error.
- */
-static ssize_t
-alloc_ipi_bit(void)
-{
- const size_t MAX = sizeof(ipi_bitmap) * 8;
- off_t i;
-
- for (i = 0; i < MAX; ++i) {
- if (!ISSET(ipi_bitmap, BIT(i))) {
- ipi_bitmap |= BIT(i);
- return i;
- }
- }
-
- return -1;
-}
-
-/*
* Allocate an IPI that can be sent to other
* cores on the CPU. This is the core logic
* and contains *no* locks. One should be
@@ -111,167 +70,18 @@ alloc_ipi_bit(void)
static int
__ipi_alloc(struct cpu_ipi **res)
{
- struct ipi_vector *vp;
struct cpu_ipi *ipip;
- ssize_t bit;
- uint8_t idx;
-
- if (res == NULL) {
- return -EINVAL;
- }
- if (next_vec >= BASE_VECTOR + N_IPIVEC) {
+ if (ipi_count >= MAX_IPI) {
return -EAGAIN;
}
- /*
- * Attempt to allocate a bit index from
- * the bitmap.
- */
- if ((bit = alloc_ipi_bit()) < 0) {
- return -EAGAIN;
- }
-
- idx = ipi_vector(bit);
- vp = &vectors[idx];
-
- /* Initialize the vector if not already */
- if (vp->cookie != COOKIE) {
- vp->cookie = COOKIE;
- vp->nipi = 0;
- vp->vec = next_vec;
- memset(vp->ipi, 0, sizeof(vp->ipi));
- }
-
- /*
- * Just a sanity check here, the number of
- * IPIs per vector should never exceed the
- * maximum, and if it does, that gives us more
- * than enough grounds to panic the system as
- * it would not be wise to trust it.
- */
- if (__unlikely(vp->nipi >= IPI_PER_VEC)) {
- panic("too many IPIs in vector %x\n", vp->vec);
- }
-
- idx = ipi_index(bit);
- ipip = &vp->ipi[idx];
-
- /* We are allocating, not clobbering */
- if (ipip->cookie == COOKIE) {
- panic("ipi table corruption\n");
- }
-
- if ((++vec_entries) >= IPI_PER_VEC) {
- vec_entries = 0;
- ++next_vec;
- }
-
- /* Set up the initial state */
+ ipip = &ipi_list[ipi_count];
ipip->cookie = COOKIE;
+ ipip->id = ipi_count++;
ipip->handler = NULL;
- ipip->id = bit;
*res = ipip;
- return bit;
-}
-
-/*
- * Dispatch pending IPIs for the current
- * processor.
- *
- * @vector: Backing interrupt vector
- * @ci: Current processor
- */
-static void
-ipi_dispatch_pending(struct ipi_vector *vec, struct cpu_info *ci)
-{
- uint8_t bit_i;
- uint8_t n_bit;
- uint8_t index;
- struct cpu_ipi *ipip = NULL;
- ipi_pend_t pending;
-
- if (vec == NULL || ci == NULL) {
- return;
- }
-
- n_bit = sizeof(pending) * 8;
- for (bit_i = 0; bit_i < n_bit; ++bit_i) {
- index = ipi_vector(bit_i);
- pending = ci->ipi_pending[index];
-
- vec = &vectors[index];
- index = ipi_index(bit_i);
- ipip = &vec->ipi[index];
-
- /* Is this pending or not? */
- if (!ISSET(pending, BIT(bit_i))) {
- continue;
- }
-
- /* Handle and mark as no longer pending */
- ipip->handler(ipip);
- ci->ipi_pending[vec->vec] &= ~BIT(bit_i);
- }
-}
-
-/*
- * Check an IPI pending bitmap for a
- * vector and send IPIs as needed
- *
- * @ci: Target processor
- * @pending: Pending IPIs
- */
-static void
-ipi_send_vector(struct cpu_info *ci, ipi_pend_t pending)
-{
- struct ipi_vector *vp;
- struct cpu_ipi *ipip;
- uint8_t n_bits = sizeof(pending) * 8;
- uint8_t bit_i;
- uint8_t vector, index;
- uint32_t apic_id = 0;
-
- if (ci != NULL) {
- /*
- * We are already dispatching IPIs, we don't
- * want to find ourselves in interrupt hell.
- */
- if (ci->ipi_dispatch) {
- return;
- }
-
- apic_id = ci->apicid;
- }
-
- ci->ipi_dispatch = 1;
- for (bit_i = 0; bit_i < n_bits; ++bit_i) {
- if (ISSET(pending, BIT(bit_i))) {
- vector = ipi_vector(bit_i);
- index = ipi_index(bit_i);
-
- if (ci != NULL)
- ci->ipi_id = bit_i;
-
- vp = &vectors[vector];
- ipip = &vp->ipi[index];
-
- /* Ignore if cookie does match */
- if (ipip->cookie != COOKIE)
- continue;
-
- /* Ignore if there is no handler */
- if (ipip->handler == NULL)
- continue;
-
- /* Send that IPI through */
- lapic_send_ipi(
- apic_id,
- IPI_SHORTHAND_NONE,
- BASE_VECTOR + vector
- );
- }
- }
+ return ipip->id;
}
/*
@@ -282,33 +92,37 @@ ipi_send_vector(struct cpu_info *ci, ipi_pend_t pending)
void
__ipi_handle_common(void)
{
- struct ipi_vector *vp;
+ struct cpu_ipi *ipip;
struct cpu_info *ci = this_cpu();
- uint8_t vector;
+ ipi_pend_t pending = 0;
if (cookie != COOKIE) {
pr_trace("[warn]: got spurious ipi\n");
return;
}
- /* Grab the vector */
- vector = ipi_vector(ci->ipi_id);
- vp = &vectors[vector];
- if (vp->cookie != COOKIE) {
- pr_error("got IPI for uninitialized vector\n");
+ if (ci == NULL) {
+ pr_error("could not get current CPU\n");
return;
}
- if ((ci = this_cpu()) == NULL) {
- pr_error("could not get current CPU\n");
+ if (ipi_count == 0) {
+ pr_error("no registered IPIs\n");
return;
}
- ipi_dispatch_pending(vp, ci);
+ /* Attempt to find a handler */
+ pending = ci->ipi_pending;
+ for (int i = 0; i < ipi_count; ++i) {
+ ipip = &ipi_list[i];
+ if (ISSET(pending, BIT(i))) {
+ ipip->handler(ipip);
+ ci->ipi_pending &= ~BIT(i);
+ }
+ }
/* We are done dispatching IPIs */
ci->ipi_dispatch = 0;
- ci->ipi_id = 0;
}
/*
@@ -317,20 +131,34 @@ __ipi_handle_common(void)
* the `ci->ipi_pending' field
*
* @ci: Processor to send IPI(s) to
+ * @ipi: IPIs to send
*/
int
-md_ipi_send(struct cpu_info *ci)
+md_ipi_send(struct cpu_info *ci, ipi_pend_t ipi)
{
- if (ci == NULL) {
- return -EINVAL;
- }
+ uint32_t apic_id = 0;
- spinlock_acquire(&lock);
- for (int i = 0; i < N_IPIVEC; ++i) {
- ipi_send_vector(ci, ci->ipi_pending[i]);
+ if (ci != NULL) {
+ /*
+ * We are already dispatching IPIs, we don't
+ * want to find ourselves in interrupt hell.
+ */
+ if (ci->ipi_dispatch) {
+ return -EAGAIN;
+ }
+
+ apic_id = ci->apicid;
}
- spinlock_release(&lock);
+ ci->ipi_dispatch = 1;
+ ci->ipi_pending |= BIT(ipi);
+
+ /* Send it through on the bus */
+ lapic_send_ipi(
+ apic_id,
+ IPI_SHORTHAND_NONE,
+ IPI_VECTOR
+ );
return 0;
}
@@ -357,9 +185,6 @@ void
md_ipi_init(void)
{
/* Initialize the IPI vectors */
- idt_set_desc(0x21, IDT_INT_GATE, ISR(ipi_isr0), 0);
- idt_set_desc(0x22, IDT_INT_GATE, ISR(ipi_isr1), 0);
- idt_set_desc(0x23, IDT_INT_GATE, ISR(ipi_isr2), 0);
- idt_set_desc(0x24, IDT_INT_GATE, ISR(ipi_isr3), 0);
+ idt_set_desc(IPI_VECTOR, IDT_INT_GATE, ISR(ipi_isr), 0);
cookie = COOKIE;
}
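For context, a caller-side sketch of how the simplified API fits together. The locked wrapper around __ipi_alloc() is not part of this diff, so the ipi_alloc() name below is hypothetical; md_ipi_send() and the handler shape are taken from the hunks above, and the sketch assumes the target ci is non-NULL.

/*
 * Hypothetical caller: ipi_alloc() stands in for whatever locked
 * wrapper the kernel exposes around __ipi_alloc().
 */
static void
tlb_flush_handler(struct cpu_ipi *ipip)
{
    /* ... act on this CPU ... */
}

static int tlb_ipi_id = -1;

void
example_setup_and_send(struct cpu_info *target)
{
    struct cpu_ipi *ipip;

    /* One-time registration: grab a slot and attach the handler. */
    tlb_ipi_id = ipi_alloc(&ipip);          /* hypothetical wrapper */
    if (tlb_ipi_id >= 0)
        ipip->handler = tlb_flush_handler;

    /* Mark the id pending on the target and kick vector 0x21 via the LAPIC. */
    if (target != NULL && tlb_ipi_id >= 0)
        md_ipi_send(target, tlb_ipi_id);
}

Since every IPI now shares IPI_VECTOR, md_ipi_send() only has to set one pending bit and fire one vector; the per-vector bookkeeping of the old design disappears.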
diff --git a/sys/arch/amd64/amd64/machdep.c b/sys/arch/amd64/amd64/machdep.c
index 5fb006c..2976a51 100644
--- a/sys/arch/amd64/amd64/machdep.c
+++ b/sys/arch/amd64/amd64/machdep.c
@@ -404,7 +404,7 @@ cpu_shootdown_tlb(vaddr_t va)
spinlock_acquire(&cip->lock);
cip->shootdown_va = va;
cip->tlb_shootdown = 1;
- cpu_ipi_send(cip, IPI_TLB);
+ md_ipi_send(cip, IPI_TLB);
spinlock_release(&cip->lock);
}
}
@@ -461,7 +461,7 @@ md_backtrace(void)
void
cpu_halt_all(void)
{
- struct cpu_info *ci;
+ struct cpu_info *ci, *curcpu;
uint32_t ncpu = cpu_count();
/*
@@ -470,19 +470,21 @@ cpu_halt_all(void)
* processor is the only one active, clear interrupts
* then halt it.
*/
- if (rdmsr(IA32_GS_BASE) == 0) {
- __ASMV("cli; hlt");
+ __ASMV("cli");
+ if ((curcpu = this_cpu()) == NULL) {
+ __ASMV("hlt");
}
for (int i = 0; i < ncpu; ++i) {
ci = cpu_get(i);
- if (ci == NULL) {
+ if (ci->id == curcpu->id) {
continue;
}
- cpu_ipi_send(ci, IPI_HALT);
+ md_ipi_send(ci, IPI_HALT);
}
+ __ASMV("hlt");
for (;;);
}
@@ -508,7 +510,7 @@ cpu_halt_others(void)
if (ci->id == curcpu->id)
continue;
- cpu_ipi_send(ci, IPI_HALT);
+ md_ipi_send(ci, IPI_HALT);
}
}
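The machdep.c callers now pass well-known ids such as IPI_HALT and IPI_TLB straight to md_ipi_send(). How those ids get their handlers is outside this commit; the sketch below is a hypothetical boot-time wiring, assuming the constants simply match the slot indices handed out at registration time.

/*
 * Hypothetical wiring for the halt IPI; the code that defines
 * IPI_HALT/IPI_TLB is not part of this commit.
 */
static void
halt_handler(struct cpu_ipi *ipip)
{
    for (;;)
        __ASMV("cli; hlt");     /* park this processor */
}

void
example_register_halt_ipi(void)
{
    struct cpu_ipi *ipip;
    int id;

    id = ipi_alloc(&ipip);      /* hypothetical wrapper, see above */
    if (id >= 0)
        ipip->handler = halt_handler;
    /* id would have to line up with the IPI_HALT constant used in machdep.c */
}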
diff --git a/sys/arch/amd64/amd64/vector.S b/sys/arch/amd64/amd64/vector.S
index 19c68d5..69173d7 100644
--- a/sys/arch/amd64/amd64/vector.S
+++ b/sys/arch/amd64/amd64/vector.S
@@ -139,31 +139,11 @@ pin_isr_load:
IDT_SET_VEC 100, ioapic_edge_63
ret
- .globl ipi_isr0
-INTRENTRY(ipi_isr0, ipi_trampoline)
+ .globl ipi_isr
+INTRENTRY(ipi_isr, ipi_trampoline)
call ipi_trampoline
retq
- .globl ipi_isr1
-INTRENTRY(ipi_isr1, ipi_trampoline)
- call ipi_trampoline
- retq
-
- .globl ipi_isr2
-INTRENTRY(ipi_isr2, ipi_trampoline)
- call ipi_trampoline
- retq
-
- .globl ipi_isr3
-INTRENTRY(ipi_isr3, ipi_trampoline)
- call ipi_trampoline
- retq
-
-/*
- * Hyra supports 16 IPI handlers per 4 reserved
- * IDT vectors. That allows for a maximum of
- * 64 IPIs.
- */
ipi_trampoline:
call __ipi_handle_common
retq
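Finally, the dispatch side as it reads once the patch is applied: one IDT entry, one assembly stub, one common C routine that walks the flat array. A condensed sketch of __ipi_handle_common() follows, with the global cookie check and error prints omitted; the NULL-handler guard is an addition for the sketch, the committed code trusts that a pending bit always maps to a registered slot.

/*
 * After this commit: vector 0x21 -> ipi_isr (vector.S) -> ipi_trampoline
 * -> __ipi_handle_common(), which chains every pending handler.
 */
void
__ipi_handle_common(void)
{
    struct cpu_info *ci = this_cpu();
    ipi_pend_t pending;

    if (ci == NULL || ipi_count == 0)
        return;

    pending = ci->ipi_pending;
    for (int i = 0; i < ipi_count; ++i) {
        if (ISSET(pending, BIT(i))) {
            if (ipi_list[i].handler != NULL)     /* sketch-only guard */
                ipi_list[i].handler(&ipi_list[i]);
            ci->ipi_pending &= ~BIT(i);          /* no longer pending */
        }
    }
    ci->ipi_dispatch = 0;   /* allow md_ipi_send() to fire again */
}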