Diffstat (limited to 'sys/arch')
-rw-r--r--  sys/arch/amd64/amd64/intr.c    |   8
-rw-r--r--  sys/arch/amd64/amd64/ipi.c     | 365
-rw-r--r--  sys/arch/amd64/amd64/machdep.c |  95
-rw-r--r--  sys/arch/amd64/amd64/mp.c      |   2
-rw-r--r--  sys/arch/amd64/amd64/vector.S  | 157
5 files changed, 542 insertions(+), 85 deletions(-)
diff --git a/sys/arch/amd64/amd64/intr.c b/sys/arch/amd64/amd64/intr.c
index a545788..c44c88e 100644
--- a/sys/arch/amd64/amd64/intr.c
+++ b/sys/arch/amd64/amd64/intr.c
@@ -98,12 +98,12 @@ intr_register(const char *name, const struct intr_hand *ih)
* Try to allocate an interrupt vector. An IPL is made up
* of 4 bits so there can be 16 vectors per IPL.
*
- * XXX: Vector 0x20 is reserved for the Hyra scheduler,
- * vector 0x21 is reserved for the CPU halt IPI,
- * and vector 0x22 is reserved for TLB shootdowns.
+	 * XXX: Vector 0x20 is reserved for the Hyra scheduler and
+	 *      vectors 0x21 through 0x21 + N_IPIVEC - 1 are
+	 *      reserved for inter-processor interrupts.
*/
for (int i = vec; i < vec + 16; ++i) {
- if (g_intrs[i] != NULL || i < 0x23) {
+		if (g_intrs[i] != NULL || i < 0x25) {
continue;
}
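For context, the 16-vector window this loop probes follows from the 4-bit IPL encoding described in the comment above. A standalone sketch of that arithmetic (illustration only; `ipl_to_vector_base' is a hypothetical name, not a function in this tree):

    #include <stdint.h>

    /*
     * An IPL is 4 bits, so each IPL owns a window of
     * 16 interrupt vectors starting at ipl << 4.
     */
    static inline uint8_t
    ipl_to_vector_base(uint8_t ipl)
    {
    	return ipl << 4;
    }

    /*
     * e.g. ipl_to_vector_base(2) == 0x20; the loop above then
     * probes 0x20..0x2F, skipping the reserved scheduler and
     * IPI vectors (0x20..0x24).
     */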
diff --git a/sys/arch/amd64/amd64/ipi.c b/sys/arch/amd64/amd64/ipi.c
new file mode 100644
index 0000000..ffa291f
--- /dev/null
+++ b/sys/arch/amd64/amd64/ipi.c
@@ -0,0 +1,365 @@
+/*
+ * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Hyra nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/errno.h>
+#include <sys/syslog.h>
+#include <sys/param.h>
+#include <sys/panic.h>
+#include <sys/spinlock.h>
+#include <machine/cpu.h>
+#include <machine/idt.h>
+#include <machine/ipi.h>
+#include <machine/lapic.h>
+#include <string.h>
+
+void ipi_isr0(void);
+void ipi_isr1(void);
+void ipi_isr2(void);
+void ipi_isr3(void);
+
+void __ipi_handle_common(void);
+
+#define pr_trace(fmt, ...) kprintf("ipi: " fmt, ##__VA_ARGS__)
+#define pr_error(...) pr_trace(__VA_ARGS__)
+
+#define BASE_VECTOR 0x21
+#define COOKIE 0x7E0A
+
+/* For the global state of the subsystem */
+static uint32_t cookie = 0;
+
+/*
+ * The next vector to be used for a newly
+ * allocated IPI. It starts at 0x21 because
+ * interrupt vector 0x20 is used by the Hyra
+ * scheduler, and the `N_IPIVEC' vectors above
+ * it are reserved for inter-processor interrupts.
+ *
+ * XXX: This must not go beyond BASE_VECTOR + N_IPIVEC !!
+ */
+static uint8_t next_vec = BASE_VECTOR;
+static uint8_t vec_entries = 0;
+
+/*
+ * To get an index into the `vectors' array,
+ * pass an `ipi_bitmap' bit index to the
+ * ipi_vector() function. The index into a
+ * vector's `ipi' field may be acquired with
+ * the ipi_index() function.
+ */
+static uint64_t ipi_bitmap = 0;
+static struct ipi_vector vectors[N_IPIVEC];
+static struct spinlock lock;
+
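ipi_vector() and ipi_index() are provided by machine/ipi.h, which this diff does not touch. Given the 16-IPIs-per-vector layout, they presumably split a global bit index roughly as follows (a reconstruction, not the header's actual text):

    /* Assumed helpers; the real definitions live in machine/ipi.h. */
    #define ipi_vector(bit)	((uint8_t)((bit) / IPI_PER_VEC))	/* which vector      */
    #define ipi_index(bit)	((uint8_t)((bit) % IPI_PER_VEC))	/* which IPI therein */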
+/*
+ * Allocate a bit from the `ipi_bitmap' and
+ * return the index.
+ *
+ * Returns a less than zero value upon error.
+ */
+static ssize_t
+alloc_ipi_bit(void)
+{
+ const size_t MAX = sizeof(ipi_bitmap) * 8;
+ off_t i;
+
+ for (i = 0; i < MAX; ++i) {
+ if (!ISSET(ipi_bitmap, BIT(i))) {
+ ipi_bitmap |= BIT(i);
+ return i;
+ }
+ }
+
+ return -1;
+}
+
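Note that no release path is added; bits allocated from `ipi_bitmap' stay allocated for the lifetime of the system. If freeing were ever needed, a counterpart might look like this (hypothetical, not part of this change):

    static void
    free_ipi_bit(size_t bit)
    {
    	/* Mark the slot reusable again */
    	ipi_bitmap &= ~BIT(bit);
    }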
+/*
+ * Allocate an IPI that can be sent to other
+ * cores on the CPU. This is the core logic
+ * and contains *no* locks. One should be
+ * using the md_ipi_alloc() function instead.
+ *
+ * Returns the allocated IPI identifier on success,
+ * otherwise a less than zero value is returned.
+ */
+static int
+__ipi_alloc(struct cpu_ipi **res)
+{
+ struct ipi_vector *vp;
+ struct cpu_ipi *ipip;
+ ssize_t bit;
+ uint8_t idx;
+
+ if (res == NULL) {
+ return -EINVAL;
+ }
+
+ if (next_vec >= BASE_VECTOR + N_IPIVEC) {
+ return -EAGAIN;
+ }
+
+ /*
+ * Attempt to allocate a bit index from
+ * the bitmap.
+ */
+ if ((bit = alloc_ipi_bit()) < 0) {
+ return -EAGAIN;
+ }
+
+ idx = ipi_vector(bit);
+ vp = &vectors[idx];
+
+ /* Initialize the vector if not already */
+ if (vp->cookie != COOKIE) {
+ vp->cookie = COOKIE;
+ vp->nipi = 0;
+ vp->vec = next_vec;
+ memset(vp->ipi, 0, sizeof(vp->ipi));
+ }
+
+ /*
+ * Just a sanity check here, the number of
+ * IPIs per vector should never exceed the
+ * maximum, and if it does, that gives us more
+ * than enough grounds to panic the system as
+ * it would not be wise to trust it.
+ */
+ if (__unlikely(vp->nipi >= IPI_PER_VEC)) {
+ panic("too many IPIs in vector %x\n", vp->vec);
+ }
+
+ idx = ipi_index(bit);
+ ipip = &vp->ipi[idx];
+
+ /* We are allocating, not clobbering */
+ if (ipip->cookie == COOKIE) {
+ panic("ipi table corruption\n");
+ }
+
+ if ((++vec_entries) >= IPI_PER_VEC) {
+ vec_entries = 0;
+ ++next_vec;
+ }
+
+ /* Set up the initial state */
+ ipip->cookie = COOKIE;
+ ipip->handler = NULL;
+ ipip->id = bit;
+ *res = ipip;
+ return bit;
+}
+
+/*
+ * Dispatch pending IPIs for the current
+ * processor.
+ *
+ * @vec: Backing interrupt vector
+ * @ci: Current processor
+ */
+static void
+ipi_dispatch_pending(struct ipi_vector *vec, struct cpu_info *ci)
+{
+ uint8_t bit_i;
+ uint8_t n_bit;
+ uint8_t index;
+ struct cpu_ipi *ipip = NULL;
+ ipi_pend_t pending;
+
+ if (vec == NULL || ci == NULL) {
+ return;
+ }
+
+ n_bit = sizeof(pending) * 8;
+ for (bit_i = 0; bit_i < n_bit; ++bit_i) {
+ index = ipi_vector(bit_i);
+ pending = ci->ipi_pending[index];
+
+ vec = &vectors[index];
+ index = ipi_index(bit_i);
+ ipip = &vec->ipi[index];
+
+ /* Is this pending or not? */
+ if (!ISSET(pending, BIT(bit_i))) {
+ continue;
+ }
+
+ /* Handle and mark as no longer pending */
+ ipip->handler(ipip);
+		ci->ipi_pending[ipi_vector(bit_i)] &= ~BIT(bit_i);
+ }
+}
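A worked example of the lookup above, under the assumed helpers sketched earlier: the TLB IPI (bit index 1) resolves to slot 1 of vector 0, which is backed by IDT vector 0x21:

    /* Hypothetical trace for id == 1 (IPI_TLB) */
    size_t id = 1;
    struct ipi_vector *v = &vectors[ipi_vector(id)];	/* vectors[0] */
    struct cpu_ipi *ipi = &v->ipi[ipi_index(id)];	/* slot 1 */
    /* sent on IDT vector BASE_VECTOR + ipi_vector(id) == 0x21 */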
+
+/*
+ * Check an IPI pending bitmap for a
+ * vector and send IPIs as needed
+ *
+ * @ci: Target processor
+ * @pending: Pending IPIs
+ */
+static void
+ipi_send_vector(struct cpu_info *ci, ipi_pend_t pending)
+{
+ struct ipi_vector *vp;
+ struct cpu_ipi *ipip;
+ uint8_t n_bits = sizeof(pending) * 8;
+ uint8_t bit_i;
+ uint8_t vector, index;
+ uint32_t apic_id = 0;
+
+	if (ci == NULL) {
+		return;
+	}
+
+	/*
+	 * We are already dispatching IPIs, we don't
+	 * want to find ourselves in interrupt hell.
+	 */
+	if (ci->ipi_dispatch) {
+		return;
+	}
+
+	apic_id = ci->apicid;
+	ci->ipi_dispatch = 1;
+ for (bit_i = 0; bit_i < n_bits; ++bit_i) {
+ if (ISSET(pending, BIT(bit_i))) {
+ vector = ipi_vector(bit_i);
+ index = ipi_index(bit_i);
+
+			ci->ipi_id = bit_i;
+
+ vp = &vectors[vector];
+ ipip = &vp->ipi[index];
+
+			/* Ignore if cookie does not match */
+ if (ipip->cookie != COOKIE)
+ continue;
+
+ /* Ignore if there is no handler */
+ if (ipip->handler == NULL)
+ continue;
+
+ /* Send that IPI through */
+ lapic_send_ipi(
+ apic_id,
+ IPI_SHORTHAND_NONE,
+ BASE_VECTOR + vector
+ );
+ }
+ }
+}
+
+/*
+ * Common IPI routine, called from vector.S
+ *
+ * XXX: Internal usage only
+ */
+void
+__ipi_handle_common(void)
+{
+ struct ipi_vector *vp;
+ struct cpu_info *ci = this_cpu();
+ uint8_t vector;
+
+ if (cookie != COOKIE) {
+ pr_trace("[warn]: got spurious ipi\n");
+ return;
+ }
+
+	if (ci == NULL) {
+		pr_error("could not get current CPU\n");
+		return;
+	}
+
+	/* Grab the vector */
+	vector = ipi_vector(ci->ipi_id);
+	vp = &vectors[vector];
+	if (vp->cookie != COOKIE) {
+		pr_error("got IPI for uninitialized vector\n");
+		return;
+	}
+
+ ipi_dispatch_pending(vp, ci);
+
+ /* We are done dispatching IPIs */
+ ci->ipi_dispatch = 0;
+ ci->ipi_id = 0;
+}
+
+/*
+ * Send one or more IPIs to a specific
+ * processor after the caller has set bits
+ * in the `ci->ipi_pending' field.
+ *
+ * @ci: Processor to send IPI(s) to
+ */
+int
+md_ipi_send(struct cpu_info *ci)
+{
+ if (ci == NULL) {
+ return -EINVAL;
+ }
+
+ spinlock_acquire(&lock);
+ for (int i = 0; i < N_IPIVEC; ++i) {
+ ipi_send_vector(ci, ci->ipi_pending[i]);
+ }
+
+ spinlock_release(&lock);
+ return 0;
+}
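machdep.c below sends IPIs through cpu_ipi_send(), which is not defined in this file. Presumably it marks the IPI pending on the target and then calls md_ipi_send(), roughly as sketched here (mirroring how ipi_dispatch_pending() tests bits; not the actual implementation):

    int
    cpu_ipi_send(struct cpu_info *ci, size_t id)
    {
    	if (ci == NULL) {
    		return -EINVAL;
    	}

    	/* Mark `id' as pending in its vector's bitmap */
    	ci->ipi_pending[ipi_vector(id)] |= BIT(id);
    	return md_ipi_send(ci);
    }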
+
+/*
+ * IPI allocation interface with
+ * locking.
+ */
+int
+md_ipi_alloc(struct cpu_ipi **res)
+{
+ int retval;
+
+ spinlock_acquire(&lock);
+ retval = __ipi_alloc(res);
+ spinlock_release(&lock);
+ return retval;
+}
+
+/*
+ * Initialize the IPI thunks
+ */
+void
+md_ipi_init(void)
+{
+ /* Initialize the IPI vectors */
+ idt_set_desc(0x21, IDT_INT_GATE, ISR(ipi_isr0), 0);
+ idt_set_desc(0x22, IDT_INT_GATE, ISR(ipi_isr1), 0);
+ idt_set_desc(0x23, IDT_INT_GATE, ISR(ipi_isr2), 0);
+ idt_set_desc(0x24, IDT_INT_GATE, ISR(ipi_isr3), 0);
+ cookie = COOKIE;
+}
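The supporting types come from machine/ipi.h, which this page does not show. From their usage above they presumably resemble the following; field names, ordering, and widths are assumptions:

    #define N_IPIVEC	4	/* reserved IDT vectors 0x21..0x24 */
    #define IPI_PER_VEC	16	/* IPIs per vector (4 * 16 = 64 total) */

    typedef uint64_t ipi_pend_t;	/* pending bitmap word, width assumed */

    struct cpu_ipi {
    	uint32_t cookie;		/* COOKIE once initialized */
    	size_t id;			/* global bitmap index */
    	int (*handler)(struct cpu_ipi *ipi);
    };

    struct ipi_vector {
    	uint32_t cookie;
    	uint8_t vec;			/* backing IDT vector */
    	uint8_t nipi;			/* IPIs allocated in this vector */
    	struct cpu_ipi ipi[IPI_PER_VEC];
    };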
diff --git a/sys/arch/amd64/amd64/machdep.c b/sys/arch/amd64/amd64/machdep.c
index 9ff96e1..3f8580a 100644
--- a/sys/arch/amd64/amd64/machdep.c
+++ b/sys/arch/amd64/amd64/machdep.c
@@ -42,6 +42,7 @@
#include <machine/uart.h>
#include <machine/sync.h>
#include <machine/intr.h>
+#include <machine/ipi.h>
#include <machine/cdefs.h>
#include <machine/isa/i8042var.h>
#include <dev/cons/cons.h>
@@ -61,9 +62,6 @@
pr_trace(__VA_ARGS__); \
}
-#define HALT_VECTOR 0x21
-#define TLB_VECTOR 0x22
-
#if defined(__SPECTRE_IBRS)
#define SPECTRE_IBRS __SPECTRE_IBRS
#else
@@ -82,19 +80,20 @@ void syscall_isr(void);
void pin_isr_load(void);
struct cpu_info g_bsp_ci = {0};
+static struct cpu_ipi *halt_ipi;
+static struct cpu_ipi *tlb_ipi;
+static struct spinlock ipi_lock = {0};
static bool bsp_init = false;
-__attribute__((__interrupt__))
-static void
-cpu_halt_isr(void *p)
+static int
+cpu_halt_handler(struct cpu_ipi *ipi)
{
__ASMV("cli; hlt");
__builtin_unreachable();
}
-__attribute__((__interrupt__))
-static void
-tlb_shootdown_isr(void *p)
+static int
+tlb_shootdown_handler(struct cpu_ipi *ipi)
{
struct cpu_info *ci;
int ipl;
@@ -106,7 +105,7 @@ tlb_shootdown_isr(void *p)
*/
ci = this_cpu();
if (!ci->tlb_shootdown) {
- return;
+ return -1;
}
ipl = splraise(IPL_HIGH);
@@ -115,6 +114,7 @@ tlb_shootdown_isr(void *p)
ci->shootdown_va = 0;
ci->tlb_shootdown = 0;
splx(ipl);
+ return 0;
}
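The conversion from `__interrupt__' ISRs to plain callbacks gives handlers a return value. The convention, inferred from the two handlers above rather than stated anywhere in the diff:

    /* Assumed contract: 0 = IPI handled, negative = not applicable. */
    typedef int (*ipi_handler_t)(struct cpu_ipi *ipi);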
static void
@@ -141,8 +141,6 @@ setup_vectors(struct cpu_info *ci)
idt_set_desc(0xD, IDT_TRAP_GATE, ISR(general_prot), 0);
idt_set_desc(0xE, IDT_TRAP_GATE, ISR(page_fault), 0);
idt_set_desc(0x80, IDT_USER_INT_GATE, ISR(syscall_isr), IST_SYSCALL);
- idt_set_desc(HALT_VECTOR, IDT_INT_GATE, ISR(cpu_halt_isr), 0);
- idt_set_desc(TLB_VECTOR, IDT_INT_GATE, ISR(tlb_shootdown_isr), 0);
pin_isr_load();
}
@@ -202,6 +200,44 @@ enable_simd(void)
}
static void
+init_ipis(void)
+{
+ int error;
+
+ if (bsp_init) {
+ return;
+ }
+
+ spinlock_acquire(&ipi_lock);
+ error = md_ipi_alloc(&halt_ipi);
+ if (error < 0) {
+ pr_error("md_ipi_alloc: returned %d\n", error);
+ panic("failed to init halt IPI\n");
+ }
+
+ halt_ipi->handler = cpu_halt_handler;
+ error = md_ipi_alloc(&tlb_ipi);
+ if (error < 0) {
+ pr_error("md_ipi_alloc: returned %d\n", error);
+ panic("failed to init TLB IPI\n");
+ }
+
+ tlb_ipi->handler = tlb_shootdown_handler;
+
+ /*
+ * Some IPIs must have very specific IDs
+ * so that they are standard and usable
+	 * throughout the rest of the system.
+ */
+ if (halt_ipi->id != IPI_HALT)
+ panic("expected IPI_HALT for halt IPI\n");
+ if (tlb_ipi->id != IPI_TLB)
+ panic("expected IPI_TLB for TLB IPI\n");
+
+ spinlock_release(&ipi_lock);
+}
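The two panics encode an ordering contract: init_ipis() is the first caller of the allocator on the BSP, so the halt IPI must receive bit 0 and the TLB IPI bit 1. The machine/ipi.h constants are presumably just those indices (values assumed):

    #define IPI_HALT	0	/* first md_ipi_alloc() call in init_ipis() */
    #define IPI_TLB	1	/* second call */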
+
+static void
cpu_get_info(struct cpu_info *ci)
{
uint32_t eax, ebx, unused;
@@ -257,7 +293,7 @@ cpu_shootdown_tlb(vaddr_t va)
spinlock_acquire(&cip->lock);
cip->shootdown_va = va;
cip->tlb_shootdown = 1;
- lapic_send_ipi(cip->apicid, IPI_SHORTHAND_NONE, TLB_VECTOR);
+ cpu_ipi_send(cip, IPI_TLB);
spinlock_release(&cip->lock);
}
}
@@ -309,6 +345,9 @@ md_backtrace(void)
void
cpu_halt_all(void)
{
+ struct cpu_info *ci;
+ uint32_t ncpu;
+
/*
* If we have no current 'cpu_info' structure set,
* we can't send IPIs, so just assume only the current
@@ -319,8 +358,15 @@ cpu_halt_all(void)
__ASMV("cli; hlt");
}
- /* Send IPI to all cores */
- lapic_send_ipi(0, IPI_SHORTHAND_ALL, HALT_VECTOR);
+	ncpu = cpu_count();
+	for (int i = 0; i < ncpu; ++i) {
+ ci = cpu_get(i);
+ if (ci == NULL) {
+ continue;
+ }
+
+ cpu_ipi_send(ci, IPI_HALT);
+ }
+
for (;;);
}
@@ -331,12 +377,24 @@ cpu_halt_all(void)
void
cpu_halt_others(void)
{
+ struct cpu_info *curcpu, *ci;
+ uint32_t ncpu;
+
if (rdmsr(IA32_GS_BASE) == 0) {
__ASMV("cli; hlt");
}
- /* Send IPI to all cores */
- lapic_send_ipi(0, IPI_SHORTHAND_OTHERS, HALT_VECTOR);
+ curcpu = this_cpu();
+ ncpu = cpu_count();
+
+ for (int i = 0; i < ncpu; ++i) {
+ if ((ci = cpu_get(i)) == NULL)
+ continue;
+ if (ci->id == curcpu->id)
+ continue;
+
+ cpu_ipi_send(ci, IPI_HALT);
+ }
}
void
@@ -441,7 +499,10 @@ cpu_startup(struct cpu_info *ci)
wrmsr(IA32_GS_BASE, (uintptr_t)ci);
init_tss(ci);
+
setup_vectors(ci);
+ md_ipi_init();
+ init_ipis();
try_mitigate_spectre();
ci->online = 1;
diff --git a/sys/arch/amd64/amd64/mp.c b/sys/arch/amd64/amd64/mp.c
index 20f550f..43830ba 100644
--- a/sys/arch/amd64/amd64/mp.c
+++ b/sys/arch/amd64/amd64/mp.c
@@ -30,6 +30,7 @@
#include <sys/types.h>
#include <sys/limine.h>
#include <sys/limits.h>
+#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/proc.h>
#include <sys/spinlock.h>
@@ -149,4 +150,5 @@ mp_bootstrap_aps(struct cpu_info *ci)
/* Wait for all cores to be ready */
while ((ncpu_up - 1) < cpu_init_counter);
+ cpu_report_count(ncpu_up);
}
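cpu_report_count() and the cpu_count()/cpu_get() readers used by machdep.c live outside this diff; a minimal sketch of the assumed counter half (signatures inferred from the call sites):

    static uint32_t ncpu = 1;	/* BSP only, until the APs report in */

    void
    cpu_report_count(uint32_t count)
    {
    	ncpu = count;
    }

    uint32_t
    cpu_count(void)
    {
    	return ncpu;
    }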
diff --git a/sys/arch/amd64/amd64/vector.S b/sys/arch/amd64/amd64/vector.S
index 890b314..19c68d5 100644
--- a/sys/arch/amd64/amd64/vector.S
+++ b/sys/arch/amd64/amd64/vector.S
@@ -73,72 +73,101 @@ done:
.globl pin_isr_load
pin_isr_load:
- IDT_SET_VEC 35, ioapic_edge_0
- IDT_SET_VEC 36, ioapic_edge_1
- IDT_SET_VEC 37, ioapic_edge_2
- IDT_SET_VEC 38, ioapic_edge_3
- IDT_SET_VEC 39, ioapic_edge_4
- IDT_SET_VEC 40, ioapic_edge_5
- IDT_SET_VEC 41, ioapic_edge_6
- IDT_SET_VEC 42, ioapic_edge_7
- IDT_SET_VEC 43, ioapic_edge_8
- IDT_SET_VEC 44, ioapic_edge_9
- IDT_SET_VEC 45, ioapic_edge_10
- IDT_SET_VEC 46, ioapic_edge_11
- IDT_SET_VEC 47, ioapic_edge_12
- IDT_SET_VEC 48, ioapic_edge_13
- IDT_SET_VEC 49, ioapic_edge_14
- IDT_SET_VEC 50, ioapic_edge_15
- IDT_SET_VEC 51, ioapic_edge_16
- IDT_SET_VEC 52, ioapic_edge_17
- IDT_SET_VEC 53, ioapic_edge_18
- IDT_SET_VEC 54, ioapic_edge_19
- IDT_SET_VEC 55, ioapic_edge_20
- IDT_SET_VEC 56, ioapic_edge_21
- IDT_SET_VEC 57, ioapic_edge_22
- IDT_SET_VEC 58, ioapic_edge_23
- IDT_SET_VEC 59, ioapic_edge_24
- IDT_SET_VEC 60, ioapic_edge_25
- IDT_SET_VEC 61, ioapic_edge_26
- IDT_SET_VEC 62, ioapic_edge_27
- IDT_SET_VEC 63, ioapic_edge_28
- IDT_SET_VEC 64, ioapic_edge_29
- IDT_SET_VEC 65, ioapic_edge_30
- IDT_SET_VEC 66, ioapic_edge_31
- IDT_SET_VEC 67, ioapic_edge_32
- IDT_SET_VEC 68, ioapic_edge_33
- IDT_SET_VEC 69, ioapic_edge_34
- IDT_SET_VEC 70, ioapic_edge_35
- IDT_SET_VEC 71, ioapic_edge_36
- IDT_SET_VEC 72, ioapic_edge_37
- IDT_SET_VEC 73, ioapic_edge_38
- IDT_SET_VEC 74, ioapic_edge_39
- IDT_SET_VEC 75, ioapic_edge_40
- IDT_SET_VEC 76, ioapic_edge_41
- IDT_SET_VEC 77, ioapic_edge_42
- IDT_SET_VEC 78, ioapic_edge_43
- IDT_SET_VEC 79, ioapic_edge_44
- IDT_SET_VEC 80, ioapic_edge_45
- IDT_SET_VEC 81, ioapic_edge_46
- IDT_SET_VEC 82, ioapic_edge_47
- IDT_SET_VEC 83, ioapic_edge_48
- IDT_SET_VEC 84, ioapic_edge_49
- IDT_SET_VEC 85, ioapic_edge_50
- IDT_SET_VEC 86, ioapic_edge_51
- IDT_SET_VEC 87, ioapic_edge_52
- IDT_SET_VEC 88, ioapic_edge_53
- IDT_SET_VEC 89, ioapic_edge_54
- IDT_SET_VEC 90, ioapic_edge_55
- IDT_SET_VEC 91, ioapic_edge_56
- IDT_SET_VEC 92, ioapic_edge_57
- IDT_SET_VEC 93, ioapic_edge_58
- IDT_SET_VEC 94, ioapic_edge_59
- IDT_SET_VEC 95, ioapic_edge_60
- IDT_SET_VEC 96, ioapic_edge_61
- IDT_SET_VEC 97, ioapic_edge_62
- IDT_SET_VEC 97, ioapic_edge_63
+ IDT_SET_VEC 37, ioapic_edge_0
+ IDT_SET_VEC 38, ioapic_edge_1
+ IDT_SET_VEC 39, ioapic_edge_2
+ IDT_SET_VEC 40, ioapic_edge_3
+ IDT_SET_VEC 41, ioapic_edge_4
+ IDT_SET_VEC 42, ioapic_edge_5
+ IDT_SET_VEC 43, ioapic_edge_6
+ IDT_SET_VEC 44, ioapic_edge_7
+ IDT_SET_VEC 45, ioapic_edge_8
+ IDT_SET_VEC 46, ioapic_edge_9
+ IDT_SET_VEC 47, ioapic_edge_10
+ IDT_SET_VEC 48, ioapic_edge_11
+ IDT_SET_VEC 49, ioapic_edge_12
+ IDT_SET_VEC 50, ioapic_edge_13
+ IDT_SET_VEC 51, ioapic_edge_14
+ IDT_SET_VEC 52, ioapic_edge_15
+ IDT_SET_VEC 53, ioapic_edge_16
+ IDT_SET_VEC 54, ioapic_edge_17
+ IDT_SET_VEC 55, ioapic_edge_18
+ IDT_SET_VEC 56, ioapic_edge_19
+ IDT_SET_VEC 57, ioapic_edge_20
+ IDT_SET_VEC 58, ioapic_edge_21
+ IDT_SET_VEC 59, ioapic_edge_22
+ IDT_SET_VEC 60, ioapic_edge_23
+ IDT_SET_VEC 61, ioapic_edge_24
+ IDT_SET_VEC 62, ioapic_edge_25
+ IDT_SET_VEC 63, ioapic_edge_26
+ IDT_SET_VEC 64, ioapic_edge_27
+ IDT_SET_VEC 65, ioapic_edge_28
+ IDT_SET_VEC 66, ioapic_edge_29
+ IDT_SET_VEC 67, ioapic_edge_30
+ IDT_SET_VEC 68, ioapic_edge_31
+ IDT_SET_VEC 69, ioapic_edge_32
+ IDT_SET_VEC 70, ioapic_edge_33
+ IDT_SET_VEC 71, ioapic_edge_34
+ IDT_SET_VEC 72, ioapic_edge_35
+ IDT_SET_VEC 73, ioapic_edge_36
+ IDT_SET_VEC 74, ioapic_edge_37
+ IDT_SET_VEC 75, ioapic_edge_38
+ IDT_SET_VEC 76, ioapic_edge_39
+ IDT_SET_VEC 77, ioapic_edge_40
+ IDT_SET_VEC 78, ioapic_edge_41
+ IDT_SET_VEC 79, ioapic_edge_42
+ IDT_SET_VEC 80, ioapic_edge_43
+ IDT_SET_VEC 81, ioapic_edge_44
+ IDT_SET_VEC 82, ioapic_edge_45
+ IDT_SET_VEC 83, ioapic_edge_46
+ IDT_SET_VEC 84, ioapic_edge_47
+ IDT_SET_VEC 85, ioapic_edge_48
+ IDT_SET_VEC 86, ioapic_edge_49
+ IDT_SET_VEC 87, ioapic_edge_50
+ IDT_SET_VEC 88, ioapic_edge_51
+ IDT_SET_VEC 89, ioapic_edge_52
+ IDT_SET_VEC 90, ioapic_edge_53
+ IDT_SET_VEC 91, ioapic_edge_54
+ IDT_SET_VEC 92, ioapic_edge_55
+ IDT_SET_VEC 93, ioapic_edge_56
+ IDT_SET_VEC 94, ioapic_edge_57
+ IDT_SET_VEC 95, ioapic_edge_58
+ IDT_SET_VEC 96, ioapic_edge_59
+ IDT_SET_VEC 97, ioapic_edge_60
+ IDT_SET_VEC 98, ioapic_edge_61
+ IDT_SET_VEC 99, ioapic_edge_62
+ IDT_SET_VEC 100, ioapic_edge_63
ret
+ .globl ipi_isr0
+INTRENTRY(ipi_isr0, ipi_trampoline)
+ call ipi_trampoline
+ retq
+
+ .globl ipi_isr1
+INTRENTRY(ipi_isr1, ipi_trampoline)
+ call ipi_trampoline
+ retq
+
+ .globl ipi_isr2
+INTRENTRY(ipi_isr2, ipi_trampoline)
+ call ipi_trampoline
+ retq
+
+ .globl ipi_isr3
+INTRENTRY(ipi_isr3, ipi_trampoline)
+ call ipi_trampoline
+ retq
+
+/*
+ * Hyra supports 16 IPI handlers per reserved
+ * IDT vector. With 4 reserved vectors, that
+ * allows for a maximum of 64 IPIs.
+ */
+ipi_trampoline:
+ call __ipi_handle_common
+ retq
+
/* I/O APIC edge ISRs */
INTRENTRY(ioapic_edge_0, ioapic_common_func)
INTRENTRY(ioapic_edge_1, ioapic_common_func)