summaryrefslogtreecommitdiff
path: root/sys/arch/amd64/amd64/machdep.c
diff options
context:
space:
mode:
Diffstat (limited to 'sys/arch/amd64/amd64/machdep.c')
-rw-r--r--  sys/arch/amd64/amd64/machdep.c  203
1 files changed, 184 insertions, 19 deletions
diff --git a/sys/arch/amd64/amd64/machdep.c b/sys/arch/amd64/amd64/machdep.c
index 9ff96e1..f0571b3 100644
--- a/sys/arch/amd64/amd64/machdep.c
+++ b/sys/arch/amd64/amd64/machdep.c
@@ -42,6 +42,7 @@
#include <machine/uart.h>
#include <machine/sync.h>
#include <machine/intr.h>
+#include <machine/ipi.h>
#include <machine/cdefs.h>
#include <machine/isa/i8042var.h>
#include <dev/cons/cons.h>
@@ -61,9 +62,6 @@
pr_trace(__VA_ARGS__); \
}
-#define HALT_VECTOR 0x21
-#define TLB_VECTOR 0x22
-
#if defined(__SPECTRE_IBRS)
#define SPECTRE_IBRS __SPECTRE_IBRS
#else
@@ -76,25 +74,32 @@
#define CPU_SMEP 0
#endif
+#if defined(__CPU_UMIP)
+#define CPU_UMIP __CPU_UMIP
+#else
+#define CPU_UMIP 0
+#endif
+
int ibrs_enable(void);
int simd_init(void);
void syscall_isr(void);
void pin_isr_load(void);
struct cpu_info g_bsp_ci = {0};
+static struct cpu_ipi *halt_ipi;
+static struct cpu_ipi *tlb_ipi;
+static struct spinlock ipi_lock = {0};
static bool bsp_init = false;
-__attribute__((__interrupt__))
-static void
-cpu_halt_isr(void *p)
+static int
+cpu_halt_handler(struct cpu_ipi *ipi)
{
__ASMV("cli; hlt");
__builtin_unreachable();
}
-__attribute__((__interrupt__))
-static void
-tlb_shootdown_isr(void *p)
+static int
+tlb_shootdown_handler(struct cpu_ipi *ipi)
{
struct cpu_info *ci;
int ipl;
@@ -106,7 +111,7 @@ tlb_shootdown_isr(void *p)
*/
ci = this_cpu();
if (!ci->tlb_shootdown) {
- return;
+ return -1;
}
ipl = splraise(IPL_HIGH);
@@ -115,6 +120,7 @@ tlb_shootdown_isr(void *p)
ci->shootdown_va = 0;
ci->tlb_shootdown = 0;
splx(ipl);
+ return 0;
}
static void
@@ -141,8 +147,6 @@ setup_vectors(struct cpu_info *ci)
idt_set_desc(0xD, IDT_TRAP_GATE, ISR(general_prot), 0);
idt_set_desc(0xE, IDT_TRAP_GATE, ISR(page_fault), 0);
idt_set_desc(0x80, IDT_USER_INT_GATE, ISR(syscall_isr), IST_SYSCALL);
- idt_set_desc(HALT_VECTOR, IDT_INT_GATE, ISR(cpu_halt_isr), 0);
- idt_set_desc(TLB_VECTOR, IDT_INT_GATE, ISR(tlb_shootdown_isr), 0);
pin_isr_load();
}
@@ -202,17 +206,128 @@ enable_simd(void)
}
static void
+init_ipis(void)
+{
+ int error;
+
+ if (bsp_init) {
+ return;
+ }
+
+ spinlock_acquire(&ipi_lock);
+ error = md_ipi_alloc(&halt_ipi);
+ if (error < 0) {
+ pr_error("md_ipi_alloc: returned %d\n", error);
+ panic("failed to init halt IPI\n");
+ }
+
+ halt_ipi->handler = cpu_halt_handler;
+ error = md_ipi_alloc(&tlb_ipi);
+ if (error < 0) {
+ pr_error("md_ipi_alloc: returned %d\n", error);
+ panic("failed to init TLB IPI\n");
+ }
+
+ tlb_ipi->handler = tlb_shootdown_handler;
+
+ /*
+ * Some IPIs must have very specific IDs
+ * so that they are standard and usable
+ * throughout the rest of the system.
+ */
+ if (halt_ipi->id != IPI_HALT)
+ panic("expected IPI_HALT for halt IPI\n");
+ if (tlb_ipi->id != IPI_TLB)
+ panic("expected IPI_TLB for TLB IPI\n");
+
+ spinlock_release(&ipi_lock);
+}
+
+static void
+cpu_get_vendor(struct cpu_info *ci)
+{
+ uint32_t unused, ebx, ecx, edx;
+ char vendor_str[13];
+
+ /*
+ * This CPUID returns a 12 byte CPU vendor string
+ * that we'll put together and use to detect the vendor.
+ */
+ CPUID(0, unused, ebx, ecx, edx);
+
+ /* Dword 0 */
+ vendor_str[0] = ebx & 0xFF;
+ vendor_str[1] = (ebx >> 8) & 0xFF;
+ vendor_str[2] = (ebx >> 16) & 0xFF;
+ vendor_str[3] = (ebx >> 24) & 0xFF;
+
+ /* Dword 1 */
+ vendor_str[4] = edx & 0xFF;
+ vendor_str[5] = (edx >> 8) & 0xFF;
+ vendor_str[6] = (edx >> 16) & 0xFF;
+ vendor_str[7] = (edx >> 24) & 0xFF;
+
+ /* Dword 2 */
+ vendor_str[8] = ecx & 0xFF;
+ vendor_str[9] = (ecx >> 8) & 0xFF;
+ vendor_str[10] = (ecx >> 16) & 0xFF;
+ vendor_str[11] = (ecx >> 24) & 0xFF;
+ vendor_str[12] = '\0';
+
+ /* Is this an AMD CPU? */
+ if (strcmp(vendor_str, "AuthenticAMD") == 0) {
+ ci->vendor = CPU_VENDOR_AMD;
+ return;
+ }
+
+ /* Is this an Intel CPU? */
+ if (strcmp(vendor_str, "GenuineIntel") == 0) {
+ ci->vendor = CPU_VENDOR_INTEL;
+ return;
+ }
+
+ /*
+ * Some buggy Intel CPUs report the string "GenuineIotel"
+ * instead of "GenuineIntel". This is rare but we should
+ * still handle it as it can happen. Probably a good idea
+ * to log it so the user can know about their rare CPU
+ * quirk and brag to their friends :~)
+ */
+ if (strcmp(vendor_str, "GenuineIotel") == 0) {
+ pr_trace_bsp("vendor_str=%s\n", vendor_str);
+ pr_trace_bsp("detected vendor string quirk\n");
+ ci->vendor = CPU_VENDOR_INTEL;
+ return;
+ }
+
+ ci->vendor = CPU_VENDOR_OTHER;
+}
+
+static void
cpu_get_info(struct cpu_info *ci)
{
- uint32_t eax, ebx, unused;
+ uint32_t eax, ebx, ecx, edx, unused;
uint8_t ext_model, ext_family;
+ /* Get the vendor information */
+ cpu_get_vendor(ci);
+
/* Extended features */
- CPUID(0x07, unused, ebx, unused, unused);
+ CPUID(0x07, unused, ebx, ecx, unused);
if (ISSET(ebx, BIT(7)))
ci->feat |= CPU_FEAT_SMEP;
if (ISSET(ebx, BIT(20)))
ci->feat |= CPU_FEAT_SMAP;
+ if (ISSET(ecx, BIT(2)))
+ ci->feat |= CPU_FEAT_UMIP;
+
+ /*
+ * Processor power management information bits as well
+ * as bits describing RAS capabilities
+ */
+ CPUID(0x80000007, unused, unused, unused, edx);
+ if (ISSET(edx, BIT(8)))
+ ci->feat |= CPU_FEAT_TSCINV;
/*
* Processor info and feature bits
@@ -242,6 +357,30 @@ cpu_get_info(struct cpu_info *ci)
}
}
+/*
+ * The CR4.UMIP bit prevents user programs from
+ * executing instructions related to accessing
+ * system memory structures. This should be enabled
+ * by default if supported.
+ */
+static void
+cpu_enable_umip(void)
+{
+ struct cpu_info *ci = this_cpu();
+ uint64_t cr4;
+
+ if (!CPU_UMIP) {
+ pr_trace_bsp("UMIP not configured\n");
+ return;
+ }
+
+ if (ISSET(ci->feat, CPU_FEAT_UMIP)) {
+ cr4 = amd64_read_cr4();
+ cr4 |= CR4_UMIP;
+ amd64_write_cr4(cr4);
+ }
+}
+
void
cpu_shootdown_tlb(vaddr_t va)
{
@@ -257,7 +396,7 @@ cpu_shootdown_tlb(vaddr_t va)
spinlock_acquire(&cip->lock);
cip->shootdown_va = va;
cip->tlb_shootdown = 1;
- lapic_send_ipi(cip->apicid, IPI_SHORTHAND_NONE, TLB_VECTOR);
+ cpu_ipi_send(cip, IPI_TLB);
spinlock_release(&cip->lock);
}
}
@@ -309,6 +448,9 @@ md_backtrace(void)
void
cpu_halt_all(void)
{
+ struct cpu_info *ci;
+ uint32_t ncpu;
+
/*
* If we have no current 'cpu_info' structure set,
* we can't send IPIs, so just assume only the current
@@ -319,8 +461,15 @@ cpu_halt_all(void)
__ASMV("cli; hlt");
}
- /* Send IPI to all cores */
- lapic_send_ipi(0, IPI_SHORTHAND_ALL, HALT_VECTOR);
+ ncpu = cpu_count();
+ for (uint32_t i = 0; i < ncpu; ++i) {
+ ci = cpu_get(i);
+ if (ci == NULL) {
+ continue;
+ }
+
+ cpu_ipi_send(ci, IPI_HALT);
+ }
+
for (;;);
}
@@ -331,12 +480,24 @@ cpu_halt_all(void)
void
cpu_halt_others(void)
{
+ struct cpu_info *curcpu, *ci;
+ uint32_t ncpu;
+
if (rdmsr(IA32_GS_BASE) == 0) {
__ASMV("cli; hlt");
}
- /* Send IPI to all cores */
- lapic_send_ipi(0, IPI_SHORTHAND_OTHERS, HALT_VECTOR);
+ curcpu = this_cpu();
+ ncpu = cpu_count();
+
+ for (uint32_t i = 0; i < ncpu; ++i) {
+ if ((ci = cpu_get(i)) == NULL)
+ continue;
+ if (ci->id == curcpu->id)
+ continue;
+
+ cpu_ipi_send(ci, IPI_HALT);
+ }
}
void
@@ -441,13 +602,17 @@ cpu_startup(struct cpu_info *ci)
wrmsr(IA32_GS_BASE, (uintptr_t)ci);
init_tss(ci);
+
setup_vectors(ci);
+ md_ipi_init();
+ init_ipis();
try_mitigate_spectre();
ci->online = 1;
cpu_get_info(ci);
cpu_enable_smep();
+ cpu_enable_umip();
enable_simd();
lapic_init();