Diffstat (limited to 'sys/arch')
-rw-r--r--  sys/arch/amd64/amd64/intr.c           1
-rw-r--r--  sys/arch/amd64/amd64/lapic_intr.S     2
-rw-r--r--  sys/arch/amd64/amd64/machdep.c       34
-rw-r--r--  sys/arch/amd64/amd64/mp.c            18
-rw-r--r--  sys/arch/amd64/amd64/proc_machdep.c  71
-rw-r--r--  sys/arch/amd64/amd64/vector.S        10
-rw-r--r--  sys/arch/amd64/conf/GENERIC           1
-rw-r--r--  sys/arch/amd64/isa/i8042.c           18
8 files changed, 147 insertions, 8 deletions
diff --git a/sys/arch/amd64/amd64/intr.c b/sys/arch/amd64/amd64/intr.c
index 685a16d..a545788 100644
--- a/sys/arch/amd64/amd64/intr.c
+++ b/sys/arch/amd64/amd64/intr.c
@@ -129,6 +129,7 @@ intr_register(const char *name, const struct intr_hand *ih)
ih_new->priority = ih->priority;
ih_new->irq = ih->irq;
ih_new->vector = i;
+ ih_new->nintr = 0;
g_intrs[i] = ih_new;
if (ih->irq >= 0) {
diff --git a/sys/arch/amd64/amd64/lapic_intr.S b/sys/arch/amd64/amd64/lapic_intr.S
index 5ae8f39..1413660 100644
--- a/sys/arch/amd64/amd64/lapic_intr.S
+++ b/sys/arch/amd64/amd64/lapic_intr.S
@@ -33,6 +33,6 @@
.globl lapic_tmr_isr
INTRENTRY(lapic_tmr_isr, handle_lapic_tmr)
handle_lapic_tmr:
- call sched_switch // Context switch per every timer IRQ
+ call md_sched_switch // Context switch on every timer IRQ
call lapic_eoi // Done! Signal that we finished to the Local APIC
retq
diff --git a/sys/arch/amd64/amd64/machdep.c b/sys/arch/amd64/amd64/machdep.c
index d310460..40950f9 100644
--- a/sys/arch/amd64/amd64/machdep.c
+++ b/sys/arch/amd64/amd64/machdep.c
@@ -187,9 +187,10 @@ enable_simd(void)
}
static void
-cpu_check_feat(struct cpu_info *ci)
+cpu_get_info(struct cpu_info *ci)
{
- uint32_t unused, ebx;
+ uint32_t eax, ebx, unused;
+ uint8_t ext_model, ext_family;
/* Extended features */
CPUID(0x07, unused, ebx, unused, unused);
@@ -197,6 +198,33 @@ cpu_check_feat(struct cpu_info *ci)
ci->feat |= CPU_FEAT_SMEP;
if (ISSET(ebx, BIT(20)))
ci->feat |= CPU_FEAT_SMAP;
+
+ /*
+ * Processor info and feature bits
+ */
+ CPUID(0x01, eax, unused, unused, unused);
+ ci->model = (eax >> 4) & 0xF;
+ ci->family = (eax >> 8) & 0xF;
+
+ /*
+ * If the family ID is 15 then the actual family
+ * ID is the sum of the extended family and the
+ * family ID fields.
+ */
+ if (ci->family == 0xF) {
+ ext_family = (eax >> 20) & 0xFF;
+ ci->family += ext_family;
+ }
+
+ /*
+ * If the family ID is either 6 or 15, the extended
+ * model number is also used; combine it with the
+ * base model ID in that case.
+ */
+ if (ci->family == 6 || ci->family == 15) {
+ ext_model = (eax >> 16) & 0xF;
+ ci->model |= (ext_model << 4);
+ }
}
void
@@ -383,7 +411,7 @@ cpu_startup(struct cpu_info *ci)
init_tss(ci);
try_mitigate_spectre();
- cpu_check_feat(ci);
+ cpu_get_info(ci);
cpu_enable_smep();
enable_simd();
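
The family/model decoding added above follows the standard CPUID leaf 1 (EAX=1) encoding. As a minimal sketch, the same decoding can be checked from user space with the compiler's <cpuid.h> helper; this is an illustration only and does not use the kernel's CPUID() macro or struct cpu_info:

#include <stdio.h>
#include <cpuid.h>

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int family, model;

	if (__get_cpuid(0x01, &eax, &ebx, &ecx, &edx) == 0)
		return 1;

	model = (eax >> 4) & 0xF;
	family = (eax >> 8) & 0xF;

	/* Family 15 adds in the extended family field */
	if (family == 0xF)
		family += (eax >> 20) & 0xFF;

	/* Families 6 and 15 also use the extended model bits */
	if (family == 6 || family == 0xF)
		model |= ((eax >> 16) & 0xF) << 4;

	printf("family=%u model=%u\n", family, model);
	return 0;
}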
diff --git a/sys/arch/amd64/amd64/mp.c b/sys/arch/amd64/amd64/mp.c
index dbee32c..21881b2 100644
--- a/sys/arch/amd64/amd64/mp.c
+++ b/sys/arch/amd64/amd64/mp.c
@@ -81,6 +81,24 @@ cpu_get(uint32_t index)
return ci_list[index];
}
+/*
+ * Grab the CPU stat structure of a specified
+ * processor.
+ *
+ * @cpu_index: CPU index number
+ */
+struct sched_cpu *
+cpu_get_stat(uint32_t cpu_index)
+{
+ struct cpu_info *ci;
+
+ if ((ci = cpu_get(cpu_index)) == NULL) {
+ return NULL;
+ }
+
+ return &ci->stat;
+}
+
uint32_t
cpu_count(void)
{
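
The new accessor gives machine-independent code a way to reach per-CPU scheduler statistics without poking at struct cpu_info directly. A hypothetical caller, summing context switches across all processors, could look like the sketch below; only the nswitch field is known from this diff, and the helper name is invented for illustration:

/*
 * Hypothetical helper: total context switches system-wide,
 * built on cpu_count() and the new cpu_get_stat().
 */
static uint64_t
total_nswitch(void)
{
	struct sched_cpu *stat;
	uint64_t total = 0;
	uint32_t i;

	for (i = 0; i < cpu_count(); ++i) {
		if ((stat = cpu_get_stat(i)) == NULL)
			continue;
		total += stat->nswitch;
	}

	return total;
}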
diff --git a/sys/arch/amd64/amd64/proc_machdep.c b/sys/arch/amd64/amd64/proc_machdep.c
index 63604a4..ad807fe 100644
--- a/sys/arch/amd64/amd64/proc_machdep.c
+++ b/sys/arch/amd64/amd64/proc_machdep.c
@@ -32,6 +32,8 @@
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/exec.h>
+#include <sys/sched.h>
+#include <sys/schedvar.h>
#include <machine/frame.h>
#include <machine/gdt.h>
#include <machine/cpu.h>
@@ -220,3 +222,72 @@ md_spawn(struct proc *p, struct proc *parent, uintptr_t ip)
tfp->rsp = ALIGN_DOWN((stack_base + PROC_STACK_SIZE) - 1, 16);
return 0;
}
+
+/*
+ * Save thread state and enqueue it back into one
+ * of the ready queues.
+ */
+static void
+sched_save_td(struct proc *td, struct trapframe *tf)
+{
+ /*
+ * Save trapframe to process structure only
+ * if PROC_EXEC is not set.
+ */
+ if (!ISSET(td->flags, PROC_EXEC)) {
+ memcpy(&td->tf, tf, sizeof(td->tf));
+ }
+
+ sched_enqueue_td(td);
+}
+
+static void
+sched_switch_to(struct trapframe *tf, struct proc *td)
+{
+ struct cpu_info *ci;
+ struct sched_cpu *cpustat;
+ struct pcb *pcbp;
+
+ ci = this_cpu();
+
+ if (tf != NULL) {
+ memcpy(tf, &td->tf, sizeof(*tf));
+ }
+
+ /* Update stats */
+ cpustat = &ci->stat;
+ cpustat->nswitch++;
+
+ ci->curtd = td;
+ pcbp = &td->pcb;
+ pmap_switch_vas(pcbp->addrsp);
+}
+
+/*
+ * Perform a context switch.
+ */
+void
+md_sched_switch(struct trapframe *tf)
+{
+ struct proc *next_td, *td;
+ struct cpu_info *ci;
+
+ ci = this_cpu();
+ td = ci->curtd;
+ mi_sched_switch(td);
+
+ if (td != NULL) {
+ if (td->pid == 0)
+ return;
+
+ sched_save_td(td, tf);
+ }
+
+ if ((next_td = sched_dequeue_td()) == NULL) {
+ sched_oneshot(false);
+ return;
+ }
+
+ sched_switch_to(tf, next_td);
+ sched_oneshot(false);
+}
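
md_sched_switch() works entirely on the live trapframe: sched_save_td() copies the interrupted thread's registers out of the frame, and sched_switch_to() copies the incoming thread's saved registers over it, so the ordinary interrupt return path resumes whichever thread the frame now describes. A stripped-down sketch of that swap, with a hypothetical two-field frame standing in for the real struct trapframe:

/*
 * Illustration only: the "switch" is just two copies around the
 * frame that the interrupt return path will pop.
 */
struct frame { unsigned long rip, rsp; };

struct thread { struct frame tf; };

void
switch_frames(struct frame *live, struct thread *prev, struct thread *next)
{
	prev->tf = *live;	/* sched_save_td(): stash outgoing state */
	*live = next->tf;	/* sched_switch_to(): load incoming state */
}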
diff --git a/sys/arch/amd64/amd64/vector.S b/sys/arch/amd64/amd64/vector.S
index c820a41..890b314 100644
--- a/sys/arch/amd64/amd64/vector.S
+++ b/sys/arch/amd64/amd64/vector.S
@@ -51,16 +51,22 @@ ioapic_common_func:
jz 1f // Nope, return
mov (%rdx), %rbx // intr_hand.func
- add $8, %rdx // Get interrupt data
+ add $16, %rdx // Get interrupt data
mov %rdx, %rdi // Pass the interrupt data
push %rcx // Save our counter
+ push %rdx // Save the data pointer (caller-saved)
call *%rbx // Call the handler
+ pop %rdx // Restore the data pointer
pop %rcx // Restore our counter
or %rax, %rax // Was it theirs? (RET >= 1)
- jnz done // Yes, we are done.
+ jnz handled // Yes, we are done.
1: inc %rcx // Next
cmp $256, %rcx // Did we reach the end?
jl .walk // Nope, keep going
+ jmp done // Out of entries
+handled:
+ sub $8, %rdx // Back from the data (+16) to the nintr field (+8)
+ addq $1, (%rdx) // Count this interrupt for the handler
done:
call lapic_eoi
retq
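
The fixed offsets in this walk encode an assumed struct intr_hand layout: the handler pointer at offset 0 ("mov (%rdx), %rbx"), the new nintr counter at offset 8 (reached by "sub $8, %rdx"), and the handler data starting at offset 16 ("add $16, %rdx"). The real definition lives in the machine headers and is not part of this diff, so the field names and types below are guesses meant only to make the offsets concrete:

/*
 * Assumed layout relied upon by ioapic_common_func:
 *
 *   offset  0: func   handler, returns >= 1 if the IRQ was handled
 *   offset  8: nintr  per-handler interrupt count (incremented at "handled:")
 *   offset 16: ...    handler data, whose address is passed in %rdi
 */
struct intr_hand {
	int (*func)(void *);
	unsigned long nintr;
	/* handler data follows at offset 16 */
};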
diff --git a/sys/arch/amd64/conf/GENERIC b/sys/arch/amd64/conf/GENERIC
index 95fe2e0..e407fa9 100644
--- a/sys/arch/amd64/conf/GENERIC
+++ b/sys/arch/amd64/conf/GENERIC
@@ -10,6 +10,7 @@ option SERIAL_DEBUG yes // Enable kmsg serial logging
option USER_KMSG no // Show kmsg in user consoles
option CPU_SMEP yes // Supervisor Memory Exec Protection
option PANIC_SCR no // Clear screen on panic
+option I8042_POLL yes // Use polling for the i8042
// Kernel constants
setval SCHED_NQUEUE 4 // Number of scheduler queues (for MLFQ)
diff --git a/sys/arch/amd64/isa/i8042.c b/sys/arch/amd64/isa/i8042.c
index cde70ff..3ae645d 100644
--- a/sys/arch/amd64/isa/i8042.c
+++ b/sys/arch/amd64/isa/i8042.c
@@ -53,6 +53,13 @@
#include <string.h>
#include <assert.h>
+/* From kconf(9) */
+#if !defined(__I8042_POLL)
+#define I8042_POLL 0
+#else
+#define I8042_POLL __I8042_POLL
+#endif
+
#define KEY_REP_MAX 2
#define pr_trace(fmt, ...) kprintf("i8042: " fmt, ##__VA_ARGS__)
@@ -424,13 +431,20 @@ i8042_init(void)
quirks |= I8042_HOSTILE;
pr_trace("ThinkPad T420s detected, assuming hostile\n");
pr_trace("disabling irq 1, polling as fallback\n");
- spawn(&polltd, i8042_sync_loop, NULL, 0, NULL);
}
- if (!ISSET(quirks, I8042_HOSTILE)) {
+ /*
+ * If the i8042 has the hostile quirk or we are
+ * configured to poll for events, spawn the polling
+ * thread.
+ */
+ if (!ISSET(quirks, I8042_HOSTILE) && !I8042_POLL) {
/* Enable interrupts */
i8042_drain();
i8042_en_intr();
+ } else if (ISSET(quirks, I8042_HOSTILE) || I8042_POLL) {
+ spawn(&polltd, i8042_sync_loop, NULL, 0, NULL);
+ pr_trace("polling events\n");
}
i8042_write(I8042_CMD, I8042_ENABLE_PORT0);
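
The __I8042_POLL/I8042_POLL dance above is the usual kconf(9) pattern: an "option ... yes" line in GENERIC surfaces in the code as a __<NAME> preprocessor define, and the driver normalizes it to a local macro with a default for builds where the option is absent. A hypothetical new option would be wired up the same way (the names below are invented for illustration):

/* GENERIC would gain:  option FOO_DEBUG yes */

/* From kconf(9) */
#if !defined(__FOO_DEBUG)
#define FOO_DEBUG 0
#else
#define FOO_DEBUG __FOO_DEBUG
#endif

The macro is then tested in ordinary C, exactly as I8042_POLL is tested in i8042_init() above.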