 sys/arch/amd64/amd64/machdep.c      |  1
 sys/arch/amd64/amd64/proc_machdep.c | 35
 sys/arch/amd64/amd64/reboot.c       | 47
 sys/arch/amd64/isa/i8042.c          |  1
 sys/include/arch/amd64/cpu.h        |  1
 sys/include/sys/sched.h             |  3
 sys/include/sys/spinlock.h          |  3
 sys/kern/kern_exit.c                |  8
 sys/kern/kern_synch.c               | 31
 usr.bin/screensave/screensave.c     |  4
 10 files changed, 96 insertions(+), 38 deletions(-)
diff --git a/sys/arch/amd64/amd64/machdep.c b/sys/arch/amd64/amd64/machdep.c
index 94b2d18..72de150 100644
--- a/sys/arch/amd64/amd64/machdep.c
+++ b/sys/arch/amd64/amd64/machdep.c
@@ -609,6 +609,7 @@ cpu_startup(struct cpu_info *ci)
try_mitigate_spectre();
ci->online = 1;
+ ci->preempt = 1;
cpu_get_info(ci);
cpu_enable_smep();
diff --git a/sys/arch/amd64/amd64/proc_machdep.c b/sys/arch/amd64/amd64/proc_machdep.c
index a1d6563..82b4e4f 100644
--- a/sys/arch/amd64/amd64/proc_machdep.c
+++ b/sys/arch/amd64/amd64/proc_machdep.c
@@ -264,6 +264,36 @@ sched_switch_to(struct trapframe *tf, struct proc *td)
}
/*
+ * Enable or disable preemption on the current
+ * processor.
+ *
+ * @enable: Enable preemption if true
+ */
+void
+sched_preempt_set(bool enable)
+{
+ struct cpu_info *ci = this_cpu();
+
+ if (ci == NULL) {
+ return;
+ }
+
+ ci->preempt = enable;
+}
+
+/*
+ * Returns true if preemption is currently enabled
+ * on this processor.
+ */
+bool
+sched_preemptable(void)
+{
+ struct cpu_info *ci = this_cpu();
+
+ if (ci == NULL) {
+ return false;
+ }
+
+ return ci->preempt;
+}
+
+/*
* Perform a context switch.
*/
void
@@ -273,6 +303,11 @@ md_sched_switch(struct trapframe *tf)
struct cpu_info *ci;
ci = this_cpu();
+ if (!ci->preempt) {
+ sched_oneshot(false);
+ return;
+ }
+
td = ci->curtd;
mi_sched_switch(td);
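
As a usage sketch (not part of this diff): sched_preempt_set() and
sched_preemptable() let a core opt out of context switches while it
touches per-CPU state; the scheduler timer still fires, but
md_sched_switch() now just re-arms the oneshot and returns. A minimal,
hypothetical caller (percpu_stat_bump() and struct percpu_stat are
illustrative, not from the tree):

```c
#include <stdbool.h>

/* Prototypes as added to sys/include/sys/sched.h by this commit. */
void sched_preempt_set(bool enable);
bool sched_preemptable(void);

struct percpu_stat {
    unsigned long n_events;     /* hypothetical per-CPU counter */
};

static void
percpu_stat_bump(struct percpu_stat *sp)
{
    /* Timer IRQs still arrive, but md_sched_switch() bails early. */
    sched_preempt_set(false);
    sp->n_events++;
    sched_preempt_set(true);
}
```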
diff --git a/sys/arch/amd64/amd64/reboot.c b/sys/arch/amd64/amd64/reboot.c
index d47a352..8ebe15e 100644
--- a/sys/arch/amd64/amd64/reboot.c
+++ b/sys/arch/amd64/amd64/reboot.c
@@ -34,9 +34,49 @@
#include <machine/cpu.h>
#include <dev/acpi/acpi.h>
+static void
+cpu_reset_intel(struct cpu_info *ci)
+{
+ /*
+ * Ivy Bridge processors and their Panther Point chipsets
+ * (family 6) can be reset through the PCH reset control
+ * register at I/O port 0xCF9.
+ */
+ if (ci->family == 6) {
+ outb(0xCF9, 3 << 1);
+ }
+}
+
+/*
+ * Attempt to reboot the system in several stages of
+ * escalation. If a reset via the i8042 controller fails
+ * and we are on an Intel processor, attempt a
+ * chipset-specific reset. If that somehow fails as well,
+ * just smack the CPU with a zero-limit GDT and an
+ * INT $0x0 to force a triple fault.
+ */
+static void
+__cpu_reset(struct cpu_info *ci)
+{
+ /* Try via the i8042 */
+ outb(0x64, 0xFE);
+
+ /* Something went wrong if we are here */
+ if (ci == NULL) {
+ return;
+ }
+
+ if (ci->vendor == CPU_VENDOR_INTEL) {
+ cpu_reset_intel(ci);
+ }
+}
+
void
cpu_reboot(int method)
{
+ struct cpu_info *ci = this_cpu();
+ uint32_t *__dmmy = NULL;	/* stack slot read by lgdt as a zero-limit descriptor */
+
if (ISSET(method, REBOOT_POWEROFF)) {
acpi_sleep(ACPI_SLEEP_S5);
}
@@ -45,10 +85,9 @@ cpu_reboot(int method)
cpu_halt_all();
}
- /* Pulse the reset line until the machine goes down */
- for (;;) {
- outb(0x64, 0xFE);
- }
+ __cpu_reset(ci);
+ asm volatile("lgdt %0; int $0x0" :: "m" (__dmmy));
+ __builtin_unreachable();
}
/*
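
For reference, the final escalation is the classic descriptor-table
triple fault: with a zero-limit GDT loaded, the INT $0x0 cannot find a
usable code segment, the resulting #GP cannot be delivered either, and
the processor resets. A freestanding sketch of the same sequence, using
an explicit descriptor pointer instead of the __dmmy stack slot:

```c
#include <stdint.h>

/* Memory operand layout expected by LGDT: 16-bit limit, 64-bit base. */
struct desc_ptr {
    uint16_t limit;
    uint64_t base;
} __attribute__((packed));

static void __attribute__((noreturn))
triple_fault(void)
{
    /* Zero-limit GDT: every subsequent selector lookup faults. */
    struct desc_ptr dp = { 0, 0 };

    __asm__ volatile("lgdt %0; int $0x0" :: "m" (dp));
    __builtin_unreachable();
}
```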
diff --git a/sys/arch/amd64/isa/i8042.c b/sys/arch/amd64/isa/i8042.c
index eae3c32..095f1f4 100644
--- a/sys/arch/amd64/isa/i8042.c
+++ b/sys/arch/amd64/isa/i8042.c
@@ -89,7 +89,6 @@ static struct i8042_databuf i8042_etab[] = {
}
};
-static struct spinlock data_lock;
static struct spinlock isr_lock;
static bool shift_key = false;
static bool capslock = false;
diff --git a/sys/include/arch/amd64/cpu.h b/sys/include/arch/amd64/cpu.h
index 6ed675e..cf073fe 100644
--- a/sys/include/arch/amd64/cpu.h
+++ b/sys/include/arch/amd64/cpu.h
@@ -58,6 +58,7 @@ struct cpu_info {
uint32_t apicid;
uint32_t feat;
uint32_t vendor; /* Vendor (see CPU_VENDOR_*) */
+ uint8_t preempt : 1; /* CPU is preemptable */
uint8_t ipi_dispatch : 1; /* 1: IPIs being dispatched */
uint8_t ipi_id;
ipi_pend_t ipi_pending[N_IPIVEC];
diff --git a/sys/include/sys/sched.h b/sys/include/sys/sched.h
index 8b0ba02..7bba9df 100644
--- a/sys/include/sys/sched.h
+++ b/sys/include/sys/sched.h
@@ -66,6 +66,9 @@ struct sched_stat {
void sched_stat(struct sched_stat *statp);
void sched_init(void);
+void sched_preempt_set(bool enable);
+bool sched_preemptable(void);
+
void sched_yield(void);
void sched_suspend(struct proc *td, const struct timeval *tv);
void sched_detach(struct proc *td);
diff --git a/sys/include/sys/spinlock.h b/sys/include/sys/spinlock.h
index 140addc..b416152 100644
--- a/sys/include/sys/spinlock.h
+++ b/sys/include/sys/spinlock.h
@@ -44,9 +44,6 @@ void spinlock_release(struct spinlock *lock);
int spinlock_try_acquire(struct spinlock *lock);
int spinlock_usleep(struct spinlock *lock, size_t usec_max);
-/* System-wide locking (be careful!!) */
-int syslock(void);
-void sysrel(void);
#endif
#endif /* !_SYS_SPINLOCK_H_ */
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 9377eed..af697d7 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -190,6 +190,14 @@ exit1(struct proc *td, int flags)
* and do not return.
*/
if (target_pid == curpid) {
+ /*
+ * If the thread is exiting on a core that is not
+ * preemptable, something is not right.
+ */
+ if (__unlikely(!sched_preemptable())) {
+ panic("exit1: cpu %d not preemptable\n", ci->id);
+ }
+
ci->curtd = NULL;
if (parent->pid == 0)
sched_enter();
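
__unlikely() here is presumably the usual branch-prediction hint; a
typical definition (an assumption on my part, the actual macro lives
elsewhere in the tree) would be:

```c
/* Assumed definition; hints the compiler that x is almost always 0. */
#define __unlikely(x) __builtin_expect(!!(x), 0)
```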
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 497aff7..7660f1f 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -42,9 +42,6 @@
#define pr_trace(fmt, ...) kprintf("synch: " fmt, ##__VA_ARGS__)
#define pr_error(...) pr_trace(__VA_ARGS__)
-/* XXX: Be very careful with this */
-static struct spinlock __syslock;
-
/*
* Returns 0 on success, returns non-zero value
* on timeout/failure.
@@ -84,6 +81,7 @@ spinlock_usleep(struct spinlock *lock, size_t usec_max)
void
spinlock_acquire(struct spinlock *lock)
{
+ sched_preempt_set(false);
while (__atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE)) {
md_pause();
}
@@ -110,37 +108,14 @@ spinlock_try_acquire(struct spinlock *lock)
return 1;
}
- while (__atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE));
- return 0;
+ return __atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE);
}
void
spinlock_release(struct spinlock *lock)
{
__atomic_clear(&lock->lock, __ATOMIC_RELEASE);
-}
-
-/*
- * Attempt to hold the system-wide lock, returns 1
- * if already held.
- *
- * XXX: Only use for CRITICAL code sections.
- */
-int
-syslock(void)
-{
- return spinlock_try_acquire(&__syslock);
-}
-
-/*
- * Release the system-wide lock
- *
- * XXX: Only use for CRITICAL code sections.
- */
-void
-sysrel(void)
-{
- spinlock_release(&__syslock);
+ sched_preempt_set(true);
}
/*
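
With syslock()/sysrel() gone, the preemption toggle is folded directly
into the lock primitives: a CPU that holds a spinlock can no longer be
switched away until spinlock_release() re-enables preemption. A minimal
caller sketch (counter_inc() and its lock are illustrative):

```c
#include <sys/spinlock.h>

static struct spinlock counter_lock;
static unsigned long counter;

static void
counter_inc(void)
{
    /* Acquire also calls sched_preempt_set(false) on this CPU... */
    spinlock_acquire(&counter_lock);
    counter++;
    /* ...and release re-enables preemption after dropping the lock. */
    spinlock_release(&counter_lock);
}
```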
diff --git a/usr.bin/screensave/screensave.c b/usr.bin/screensave/screensave.c
index 172ab8b..bb67cde 100644
--- a/usr.bin/screensave/screensave.c
+++ b/usr.bin/screensave/screensave.c
@@ -67,7 +67,7 @@ screensave(void)
uint8_t step = 0;
ts.tv_sec = 0;
- ts.tv_nsec = 70000000;
+ ts.tv_nsec = 3000000;
/* Begin the radiation ::) */
for (;;) {
@@ -76,7 +76,7 @@ screensave(void)
curpix = ctx.io[i];
nextpix = ctx.io[i + 1];
- /* If a multiple of 15, AND, otherwise XOR */
+ /* If a multiple of 16, AND, otherwise XOR */
if ((n_iter & 15) != 0) {
curpix ^= randbuf[0] & 3;
nextpix ^= (curpix | (nextpix << 1));
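
The corrected comment follows from the mask test: 16 is a power of two,
so (n_iter & 15) is zero exactly when n_iter is a multiple of 16, and
the AND branch runs once every 16 iterations. A quick standalone check:

```c
#include <assert.h>

int
main(void)
{
    /* (n & 15) == 0 iff n is a multiple of 16. */
    assert((32 & 15) == 0);
    assert((33 & 15) == 1);
    assert((48 & 15) == 0);
    return 0;
}
```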