Diffstat (limited to 'sys')
-rw-r--r--  sys/arch/amd64/amd64/reboot.c  47
-rw-r--r--  sys/include/sys/spinlock.h      3
-rw-r--r--  sys/kern/kern_synch.c          26
3 files changed, 43 insertions, 33 deletions
diff --git a/sys/arch/amd64/amd64/reboot.c b/sys/arch/amd64/amd64/reboot.c
index d47a352..8ebe15e 100644
--- a/sys/arch/amd64/amd64/reboot.c
+++ b/sys/arch/amd64/amd64/reboot.c
@@ -34,9 +34,49 @@
#include <machine/cpu.h>
#include <dev/acpi/acpi.h>
+static void
+cpu_reset_intel(struct cpu_info *ci)
+{
+ /*
+ * Ivy Bridge processors and their Panther Point chipsets
+ * (family 6) can be reset through the PCH reset control
+ * register.
+ */
+ if (ci->family == 6) {
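+ /* RST_CPU | SYS_RST: request a full system reset */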
+ outb(0xCF9, 3 << 1);
+ }
+}
+
+/*
+ * Attempt to reboot the system. We do this in several
+ * stages of escalation: if a reset via the i8042
+ * controller fails and we are on an Intel processor,
+ * attempt a chipset-specific reset. If that somehow
+ * fails as well, smack the CPU with a NULL IDTR and
+ * an INT $0x0 to force a triple fault.
+ */
+static void
+__cpu_reset(struct cpu_info *ci)
+{
+ /* Try via the i8042: command 0xFE pulses the CPU reset line */
+ outb(0x64, 0xFE);
+
+ /* Still running means the pulse failed; without cpu_info, stop here */
+ if (ci == NULL) {
+ return;
+ }
+
+ if (ci->vendor == CPU_VENDOR_INTEL) {
+ cpu_reset_intel(ci);
+ }
+}
+
void
cpu_reboot(int method)
{
+ struct cpu_info *ci = this_cpu();
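+ /* NULL pointer storage doubles as a zero-limit IDT descriptor */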
+ uint32_t *__dmmy = NULL;
+
if (ISSET(method, REBOOT_POWEROFF)) {
acpi_sleep(ACPI_SLEEP_S5);
}
@@ -45,10 +85,9 @@ cpu_reboot(int method)
cpu_halt_all();
}
- /* Pulse the reset line until the machine goes down */
- for (;;) {
- outb(0x64, 0xFE);
- }
+ __cpu_reset(ci);
+
+ /* Last resort: load a zero-limit IDT and trap to force a triple fault */
+ asm volatile("lidt %0; int $0x0" :: "m" (__dmmy));
+ __builtin_unreachable();
}
/*
diff --git a/sys/include/sys/spinlock.h b/sys/include/sys/spinlock.h
index 140addc..b416152 100644
--- a/sys/include/sys/spinlock.h
+++ b/sys/include/sys/spinlock.h
@@ -44,9 +44,6 @@ void spinlock_release(struct spinlock *lock);
int spinlock_try_acquire(struct spinlock *lock);
int spinlock_usleep(struct spinlock *lock, size_t usec_max);
-/* System-wide locking (be careful!!) */
-int syslock(void);
-void sysrel(void);
#endif
#endif /* !_SYS_SPINLOCK_H_ */
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 3fe6047..7660f1f 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -42,9 +42,6 @@
#define pr_trace(fmt, ...) kprintf("synch: " fmt, ##__VA_ARGS__)
#define pr_error(...) pr_trace(__VA_ARGS__)
-/* XXX: Be very careful with this */
-static struct spinlock __syslock;
-
/*
* Returns 0 on success, returns non-zero value
* on timeout/failure.
@@ -122,29 +119,6 @@ spinlock_release(struct spinlock *lock)
}
/*
- * Attempt to hold the system-wide lock, returns 1
- * if already held.
- *
- * XXX: Only use for CRITICAL code sections.
- */
-int
-syslock(void)
-{
- return spinlock_try_acquire(&__syslock);
-}
-
-/*
- * Release the system-wide lock
- *
- * XXX: Only use for CRITICAL code sections.
- */
-void
-sysrel(void)
-{
- spinlock_release(&__syslock);
-}
-
-/*
* Create a new mutex lock object
*/
struct mutex *