-rw-r--r-- | sys/arch/amd64/amd64/reboot.c   | 47
-rw-r--r-- | sys/include/sys/spinlock.h      |  3
-rw-r--r-- | sys/kern/kern_synch.c           | 26
-rw-r--r-- | usr.bin/screensave/screensave.c |  4
4 files changed, 45 insertions, 35 deletions
diff --git a/sys/arch/amd64/amd64/reboot.c b/sys/arch/amd64/amd64/reboot.c
index d47a352..8ebe15e 100644
--- a/sys/arch/amd64/amd64/reboot.c
+++ b/sys/arch/amd64/amd64/reboot.c
@@ -34,9 +34,49 @@
 #include <machine/cpu.h>
 #include <dev/acpi/acpi.h>
 
+static void
+cpu_reset_intel(struct cpu_info *ci)
+{
+	/*
+	 * Ivy bridge processors and their panther point chipsets
+	 * (family 6) can be reset through special PCH reset control
+	 * registers
+	 */
+	if (ci->family == 6) {
+		outb(0xCF9, 3 << 1);
+	}
+}
+
+/*
+ * Attempt to reboot the system, we do this in many
+ * stages of escalation. If a reset via the i8042
+ * controller fails and we are on an Intel processor,
+ * attempt a chipset specific reset. If that somehow fails
+ * as well, just smack the cpu with a NULL IDTR as well
+ * as an INT $0x0
+ */
+static void
+__cpu_reset(struct cpu_info *ci)
+{
+	/* Try via the i8042 */
+	outb(0x64, 0xFE);
+
+	/* Something went wrong if we are here */
+	if (ci == NULL) {
+		return;
+	}
+
+	if (ci->vendor == CPU_VENDOR_INTEL) {
+		cpu_reset_intel(ci);
+	}
+}
+
 void
 cpu_reboot(int method)
 {
+	struct cpu_info *ci = this_cpu();
+	uint32_t *__dmmy = NULL;
+
 	if (ISSET(method, REBOOT_POWEROFF)) {
 		acpi_sleep(ACPI_SLEEP_S5);
 	}
@@ -45,10 +85,9 @@ cpu_reboot(int method)
 		cpu_halt_all();
 	}
 
-	/* Pulse the reset line until the machine goes down */
-	for (;;) {
-		outb(0x64, 0xFE);
-	}
+	__cpu_reset(ci);
+	asm volatile("lgdt %0; int $0x0" :: "m" (__dmmy));
+	__builtin_unreachable();
 }
 
 /*
diff --git a/sys/include/sys/spinlock.h b/sys/include/sys/spinlock.h
index 140addc..b416152 100644
--- a/sys/include/sys/spinlock.h
+++ b/sys/include/sys/spinlock.h
@@ -44,9 +44,6 @@ void spinlock_release(struct spinlock *lock);
 int spinlock_try_acquire(struct spinlock *lock);
 int spinlock_usleep(struct spinlock *lock, size_t usec_max);
 
-/* System-wide locking (be careful!!) */
-int syslock(void);
-void sysrel(void);
 #endif
 
 #endif /* !_SYS_SPINLOCK_H_ */
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 3fe6047..7660f1f 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -42,9 +42,6 @@
 #define pr_trace(fmt, ...) kprintf("synch: " fmt, ##__VA_ARGS__)
 #define pr_error(...) pr_trace(__VA_ARGS__)
 
-/* XXX: Be very careful with this */
-static struct spinlock __syslock;
-
 /*
  * Returns 0 on success, returns non-zero value
  * on timeout/failure.
@@ -122,29 +119,6 @@ spinlock_release(struct spinlock *lock)
 }
 
 /*
- * Attempt to hold the system-wide lock, returns 1
- * if already held.
- *
- * XXX: Only use for CRITICAL code sections.
- */
-int
-syslock(void)
-{
-	return spinlock_try_acquire(&__syslock);
-}
-
-/*
- * Release the system-wide lock
- *
- * XXX: Only use for CRITICAL code sections.
- */
-void
-sysrel(void)
-{
-	spinlock_release(&__syslock);
-}
-
-/*
  * Create a new mutex lock object
  */
 struct mutex *
diff --git a/usr.bin/screensave/screensave.c b/usr.bin/screensave/screensave.c
index 172ab8b..bb67cde 100644
--- a/usr.bin/screensave/screensave.c
+++ b/usr.bin/screensave/screensave.c
@@ -67,7 +67,7 @@ screensave(void)
 	uint8_t step = 0;
 
 	ts.tv_sec = 0;
-	ts.tv_nsec = 70000000;
+	ts.tv_nsec = 3000000;
 
 	/* Begin the radiation ::) */
 	for (;;) {
@@ -76,7 +76,7 @@
 			curpix = ctx.io[i];
 			nextpix = ctx.io[i + 1];
 
-			/* If a multiple of 15, AND, otherwise XOR */
+			/* If a multiple of 16, AND, otherwise XOR */
 			if ((n_iter & 15) != 0) {
 				curpix ^= randbuf[0] & 3;
 				nextpix ^= (curpix | (nextpix << 1));
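
For reference, the reset escalation path added to reboot.c can be read as the standalone sketch below. The outb() helper, the function name reset_escalate(), and its parameters are illustrative stand-ins, not the kernel's own API (this_cpu() and the cpu_info fields are not reproduced here); the port writes and the final lgdt/int $0x0 fault mirror what the diff introduces and are only meaningful when executed in ring 0.

#include <stddef.h>
#include <stdint.h>

/* Assumed port-I/O helper; stands in for the kernel's outb() */
static inline void
outb(uint16_t port, uint8_t val)
{
	__asm__ volatile("outb %0, %1" :: "a" (val), "Nd" (port));
}

/* Hypothetical sketch of the escalation order the diff introduces */
void
reset_escalate(int is_intel, int family)
{
	uint32_t *dummy = NULL;

	/* 1. Ask the i8042 controller to pulse the reset line (command 0xFE) */
	outb(0x64, 0xFE);

	/* 2. Intel family 6 (Ivy Bridge / Panther Point): hard reset via the PCH 0xCF9 register */
	if (is_intel && family == 6)
		outb(0xCF9, 3 << 1);

	/* 3. Last resort: load a garbage descriptor table and take an interrupt to force a fault */
	__asm__ volatile("lgdt %0; int $0x0" :: "m" (dummy));
	__builtin_unreachable();
}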