diff options
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/kern_exit.c  |  8 ++++++++
-rw-r--r--  sys/kern/kern_synch.c | 31 +++----------------------------
2 files changed, 11 insertions(+), 28 deletions(-)
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c index 9377eed..af697d7 100644 --- a/sys/kern/kern_exit.c +++ b/sys/kern/kern_exit.c @@ -190,6 +190,14 @@ exit1(struct proc *td, int flags) * and do not return. */ if (target_pid == curpid) { + /* + * If the thread is exiting on a core that is not + * preemptable, something is not right. + */ + if (__unlikely(!sched_preemptable())) { + panic("exit1: cpu %d not preemptable\n", ci->id); + } + ci->curtd = NULL; if (parent->pid == 0) sched_enter(); diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c index 497aff7..7660f1f 100644 --- a/sys/kern/kern_synch.c +++ b/sys/kern/kern_synch.c @@ -42,9 +42,6 @@ #define pr_trace(fmt, ...) kprintf("synch: " fmt, ##__VA_ARGS__) #define pr_error(...) pr_trace(__VA_ARGS__) -/* XXX: Be very careful with this */ -static struct spinlock __syslock; - /* * Returns 0 on success, returns non-zero value * on timeout/failure. @@ -84,6 +81,7 @@ spinlock_usleep(struct spinlock *lock, size_t usec_max) void spinlock_acquire(struct spinlock *lock) { + sched_preempt_set(false); while (__atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE)) { md_pause(); } @@ -110,37 +108,14 @@ spinlock_try_acquire(struct spinlock *lock) return 1; } - while (__atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE)); - return 0; + return __atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE); } void spinlock_release(struct spinlock *lock) { __atomic_clear(&lock->lock, __ATOMIC_RELEASE); -} - -/* - * Attempt to hold the system-wide lock, returns 1 - * if already held. - * - * XXX: Only use for CRITICAL code sections. - */ -int -syslock(void) -{ - return spinlock_try_acquire(&__syslock); -} - -/* - * Release the system-wide lock - * - * XXX: Only use for CRITICAL code sections. - */ -void -sysrel(void) -{ - spinlock_release(&__syslock); + sched_preempt_set(true); } /* |