diff options
author | Ian Moffett <ian@osmora.org> | 2025-04-18 21:57:44 -0400 |
---|---|---|
committer | Ian Moffett <ian@osmora.org> | 2025-04-18 21:57:44 -0400 |
commit | 9906547712a88cf4dc012a6f6bd6e2ad04c5e3f3 (patch) | |
tree | 41b40ec97f5082793b08a495f6a935bc3c1ed25f /sys/kern/kern_synch.c | |
parent | 0b5adaff02190dad76d845381a41b998696d9e97 (diff) | |
parent | 92d4f9dae64ab5325feca1f39e5955415e8275b9 (diff) |
Merge branch 'expt' into aarch64
Signed-off-by: Ian Moffett <ian@osmora.org>
Diffstat (limited to 'sys/kern/kern_synch.c')
-rw-r--r-- | sys/kern/kern_synch.c | 52 |
1 file changed, 52 insertions, 0 deletions
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c index 2011c61..57b27d0 100644 --- a/sys/kern/kern_synch.c +++ b/sys/kern/kern_synch.c @@ -30,6 +30,7 @@ #include <sys/types.h> #include <sys/systm.h> #include <sys/errno.h> +#include <sys/atomic.h> #include <sys/syslog.h> #include <sys/spinlock.h> #include <dev/timer.h> @@ -37,6 +38,9 @@ #define pr_trace(fmt, ...) kprintf("synch: " fmt, ##__VA_ARGS__) #define pr_error(...) pr_trace(__VA_ARGS__) +/* XXX: Be very careful with this */ +static struct spinlock __syslock; + /* * Returns 0 on success, returns non-zero value * on timeout/failure. */ @@ -79,8 +83,56 @@ spinlock_acquire(struct spinlock *lock) while (__atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE)); } +/* + * Lazily acquire a spinlock + * + * spinlock_try_acquire() may only spin one thread + * at a time, threads that want to spin too must + * explicitly do it on their own. + * + * This function returns 1 (a value that may be + * spun on) when the lock is already held, and + * 0 once the lock has been acquired. + */ +int +spinlock_try_acquire(struct spinlock *lock) +{ + volatile int locked; + + locked = atomic_load_int(&lock->lock); + if (locked != 0) { + return 1; + } + + while (__atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE)); + return 0; +} + void spinlock_release(struct spinlock *lock) { __atomic_clear(&lock->lock, __ATOMIC_RELEASE); } + +/* + * Attempt to hold the system-wide lock, returns 1 + * if already held. + * + * XXX: Only use for CRITICAL code sections. + */ +int +syslock(void) +{ + return spinlock_try_acquire(&__syslock); +} + +/* + * Release the system-wide lock + * + * XXX: Only use for CRITICAL code sections. + */ +void +sysrel(void) +{ + spinlock_release(&__syslock); +} |