From b1642ad065b04f452227bff58e951f67fb4cec47 Mon Sep 17 00:00:00 2001
From: Ian Moffett
Date: Thu, 17 Apr 2025 00:42:57 -0400
Subject: kernel: synch: Add lazy spinlock acquire

Signed-off-by: Ian Moffett
---
 sys/include/sys/spinlock.h |  4 +++-
 sys/kern/kern_synch.c      | 26 ++++++++++++++++++++++++++
 2 files changed, 29 insertions(+), 1 deletion(-)

diff --git a/sys/include/sys/spinlock.h b/sys/include/sys/spinlock.h
index c136e05..b416152 100644
--- a/sys/include/sys/spinlock.h
+++ b/sys/include/sys/spinlock.h
@@ -33,13 +33,15 @@
 #include
 
 struct spinlock {
-    volatile bool lock;
+    volatile int lock;
 };
 
 #if defined(_KERNEL)
 
 void spinlock_acquire(struct spinlock *lock);
 void spinlock_release(struct spinlock *lock);
+
+int spinlock_try_acquire(struct spinlock *lock);
 int spinlock_usleep(struct spinlock *lock, size_t usec_max);
 
 #endif
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 2011c61..2b64673 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -30,6 +30,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -79,6 +80,31 @@ spinlock_acquire(struct spinlock *lock)
     while (__atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE));
 }
 
+/*
+ * Lazily (non-blocking) acquire a spinlock.
+ *
+ * Unlike spinlock_acquire(), at most one thread spins
+ * inside this function; other threads that wish to wait
+ * must explicitly spin on the return value themselves.
+ *
+ * Returns 0 when the lock has been acquired here, or 1
+ * (a value the caller may itself spin on) when the lock
+ * was already held by another thread.
+ */
+int
+spinlock_try_acquire(struct spinlock *lock)
+{
+    volatile int locked;
+
+    locked = atomic_load_int(&lock->lock);
+    if (locked != 0) {
+        return 1;
+    }
+
+    while (__atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE));
+    return 0;
+}
+
 void
 spinlock_release(struct spinlock *lock)
 {
-- 
cgit v1.2.3