Diffstat (limited to 'sys')
-rw-r--r--  sys/include/sys/spinlock.h |  4 +++-
-rw-r--r--  sys/kern/kern_synch.c      | 26 ++++++++++++++++++++++
2 files changed, 29 insertions(+), 1 deletion(-)
diff --git a/sys/include/sys/spinlock.h b/sys/include/sys/spinlock.h
index c136e05..b416152 100644
--- a/sys/include/sys/spinlock.h
+++ b/sys/include/sys/spinlock.h
@@ -33,13 +33,15 @@
#include <sys/types.h>
struct spinlock {
- volatile bool lock;
+ volatile int lock;
};
#if defined(_KERNEL)
void spinlock_acquire(struct spinlock *lock);
void spinlock_release(struct spinlock *lock);
+
+int spinlock_try_acquire(struct spinlock *lock);
int spinlock_usleep(struct spinlock *lock, size_t usec_max);
#endif
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 2011c61..2b64673 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -30,6 +30,7 @@
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/errno.h>
+#include <sys/atomic.h>
#include <sys/syslog.h>
#include <sys/spinlock.h>
#include <dev/timer.h>
@@ -79,6 +80,31 @@ spinlock_acquire(struct spinlock *lock)
while (__atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE));
}
+/*
+ * Lazily acquire a spinlock.
+ *
+ * spinlock_try_acquire() lets only one thread spin
+ * at a time; threads that also want to spin must
+ * explicitly do so on their own.
+ *
+ * This function returns 1 (a value that may itself
+ * be spun on) when the lock is already acquired and
+ * a thread is already spinning on it, and 0 once the
+ * caller has acquired the lock.
+ */
+int
+spinlock_try_acquire(struct spinlock *lock)
+{
+ volatile int locked;
+
+ locked = atomic_load_int(&lock->lock);
+ if (locked != 0) {
+ return 1;
+ }
+
+ while (__atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE));
+ return 0;
+}
+
void
spinlock_release(struct spinlock *lock)
{
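To illustrate the intended calling pattern, here is a minimal, hypothetical caller sketch; it is not part of this commit, and the names foo_lock and foo_enter are assumptions. It shows one way to use the two return values: a return of 0 means the lock is now held by the caller, while a return of 1 means the lock is already taken and the caller must decide on its own whether to keep spinning, sleep, or back off.

/*
 * Hypothetical caller sketch (not part of this commit):
 * shows one possible use of spinlock_try_acquire().
 */
#include <sys/spinlock.h>

static struct spinlock foo_lock = {0};

static void
foo_enter(void)
{
        /*
         * 0 means the lock is ours; 1 means it is already held,
         * so this caller chooses to spin explicitly by retrying.
         */
        while (spinlock_try_acquire(&foo_lock) != 0) {
                /* Busy-wait; a real caller could also sleep or bail out. */
        }

        /* ... critical section ... */

        spinlock_release(&foo_lock);
}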