Diffstat (limited to 'sys/kern/kern_synch.c')
-rw-r--r--  sys/kern/kern_synch.c  112
1 file changed, 112 insertions, 0 deletions
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 2011c61..497aff7 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -28,15 +28,23 @@
*/
#include <sys/types.h>
+#include <sys/mutex.h>
#include <sys/systm.h>
#include <sys/errno.h>
+#include <sys/sched.h>
+#include <sys/atomic.h>
#include <sys/syslog.h>
#include <sys/spinlock.h>
+#include <machine/cdefs.h>
#include <dev/timer.h>
+#include <string.h>
#define pr_trace(fmt, ...) kprintf("synch: " fmt, ##__VA_ARGS__)
#define pr_error(...) pr_trace(__VA_ARGS__)
+/* XXX: Be very careful with this; only take it through syslock()/sysrel() */
+static struct spinlock __syslock;
+
/*
* Returns 0 on success, returns non-zero value
* on timeout/failure.
@@ -76,7 +84,34 @@ spinlock_usleep(struct spinlock *lock, size_t usec_max)
void
spinlock_acquire(struct spinlock *lock)
{
+ while (__atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE)) {
+ md_pause();
+ }
+}
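
A minimal caller sketch (hypothetical "counter_lock"/counter_inc() names, not part
of this diff), illustrating the usual spinlock_acquire()/spinlock_release() pairing
around a short critical section:

    static struct spinlock counter_lock;
    static volatile int counter;

    void
    counter_inc(void)
    {
        /* Spins (with md_pause() backoff) until the lock is ours */
        spinlock_acquire(&counter_lock);
        counter++;
        spinlock_release(&counter_lock);
    }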
+
+/*
+ * Lazily acquire a spinlock
+ *
+ * spinlock_try_acquire() will only spin one thread
+ * at a time; threads that also want to spin must
+ * do so explicitly on their own.
+ *
+ * This function returns 1 (a value that may be
+ * spun on) when the lock is already held by another
+ * thread, and 0 once the lock has been acquired.
+ */
+int
+spinlock_try_acquire(struct spinlock *lock)
+{
+ volatile int locked;
+
+ locked = atomic_load_int(&lock->lock);
+ if (locked != 0) {
+ return 1;
+ }
+
while (__atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE));
+ return 0;
}
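
A possible caller-side pattern for the lazy variant (hypothetical "io_lock"/io_enter()
names; md_pause() is the same helper used above): a return value of 1 means the lock
is already held, so the caller does its own spinning until a later attempt returns 0.

    static struct spinlock io_lock;

    void
    io_enter(void)
    {
        /* 0 means we now hold the lock; 1 means it was already held */
        while (spinlock_try_acquire(&io_lock) != 0) {
            md_pause();     /* spin out here, as the comment above asks */
        }

        /* ... critical section ... */
        spinlock_release(&io_lock);
    }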
void
@@ -84,3 +119,80 @@ spinlock_release(struct spinlock *lock)
{
__atomic_clear(&lock->lock, __ATOMIC_RELEASE);
}
+
+/*
+ * Attempt to acquire the system-wide lock;
+ * returns 1 if it is already held.
+ *
+ * XXX: Only use for CRITICAL code sections.
+ */
+int
+syslock(void)
+{
+ return spinlock_try_acquire(&__syslock);
+}
+
+/*
+ * Release the system-wide lock
+ *
+ * XXX: Only use for CRITICAL code sections.
+ */
+void
+sysrel(void)
+{
+ spinlock_release(&__syslock);
+}
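
A sketch of how the system-wide lock might be used (hypothetical function and error
handling): syslock() returns 1 if the lock is already held, otherwise it returns 0
once the lock has been taken, at which point it must be paired with sysrel().

    int
    flush_critical_state(void)
    {
        /* 1 means some other holder already owns __syslock */
        if (syslock() != 0) {
            return -EAGAIN;     /* busy; caller may retry */
        }

        /* ... critical, system-wide work ... */
        sysrel();
        return 0;
    }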
+
+/*
+ * Create a new mutex lock object
+ */
+struct mutex *
+mutex_new(const char *name)
+{
+ struct mutex *mtx;
+ size_t namelen;
+
+ mtx = dynalloc(sizeof(*mtx));
+ if (mtx == NULL) {
+ return NULL;
+ }
+
+ mtx->lock = 0;
+ namelen = strlen(name);
+
+ /* Don't overflow the name buffer */
+ if (namelen >= MUTEX_NAME_LEN) {
+ namelen = MUTEX_NAME_LEN - 1;
+ }
+
+ memcpy(mtx->name, name, namelen);
+ mtx->name[namelen] = '\0';
+ return mtx;
+}
+
+/*
+ * Acquire a mutex
+ *
+ * @mtx: Mutex to acquire
+ * @flags: Optional flags
+ */
+int
+mutex_acquire(struct mutex *mtx, int flags)
+{
+ while (__atomic_test_and_set(&mtx->lock, __ATOMIC_ACQUIRE)) {
+ sched_yield();
+ }
+
+ return 0;
+}
+
+void
+mutex_release(struct mutex *mtx)
+{
+ __atomic_clear(&mtx->lock, __ATOMIC_RELEASE);
+}
+
+void
+mutex_free(struct mutex *mtx)
+{
+ dynfree(mtx);
+}
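
Finally, a hedged sketch of the mutex lifecycle introduced above (the "tty" name,
the -ENOMEM return, and the tty_* functions are illustrative only). Unlike the
spinlocks, mutex_acquire() calls sched_yield() while waiting, so it suits code
that is allowed to be rescheduled.

    static struct mutex *tty_mtx;

    int
    tty_init(void)
    {
        /* The name is truncated to fit MUTEX_NAME_LEN */
        tty_mtx = mutex_new("tty");
        if (tty_mtx == NULL) {
            return -ENOMEM;
        }
        return 0;
    }

    void
    tty_write(void)
    {
        mutex_acquire(tty_mtx, 0);      /* yields instead of spinning */
        /* ... potentially long critical section ... */
        mutex_release(tty_mtx);
    }

    void
    tty_deinit(void)
    {
        mutex_free(tty_mtx);
    }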