Diffstat (limited to 'sys/kern/kern_synch.c')
-rw-r--r-- | sys/kern/kern_synch.c | 69
1 file changed, 52 insertions, 17 deletions
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 57b27d0..7660f1f 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -28,19 +28,20 @@
  */
 
 #include <sys/types.h>
+#include <sys/mutex.h>
 #include <sys/systm.h>
 #include <sys/errno.h>
+#include <sys/sched.h>
 #include <sys/atomic.h>
 #include <sys/syslog.h>
 #include <sys/spinlock.h>
+#include <machine/cdefs.h>
 #include <dev/timer.h>
+#include <string.h>
 
 #define pr_trace(fmt, ...) kprintf("synch: " fmt, ##__VA_ARGS__)
 #define pr_error(...) pr_trace(__VA_ARGS__)
 
-/* XXX: Be very careful with this */
-static struct spinlock __syslock;
-
 /*
  * Returns 0 on success, returns non-zero value
  * on timeout/failure.
@@ -80,7 +81,10 @@ spinlock_usleep(struct spinlock *lock, size_t usec_max)
 void
 spinlock_acquire(struct spinlock *lock)
 {
-    while (__atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE));
+    sched_preempt_set(false);
+    while (__atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE)) {
+        md_pause();
+    }
 }
 
 /*
@@ -104,35 +108,66 @@ spinlock_try_acquire(struct spinlock *lock)
         return 1;
     }
 
-    while (__atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE));
-    return 0;
+    return __atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE);
 }
 
 void
 spinlock_release(struct spinlock *lock)
 {
     __atomic_clear(&lock->lock, __ATOMIC_RELEASE);
+    sched_preempt_set(true);
 }
 
 /*
- * Attempt to hold the system-wide lock, returns 1
- * if already held.
- *
- * XXX: Only use for CRITICAL code sections.
+ * Create a new mutex lock object
  */
-int
-syslock(void)
+struct mutex *
+mutex_new(const char *name)
 {
-    return spinlock_try_acquire(&__syslock);
+    struct mutex *mtx;
+    size_t namelen;
+
+    mtx = dynalloc(sizeof(*mtx));
+    if (mtx == NULL) {
+        return NULL;
+    }
+
+    mtx->lock = 0;
+    namelen = strlen(name);
+
+    /* Don't overflow the name buffer */
+    if (namelen >= MUTEX_NAME_LEN) {
+        namelen = MUTEX_NAME_LEN - 1;
+    }
+
+    memcpy(mtx->name, name, namelen);
+    return mtx;
 }
 
 /*
- * Release the system-wide lock
+ * Acquire a mutex
  *
- * XXX: Only use for CRITICAL code sections.
+ * @mtx: Mutex to acquire
+ * @flags: Optional flags
  */
+int
+mutex_acquire(struct mutex *mtx, int flags)
+{
+    while (__atomic_test_and_set(&mtx->lock, __ATOMIC_ACQUIRE)) {
+        sched_yield();
+    }
+
+    return 0;
+}
+
+void
+mutex_release(struct mutex *mtx)
+{
+    __atomic_clear(&mtx->lock, __ATOMIC_RELEASE);
+}
+
 void
-sysrel(void)
+mutex_free(struct mutex *mtx)
 {
-    spinlock_release(&__syslock);
+    dynfree(mtx);
 }
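For context, a minimal usage sketch of the new mutex API introduced by this commit (not part of the diff itself; the "foo" names are hypothetical, and the prototypes are assumed to come from the <sys/mutex.h> header added above):

#include <sys/mutex.h>

static struct mutex *foo_lock;

static int
foo_init(void)
{
    /* mutex_new() allocates via dynalloc() and returns NULL on failure */
    foo_lock = mutex_new("foo");
    if (foo_lock == NULL) {
        return -1;
    }
    return 0;
}

static void
foo_update(void)
{
    /*
     * Unlike spinlock_acquire(), which now disables preemption and
     * busy-waits with md_pause(), mutex_acquire() calls sched_yield()
     * while contended, so it should only be used where yielding the
     * CPU is acceptable. No flags are defined in this commit, so 0
     * is passed.
     */
    mutex_acquire(foo_lock, 0);
    /* ... critical section ... */
    mutex_release(foo_lock);
}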