#ifndef MLIBC_LOCK_HPP
#define MLIBC_LOCK_HPP

#include <errno.h>
#include <stdint.h>
#include <mlibc/internal-sysdeps.hpp>
#include <mlibc/debug.hpp>
#include <mlibc/tid.hpp>
#include <bits/ensure.h>
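
// A futex-backed mutex. The lock word (_state) holds the owner's TID in its
// low 30 bits (ownerMask) and a "waiters present" flag in bit 31 (waitersBit);
// contended lockers set the flag and sleep via sys_futex_wait() until the
// owner wakes them in unlock(). The Recursive parameter selects whether the
// owning thread may re-acquire the lock.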
template<bool Recursive>
struct FutexLockImpl {
	FutexLockImpl() : _state{0}, _recursion{0} { }

	FutexLockImpl(const FutexLockImpl &) = delete;
	FutexLockImpl &operator= (const FutexLockImpl &) = delete;

	static constexpr uint32_t waitersBit = (static_cast<uint32_t>(1) << 31);
	static constexpr uint32_t ownerMask = (static_cast<uint32_t>(1) << 30) - 1;
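
	// Acquires the lock, blocking on the futex while another thread holds it.
	// For the recursive variant, re-locking by the owner increments the
	// recursion level; the non-recursive variant treats that as a deadlock.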
	void lock() {
		unsigned int this_tid = mlibc::this_tid();
		unsigned int expected = 0;
		while(true) {
			if(!expected) {
				// Try to take the mutex here.
				if(__atomic_compare_exchange_n(&_state,
						&expected, this_tid, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) {
					if constexpr (Recursive) {
						__ensure(!_recursion);
						_recursion = 1;
					}
					return;
				}
			}else{
				// If this (recursive) mutex is already owned by us, increment the recursion level.
				if((expected & ownerMask) == this_tid) {
					if constexpr (Recursive)
						++_recursion;
					else
						mlibc::panicLogger() << "mlibc: FutexLock deadlock detected!" << frg::endlog;
					return;
				}

				// Wait on the futex if the waiters flag is set.
				if(expected & waitersBit) {
					int e = mlibc::sys_futex_wait((int *)&_state, expected, nullptr);

					// If the wait returns EAGAIN, that means that the waitersBit was just unset by
					// some other thread. In this case, we should loop back around.
					if (e && e != EAGAIN)
						mlibc::panicLogger() << "sys_futex_wait() failed with error code " << e << frg::endlog;

					// Opportunistically try to take the lock after we wake up.
					expected = 0;
				}else{
					// Otherwise we have to set the waiters flag first.
					unsigned int desired = expected | waitersBit;
					if(__atomic_compare_exchange_n((int *)&_state,
							reinterpret_cast<int*>(&expected), desired, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
						expected = desired;
				}
			}
		}
	}
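
	// Tries to acquire the lock without blocking; returns true on success.
	// For the recursive variant, re-locking by the owner succeeds and bumps
	// the recursion level; the non-recursive variant returns false instead.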
	bool try_lock() {
		unsigned int this_tid = mlibc::this_tid();
		unsigned int expected = __atomic_load_n(&_state, __ATOMIC_RELAXED);

		if(!expected) {
			// Try to take the mutex here.
			if(__atomic_compare_exchange_n(&_state,
					&expected, this_tid, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) {
				if constexpr (Recursive)
					_recursion = 1;
				return true;
			}
		} else {
			// If this (recursive) mutex is already owned by us, increment the recursion level.
			if((expected & ownerMask) == this_tid) {
				if constexpr (Recursive) {
					__ensure(_recursion);
					++_recursion;
					return true;
				} else {
					return false;
				}
			}
		}

		return false;
	}
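
	// Releases the lock. If the recursion level stays positive (recursive
	// variant), the lock remains held; otherwise any waiters are woken via the futex.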
	void unlock() {
		// Decrement the recursion level and unlock if we hit zero.
		if constexpr (Recursive) {
			__ensure(_recursion);
			if(--_recursion)
				return;
		}

		// Reset the mutex to the unlocked state.
		auto state = __atomic_exchange_n(&_state, 0, __ATOMIC_RELEASE);
		__ensure((state & ownerMask) == mlibc::this_tid());

		if(state & waitersBit) {
			// Wake the futex if there were waiters. Since the mutex might not exist at this location
			// anymore, we must conservatively ignore EACCES and EINVAL which may occur as a result.
			int e = mlibc::sys_futex_wake((int *)&_state);
			__ensure(e >= 0 || e == EACCES || e == EINVAL);
		}
	}

private:
	// Lock word: owner TID in the low 30 bits, waiters flag in bit 31.
	uint32_t _state;

	// Recursion depth of the current owner; only used by the recursive variant.
	uint32_t _recursion;
};

using FutexLock = FutexLockImpl<false>;
using RecursiveFutexLock = FutexLockImpl<true>;
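
// Illustrative usage sketch (local variable names are examples only):
//
//   FutexLock m;
//   m.lock();
//   // ... critical section ...
//   m.unlock();
//
//   RecursiveFutexLock rm;
//   rm.lock();
//   rm.lock();   // same thread may re-enter; recursion level becomes 2
//   rm.unlock();
//   rm.unlock(); // lock released here
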
#endif // MLIBC_LOCK_HPP