Commit 0159f77

Committed Jul 22, 2023
feat: add spinlock implementation from rigtorp; it won't be used for now
1 parent 551d3be commit 0159f77

File tree

1 file changed: +59 −0 lines

 

include/zephyr/spin_lock.hpp

@@ -0,0 +1,59 @@
#ifndef ZEPHYR_SPIN_LOCK_HPP_
#define ZEPHYR_SPIN_LOCK_HPP_

#include "zephyr/hardware_interference_size.hpp"

#include <atomic>

namespace ze {
/*
 * @brief A spin lock implementation by Erik Rigtorp (https://rigtorp.se/spinlock/)
 * Note that there is a well-known forum post by Linus Torvalds arguing that spin locks
 * should not be used in user space (https://www.realworldtech.com/forum/?threadid=189711&curpostid=189723).
 * The gist: a user-space thread can be involuntarily descheduled by the OS while still
 * holding the lock once its time slice runs out, so spinning in user space can cause
 * unpredictable latencies and needless contention while other threads spin on a lock
 * that cannot be released.
 * */
struct alignas(hardware_constructive_interference_size) spin_lock {
    // Align the flag to a cache-line boundary to avoid false sharing with
    // neighbouring data (alignas placed before the declaration it applies to)
    alignas(hardware_constructive_interference_size) std::atomic<bool> m_lock = {false};

    /*
     * Attempt to acquire the mutex. Otherwise, busy wait (spin) until it is available.
     * */
    inline void lock() noexcept {
        while (true) {
            // Optimistically assume the lock is free on the first try
            if (!m_lock.exchange(true, std::memory_order_acquire)) { return; }

            // Wait for the lock to be released without generating cache misses
            while (m_lock.load(std::memory_order_relaxed)) {
                // Issue an x86 PAUSE instruction to reduce contention between
                // hyper-threads (GCC/Clang builtin, x86-only; see the portable
                // sketch after the file)
                __builtin_ia32_pause();
            }
        }
    }

    /*
     * Attempt to acquire the mutex, but unlike lock() it does not wait on failure.
     *
     * @return true if the lock was acquired, false otherwise.
     * */
    inline bool try_lock() noexcept {
        // First do a relaxed load to check whether the lock is free, to prevent
        // unnecessary cache misses if someone does while (!try_lock())
        return !m_lock.load(std::memory_order_relaxed) &&
               !m_lock.exchange(true, std::memory_order_acquire);
    }

    /*
     * Releases the mutex.
     * */
    inline void unlock() noexcept {
        m_lock.store(false, std::memory_order_release);
    }
};
} // namespace ze

#endif // ZEPHYR_SPIN_LOCK_HPP_
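
The included zephyr/hardware_interference_size.hpp is not part of this commit. A minimal sketch of what it plausibly provides, assuming it forwards the C++17 constants from <new> when the compiler supports them and otherwise falls back to a 64-byte cache line (the fallback value and the destructive-size constant are assumptions, not taken from the repository):

#ifndef ZEPHYR_HARDWARE_INTERFERENCE_SIZE_HPP_
#define ZEPHYR_HARDWARE_INTERFERENCE_SIZE_HPP_

#include <cstddef>
#include <new>

namespace ze {
#ifdef __cpp_lib_hardware_interference_size
// Forward the standard constants (C++17, but library support varies)
inline constexpr std::size_t hardware_constructive_interference_size =
    std::hardware_constructive_interference_size;
inline constexpr std::size_t hardware_destructive_interference_size =
    std::hardware_destructive_interference_size;
#else
// Assumed fallback: 64 bytes matches the cache line size of most x86-64 and ARM cores
inline constexpr std::size_t hardware_constructive_interference_size = 64;
inline constexpr std::size_t hardware_destructive_interference_size = 64;
#endif
} // namespace ze

#endif // ZEPHYR_HARDWARE_INTERFERENCE_SIZE_HPP_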
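
A caveat: __builtin_ia32_pause() is a GCC/Clang builtin that exists only for x86 targets, so the header as committed will not compile for ARM even though the comment mentions YIELD. A sketch of a portable pause helper (the name spin_pause is hypothetical, not from the commit):

// Hypothetical portable replacement for the bare __builtin_ia32_pause() call
inline void spin_pause() noexcept {
#if defined(__x86_64__) || defined(__i386__)
    __builtin_ia32_pause();        // x86 PAUSE hint
#elif defined(__aarch64__) || defined(__arm__)
    __asm__ __volatile__("yield"); // ARM YIELD hint
#else
    // No spin hint available on this target; callers just spin
#endif
}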
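
Since spin_lock provides lock(), try_lock(), and unlock(), it satisfies the standard Lockable requirements and composes with the RAII wrappers in <mutex>. A usage sketch (thread and iteration counts are illustrative, not from the commit):

#include "zephyr/spin_lock.hpp"

#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

int main() {
    ze::spin_lock lock;
    long counter = 0;

    std::vector<std::thread> workers;
    for (int t = 0; t < 4; ++t) {
        workers.emplace_back([&] {
            for (int i = 0; i < 100000; ++i) {
                std::lock_guard<ze::spin_lock> guard(lock); // lock()/unlock() via RAII
                ++counter;
            }
        });
    }
    for (auto& w : workers) { w.join(); }

    std::printf("counter = %ld\n", counter); // expect 400000
    return 0;
}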
