Lock.cpp

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/SourceLocation.h>
#include <Kernel/Debug.h>
#include <Kernel/KSyms.h>
#include <Kernel/Lock.h>
#include <Kernel/Thread.h>

namespace Kernel {
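
// Lock is a sleeping, recursively acquirable mutex. It can be held either
// exclusively (a single holder, tracked in m_holder) or shared (multiple
// holders, tracked in m_shared_holders). The bookkeeping itself is guarded
// by the atomic m_lock flag; threads that cannot take the lock block on
// m_queue until a holder releases it.
//
// Callers usually go through a RAII guard rather than calling lock() and
// unlock() by hand. A minimal usage sketch, assuming the Locker helper
// declared in Kernel/Lock.h takes a lock and a mode:
//
//     Locker locker(some_lock, Lock::Mode::Shared);
//     // ... critical section ...
//     // the hold is released when `locker` goes out of scope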
#if LOCK_DEBUG
void Lock::lock(Mode mode, const SourceLocation& location)
#else
void Lock::lock(Mode mode)
#endif
{
    // NOTE: This may be called from an interrupt handler (not an IRQ handler)
    // and also from within critical sections!
    VERIFY(!Processor::current().in_irq());
    VERIFY(mode != Mode::Unlocked);
    auto current_thread = Thread::current();

    ScopedCritical critical; // in case we're not in a critical section already
    for (;;) {
        if (m_lock.exchange(true, AK::memory_order_acq_rel) != false) {
            // I don't know *who* is using "m_lock", so just yield.
            Scheduler::yield_from_critical();
            continue;
        }

        // FIXME: Do not add new readers if writers are queued.
        Mode current_mode = m_mode;
        switch (current_mode) {
        case Mode::Unlocked: {
            dbgln_if(LOCK_TRACE_DEBUG, "Lock::lock @ {} ({}): acquire {}, currently unlocked", this, m_name, mode_to_string(mode));
            m_mode = mode;
            VERIFY(!m_holder);
            VERIFY(m_shared_holders.is_empty());
            if (mode == Mode::Exclusive) {
                m_holder = current_thread;
            } else {
                VERIFY(mode == Mode::Shared);
                m_shared_holders.set(current_thread, 1);
            }
            VERIFY(m_times_locked == 0);
            m_times_locked++;

#if LOCK_DEBUG
            if (current_thread) {
                current_thread->holding_lock(*this, 1, location);
            }
#endif
            m_queue.should_block(true);
            m_lock.store(false, AK::memory_order_release);
            return;
        }
        case Mode::Exclusive: {
            VERIFY(m_holder);
            if (m_holder != current_thread)
                break;
            VERIFY(m_shared_holders.is_empty());

            if constexpr (LOCK_TRACE_DEBUG) {
                if (mode == Mode::Exclusive)
                    dbgln("Lock::lock @ {} ({}): acquire {}, currently exclusive, holding: {}", this, m_name, mode_to_string(mode), m_times_locked);
                else
                    dbgln("Lock::lock @ {} ({}): acquire exclusive (requested {}), currently exclusive, holding: {}", this, m_name, mode_to_string(mode), m_times_locked);
            }

            VERIFY(mode == Mode::Exclusive || mode == Mode::Shared);
            VERIFY(m_times_locked > 0);
            m_times_locked++;

#if LOCK_DEBUG
            current_thread->holding_lock(*this, 1, location);
#endif
            m_lock.store(false, AK::memory_order_release);
            return;
        }
        case Mode::Shared: {
            VERIFY(!m_holder);
            if (mode != Mode::Shared)
                break;

            dbgln_if(LOCK_TRACE_DEBUG, "Lock::lock @ {} ({}): acquire {}, currently shared, locks held {}", this, m_name, mode_to_string(mode), m_times_locked);

            VERIFY(m_times_locked > 0);
            m_times_locked++;
            VERIFY(!m_shared_holders.is_empty());
            auto it = m_shared_holders.find(current_thread);
            if (it != m_shared_holders.end())
                it->value++;
            else
                m_shared_holders.set(current_thread, 1);

#if LOCK_DEBUG
            current_thread->holding_lock(*this, 1, location);
#endif
            m_lock.store(false, AK::memory_order_release);
            return;
        }
        default:
            VERIFY_NOT_REACHED();
        }
        m_lock.store(false, AK::memory_order_release);
        dbgln_if(LOCK_TRACE_DEBUG, "Lock::lock @ {} ({}) waiting...", this, m_name);
        m_queue.wait_forever(m_name);
        dbgln_if(LOCK_TRACE_DEBUG, "Lock::lock @ {} ({}) waited", this, m_name);
    }
}
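
// Releases one hold on the lock. The lock is recursive, so only the release
// of the last hold (m_times_locked reaching zero) returns the lock to the
// Unlocked state and wakes one thread blocked on m_queue.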
void Lock::unlock()
{
    // NOTE: This may be called from an interrupt handler (not an IRQ handler)
    // and also from within critical sections!
    VERIFY(!Processor::current().in_irq());
    auto current_thread = Thread::current();
    ScopedCritical critical; // in case we're not in a critical section already
    for (;;) {
        if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
            Mode current_mode = m_mode;
            if constexpr (LOCK_TRACE_DEBUG) {
                if (current_mode == Mode::Shared)
                    dbgln("Lock::unlock @ {} ({}): release {}, locks held: {}", this, m_name, mode_to_string(current_mode), m_times_locked);
                else
                    dbgln("Lock::unlock @ {} ({}): release {}, holding: {}", this, m_name, mode_to_string(current_mode), m_times_locked);
            }

            VERIFY(current_mode != Mode::Unlocked);

            VERIFY(m_times_locked > 0);
            m_times_locked--;

            switch (current_mode) {
            case Mode::Exclusive:
                VERIFY(m_holder == current_thread);
                VERIFY(m_shared_holders.is_empty());
                if (m_times_locked == 0)
                    m_holder = nullptr;
                break;
            case Mode::Shared: {
                VERIFY(!m_holder);
                auto it = m_shared_holders.find(current_thread);
                VERIFY(it != m_shared_holders.end());
                if (it->value > 1) {
                    it->value--;
                } else {
                    VERIFY(it->value > 0);
                    m_shared_holders.remove(it);
                }
                break;
            }
            default:
                VERIFY_NOT_REACHED();
            }

            bool unlocked_last = (m_times_locked == 0);
            if (unlocked_last) {
                VERIFY(current_mode == Mode::Exclusive ? !m_holder : m_shared_holders.is_empty());

                m_mode = Mode::Unlocked;
                m_queue.should_block(false);
            }

#if LOCK_DEBUG
            if (current_thread) {
                current_thread->holding_lock(*this, -1, {});
            }
#endif

            m_lock.store(false, AK::memory_order_release);
            if (unlocked_last) {
                u32 did_wake = m_queue.wake_one();
                dbgln_if(LOCK_TRACE_DEBUG, "Lock::unlock @ {} ({}) wake one ({})", this, m_name, did_wake);
            }
            return;
        }
        // I don't know *who* is using "m_lock", so just yield.
        Scheduler::yield_from_critical();
    }
}
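
// Drops *all* of the current thread's holds on this lock (if it holds any)
// and reports how many holds were dropped via lock_count_to_restore, so the
// caller can reinstate them later with restore_lock(). Returns the mode the
// lock was held in, or Mode::Unlocked if the current thread held nothing.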
auto Lock::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode
{
    // NOTE: This may be called from an interrupt handler (not an IRQ handler)
    // and also from within critical sections!
    VERIFY(!Processor::current().in_irq());
    auto current_thread = Thread::current();
    ScopedCritical critical; // in case we're not in a critical section already
    for (;;) {
        if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
            Mode previous_mode;
            auto current_mode = m_mode.load(AK::MemoryOrder::memory_order_relaxed);
            switch (current_mode) {
            case Mode::Exclusive: {
                if (m_holder != current_thread) {
                    m_lock.store(false, AK::MemoryOrder::memory_order_release);
                    lock_count_to_restore = 0;
                    return Mode::Unlocked;
                }

                dbgln_if(LOCK_RESTORE_DEBUG, "Lock::force_unlock_if_locked @ {}: unlocking exclusive with lock count: {}", this, m_times_locked);
#if LOCK_DEBUG
                m_holder->holding_lock(*this, -(int)m_times_locked, {});
#endif
                m_holder = nullptr;
                VERIFY(m_times_locked > 0);
                lock_count_to_restore = m_times_locked;
                m_times_locked = 0;
                m_mode = Mode::Unlocked;
                m_queue.should_block(false);
                m_lock.store(false, AK::memory_order_release);
                previous_mode = Mode::Exclusive;
                break;
            }
            case Mode::Shared: {
                VERIFY(!m_holder);
                auto it = m_shared_holders.find(current_thread);
                if (it == m_shared_holders.end()) {
                    m_lock.store(false, AK::MemoryOrder::memory_order_release);
                    lock_count_to_restore = 0;
                    return Mode::Unlocked;
                }

                dbgln_if(LOCK_RESTORE_DEBUG, "Lock::force_unlock_if_locked @ {}: unlocking shared with lock count: {}, total locks: {}",
                    this, it->value, m_times_locked);

                VERIFY(it->value > 0);
                lock_count_to_restore = it->value;
                VERIFY(lock_count_to_restore > 0);
#if LOCK_DEBUG
                current_thread->holding_lock(*this, -(int)lock_count_to_restore, {});
#endif
                m_shared_holders.remove(it);
                VERIFY(m_times_locked >= lock_count_to_restore);
                m_times_locked -= lock_count_to_restore;
                if (m_times_locked == 0) {
                    m_mode = Mode::Unlocked;
                    m_queue.should_block(false);
                }
                m_lock.store(false, AK::memory_order_release);
                previous_mode = Mode::Shared;
                break;
            }
            case Mode::Unlocked: {
                m_lock.store(false, AK::memory_order_relaxed);
                lock_count_to_restore = 0;
                previous_mode = Mode::Unlocked;
                break;
            }
            default:
                VERIFY_NOT_REACHED();
            }
            m_queue.wake_one();
            return previous_mode;
        }
        // I don't know *who* is using "m_lock", so just yield.
        Scheduler::yield_from_critical();
    }
}
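
// Reinstates lock_count holds in the given mode for the current thread,
// undoing an earlier force_unlock_if_locked(). Restoring exclusive mode
// requires the lock to be unlocked; restoring shared mode only requires
// that no thread currently holds the lock exclusively.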
#if LOCK_DEBUG
void Lock::restore_lock(Mode mode, u32 lock_count, const SourceLocation& location)
#else
void Lock::restore_lock(Mode mode, u32 lock_count)
#endif
{
    VERIFY(mode != Mode::Unlocked);
    VERIFY(lock_count > 0);
    VERIFY(!Processor::current().in_irq());
    auto current_thread = Thread::current();
    ScopedCritical critical; // in case we're not in a critical section already
    for (;;) {
        if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
            switch (mode) {
            case Mode::Exclusive: {
                auto expected_mode = Mode::Unlocked;
                if (!m_mode.compare_exchange_strong(expected_mode, Mode::Exclusive))
                    break;

                dbgln_if(LOCK_RESTORE_DEBUG, "Lock::restore_lock @ {}: restoring {} with lock count {}, was unlocked", this, mode_to_string(mode), lock_count);

                VERIFY(m_times_locked == 0);
                m_times_locked = lock_count;
                VERIFY(!m_holder);
                VERIFY(m_shared_holders.is_empty());
                m_holder = current_thread;
                m_queue.should_block(true);
                m_lock.store(false, AK::memory_order_release);
#if LOCK_DEBUG
                m_holder->holding_lock(*this, (int)lock_count, location);
#endif
                return;
            }
            case Mode::Shared: {
                auto expected_mode = Mode::Unlocked;
                if (!m_mode.compare_exchange_strong(expected_mode, Mode::Shared) && expected_mode != Mode::Shared)
                    break;

                dbgln_if(LOCK_RESTORE_DEBUG, "Lock::restore_lock @ {}: restoring {} with lock count {}, was {}",
                    this, mode_to_string(mode), lock_count, mode_to_string(expected_mode));

                VERIFY(expected_mode == Mode::Shared || m_times_locked == 0);
                m_times_locked += lock_count;
                VERIFY(!m_holder);
                VERIFY((expected_mode == Mode::Unlocked) == m_shared_holders.is_empty());

                auto set_result = m_shared_holders.set(current_thread, lock_count);
                // There may be other shared lock holders already, but we should not have an entry yet
                VERIFY(set_result == AK::HashSetResult::InsertedNewEntry);

                m_queue.should_block(true);
                m_lock.store(false, AK::memory_order_release);
#if LOCK_DEBUG
                current_thread->holding_lock(*this, (int)lock_count, location);
#endif
                return;
            }
            default:
                VERIFY_NOT_REACHED();
            }

            m_lock.store(false, AK::memory_order_relaxed);
        }
        // I don't know *who* is using "m_lock", so just yield.
        Scheduler::yield_from_critical();
    }
}
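
// Wakes every thread currently blocked on this lock. Only valid while the
// lock is not held in shared mode.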
void Lock::clear_waiters()
{
    VERIFY(m_mode != Mode::Shared);
    m_queue.wake_all();
}

}