Lock.cpp

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/SourceLocation.h>
#include <AK/TemporaryChange.h>
#include <Kernel/Debug.h>
#include <Kernel/KSyms.h>
#include <Kernel/Lock.h>
#include <Kernel/Thread.h>

namespace Kernel {
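
// Lock is a recursive mutex that can be held either exclusively (a single
// writer) or shared (any number of readers). Its bookkeeping (m_mode,
// m_holder, m_shared_holders, m_times_locked) is guarded by the short-lived
// atomic spinlock m_lock; threads that cannot take the lock in the requested
// mode block on m_queue until a holder releases it.
//
// A minimal usage sketch (assuming the Locker RAII helper declared alongside
// Lock in Kernel/Lock.h; the names Example and m_data_lock are illustrative):
//
//     Lock m_data_lock { "ExampleData" };
//
//     void Example::update()
//     {
//         Locker locker(m_data_lock, Lock::Mode::Exclusive);
//         // ... mutate state protected by m_data_lock; released when
//         // locker goes out of scope ...
//     }
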
#if LOCK_DEBUG
void Lock::lock(Mode mode, const SourceLocation& location)
#else
void Lock::lock(Mode mode)
#endif
{
    // NOTE: This may be called from an interrupt handler (not an IRQ handler)
    // and also from within critical sections!
    VERIFY(!Processor::current().in_irq());
    VERIFY(mode != Mode::Unlocked);
    auto current_thread = Thread::current();
    ScopedCritical critical; // in case we're not in a critical section already
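    // Acquisition loop: grab the short m_lock spinlock, inspect m_mode, and
    // either take the lock (or bump the recursion count if this thread already
    // holds it), or release m_lock and block on m_queue until we are woken.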
    for (;;) {
        if (m_lock.exchange(true, AK::memory_order_acq_rel) != false) {
            // I don't know *who* is using "m_lock", so just yield.
            Scheduler::yield_from_critical();
            continue;
        }

        // FIXME: Do not add new readers if writers are queued.
        Mode current_mode = m_mode;
        switch (current_mode) {
        case Mode::Unlocked: {
            dbgln_if(LOCK_TRACE_DEBUG, "Lock::lock @ ({}) {}: acquire {}, currently unlocked", this, m_name, mode_to_string(mode));
            m_mode = mode;
            VERIFY(!m_holder);
            VERIFY(m_shared_holders.is_empty());
            if (mode == Mode::Exclusive) {
                m_holder = current_thread;
            } else {
                VERIFY(mode == Mode::Shared);
                m_shared_holders.set(current_thread, 1);
            }
            VERIFY(m_times_locked == 0);
            m_times_locked++;
#if LOCK_DEBUG
            if (current_thread) {
                current_thread->holding_lock(*this, 1, location);
            }
#endif
            m_queue.should_block(true);
            m_lock.store(false, AK::memory_order_release);
            return;
        }
        case Mode::Exclusive: {
            VERIFY(m_holder);
            if (m_holder != current_thread)
                break;
            VERIFY(m_shared_holders.is_empty());
            if constexpr (LOCK_TRACE_DEBUG) {
                if (mode == Mode::Exclusive)
                    dbgln("Lock::lock @ {} ({}): acquire {}, currently exclusive, holding: {}", this, m_name, mode_to_string(mode), m_times_locked);
                else
                    dbgln("Lock::lock @ {} ({}): acquire exclusive (requested {}), currently exclusive, holding: {}", this, m_name, mode_to_string(mode), m_times_locked);
            }
            VERIFY(mode == Mode::Exclusive || mode == Mode::Shared);
            VERIFY(m_times_locked > 0);
            m_times_locked++;
#if LOCK_DEBUG
            current_thread->holding_lock(*this, 1, location);
#endif
            m_lock.store(false, AK::memory_order_release);
            return;
        }
        case Mode::Shared: {
            VERIFY(!m_holder);
            if (mode != Mode::Shared)
                break;
            dbgln_if(LOCK_TRACE_DEBUG, "Lock::lock @ {} ({}): acquire {}, currently shared, locks held {}", this, m_name, mode_to_string(mode), m_times_locked);
            VERIFY(m_times_locked > 0);
            m_times_locked++;
            VERIFY(!m_shared_holders.is_empty());
            auto it = m_shared_holders.find(current_thread);
            if (it != m_shared_holders.end())
                it->value++;
            else
                m_shared_holders.set(current_thread, 1);
#if LOCK_DEBUG
            current_thread->holding_lock(*this, 1, location);
#endif
            m_lock.store(false, AK::memory_order_release);
            return;
        }
        default:
            VERIFY_NOT_REACHED();
        }

        m_lock.store(false, AK::memory_order_release);
        dbgln_if(LOCK_TRACE_DEBUG, "Lock::lock @ {} ({}) waiting...", this, m_name);
        m_queue.wait_forever(m_name);
        dbgln_if(LOCK_TRACE_DEBUG, "Lock::lock @ {} ({}) waited", this, m_name);
    }
}
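
// unlock() releases one level of the calling thread's hold. Only when the
// outermost hold is dropped (m_times_locked reaches 0) does the lock return
// to Unlocked and wake one thread waiting on m_queue.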
void Lock::unlock()
{
    // NOTE: This may be called from an interrupt handler (not an IRQ handler)
    // and also from within critical sections!
    VERIFY(!Processor::current().in_irq());
    auto current_thread = Thread::current();
    ScopedCritical critical; // in case we're not in a critical section already
    for (;;) {
        if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
            Mode current_mode = m_mode;
            if constexpr (LOCK_TRACE_DEBUG) {
                if (current_mode == Mode::Shared)
                    dbgln("Lock::unlock @ {} ({}): release {}, locks held: {}", this, m_name, mode_to_string(current_mode), m_times_locked);
                else
                    dbgln("Lock::unlock @ {} ({}): release {}, holding: {}", this, m_name, mode_to_string(current_mode), m_times_locked);
            }
            VERIFY(current_mode != Mode::Unlocked);
            VERIFY(m_times_locked > 0);
            m_times_locked--;
            switch (current_mode) {
            case Mode::Exclusive:
                VERIFY(m_holder == current_thread);
                VERIFY(m_shared_holders.is_empty());
                if (m_times_locked == 0)
                    m_holder = nullptr;
                break;
            case Mode::Shared: {
                VERIFY(!m_holder);
                auto it = m_shared_holders.find(current_thread);
                VERIFY(it != m_shared_holders.end());
                if (it->value > 1) {
                    it->value--;
                } else {
                    VERIFY(it->value > 0);
                    m_shared_holders.remove(it);
                }
                break;
            }
            default:
                VERIFY_NOT_REACHED();
            }

            bool unlocked_last = (m_times_locked == 0);
            if (unlocked_last) {
                VERIFY(current_mode == Mode::Exclusive ? !m_holder : m_shared_holders.is_empty());
                m_mode = Mode::Unlocked;
                m_queue.should_block(false);
            }
#if LOCK_DEBUG
            if (current_thread) {
                current_thread->holding_lock(*this, -1, {});
            }
#endif
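            // Drop the short spinlock before waking anyone, so a woken thread
            // can immediately take m_lock and re-evaluate m_mode.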
            m_lock.store(false, AK::memory_order_release);
            if (unlocked_last) {
                u32 did_wake = m_queue.wake_one();
                dbgln_if(LOCK_TRACE_DEBUG, "Lock::unlock @ {} ({}) wake one ({})", this, m_name, did_wake);
            }
            return;
        }
        // I don't know *who* is using "m_lock", so just yield.
        Scheduler::yield_from_critical();
    }
}
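
// force_unlock_if_locked() releases every hold the calling thread has on this
// lock, reports how many holds were dropped via lock_count_to_restore, and
// returns the mode that was held; it pairs with restore_lock(), which
// reinstates those holds later.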
auto Lock::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode
{
    // NOTE: This may be called from an interrupt handler (not an IRQ handler)
    // and also from within critical sections!
    VERIFY(!Processor::current().in_irq());
    auto current_thread = Thread::current();
    ScopedCritical critical; // in case we're not in a critical section already
    for (;;) {
        if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
            Mode previous_mode;
            auto current_mode = m_mode.load(AK::MemoryOrder::memory_order_relaxed);
            switch (current_mode) {
            case Mode::Exclusive: {
                if (m_holder != current_thread) {
                    m_lock.store(false, AK::MemoryOrder::memory_order_release);
                    lock_count_to_restore = 0;
                    return Mode::Unlocked;
                }
                dbgln_if(LOCK_RESTORE_DEBUG, "Lock::force_unlock_if_locked @ {}: unlocking exclusive with lock count: {}", this, m_times_locked);
#if LOCK_DEBUG
                m_holder->holding_lock(*this, -(int)m_times_locked, {});
#endif
                m_holder = nullptr;
                VERIFY(m_times_locked > 0);
                lock_count_to_restore = m_times_locked;
                m_times_locked = 0;
                m_mode = Mode::Unlocked;
                m_queue.should_block(false);
                m_lock.store(false, AK::memory_order_release);
                previous_mode = Mode::Exclusive;
                break;
            }
            case Mode::Shared: {
                VERIFY(!m_holder);
                auto it = m_shared_holders.find(current_thread);
                if (it == m_shared_holders.end()) {
                    m_lock.store(false, AK::MemoryOrder::memory_order_release);
                    lock_count_to_restore = 0;
                    return Mode::Unlocked;
                }
                dbgln_if(LOCK_RESTORE_DEBUG, "Lock::force_unlock_if_locked @ {}: unlocking shared with lock count: {}, total locks: {}",
                    this, it->value, m_times_locked);
                VERIFY(it->value > 0);
                lock_count_to_restore = it->value;
                VERIFY(lock_count_to_restore > 0);
#if LOCK_DEBUG
                // NOTE: m_holder is null in Shared mode; the holds being released belong to current_thread.
                current_thread->holding_lock(*this, -(int)lock_count_to_restore, {});
#endif
                m_shared_holders.remove(it);
                VERIFY(m_times_locked >= lock_count_to_restore);
                m_times_locked -= lock_count_to_restore;
                if (m_times_locked == 0) {
                    m_mode = Mode::Unlocked;
                    m_queue.should_block(false);
                }
                m_lock.store(false, AK::memory_order_release);
                previous_mode = Mode::Shared;
                break;
            }
            case Mode::Unlocked: {
                m_lock.store(false, AK::memory_order_relaxed);
                lock_count_to_restore = 0;
                previous_mode = Mode::Unlocked;
                break;
            }
            default:
                VERIFY_NOT_REACHED();
            }
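            // Let one waiter re-attempt acquisition now that this thread's
            // holds (if any) have been released.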
            m_queue.wake_one();
            return previous_mode;
        }
        // I don't know *who* is using "m_lock", so just yield.
        Scheduler::yield_from_critical();
    }
}
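
// restore_lock() reinstates holds previously released by
// force_unlock_if_locked(): it re-takes the lock in the given mode via a
// compare-exchange on m_mode (from Unlocked, or joining an existing Shared
// mode) and restores the saved lock count for the calling thread.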
#if LOCK_DEBUG
void Lock::restore_lock(Mode mode, u32 lock_count, const SourceLocation& location)
#else
void Lock::restore_lock(Mode mode, u32 lock_count)
#endif
{
    VERIFY(mode != Mode::Unlocked);
    VERIFY(lock_count > 0);
    VERIFY(!Processor::current().in_irq());
    auto current_thread = Thread::current();
    ScopedCritical critical; // in case we're not in a critical section already
    for (;;) {
        if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
            switch (mode) {
            case Mode::Exclusive: {
                auto expected_mode = Mode::Unlocked;
                if (!m_mode.compare_exchange_strong(expected_mode, Mode::Exclusive))
                    break;
                dbgln_if(LOCK_RESTORE_DEBUG, "Lock::restore_lock @ {}: restoring {} with lock count {}, was unlocked", this, mode_to_string(mode), lock_count);
                VERIFY(m_times_locked == 0);
                m_times_locked = lock_count;
                VERIFY(!m_holder);
                VERIFY(m_shared_holders.is_empty());
                m_holder = current_thread;
                m_queue.should_block(true);
                m_lock.store(false, AK::memory_order_release);
#if LOCK_DEBUG
                m_holder->holding_lock(*this, (int)lock_count, location);
#endif
                return;
            }
            case Mode::Shared: {
                auto expected_mode = Mode::Unlocked;
                if (!m_mode.compare_exchange_strong(expected_mode, Mode::Shared) && expected_mode != Mode::Shared)
                    break;
                dbgln_if(LOCK_RESTORE_DEBUG, "Lock::restore_lock @ {}: restoring {} with lock count {}, was {}",
                    this, mode_to_string(mode), lock_count, mode_to_string(expected_mode));
                VERIFY(expected_mode == Mode::Shared || m_times_locked == 0);
                m_times_locked += lock_count;
                VERIFY(!m_holder);
                VERIFY((expected_mode == Mode::Unlocked) == m_shared_holders.is_empty());
                auto set_result = m_shared_holders.set(current_thread, lock_count);
                // There may be other shared lock holders already, but we should not have an entry yet
                VERIFY(set_result == AK::HashSetResult::InsertedNewEntry);
                m_queue.should_block(true);
                m_lock.store(false, AK::memory_order_release);
#if LOCK_DEBUG
                // NOTE: m_holder stays null for shared holds; record the restored holds against current_thread.
                current_thread->holding_lock(*this, (int)lock_count, location);
#endif
                return;
            }
            default:
                VERIFY_NOT_REACHED();
            }
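            // The mode compare-exchange lost a race (the lock is currently held
            // in an incompatible mode); drop the short spinlock and retry.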
            m_lock.store(false, AK::memory_order_relaxed);
        }
        // I don't know *who* is using "m_lock", so just yield.
        Scheduler::yield_from_critical();
    }
}
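
// clear_waiters() wakes every thread blocked on this lock without handing
// ownership to any of them; it must not be used while the lock is held in
// Shared mode.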
void Lock::clear_waiters()
{
    VERIFY(m_mode != Mode::Shared);
    m_queue.wake_all();
}

}