// Kernel/Lock.cpp
  1. /*
  2. * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
  3. * All rights reserved.
  4. *
  5. * Redistribution and use in source and binary forms, with or without
  6. * modification, are permitted provided that the following conditions are met:
  7. *
  8. * 1. Redistributions of source code must retain the above copyright notice, this
  9. * list of conditions and the following disclaimer.
  10. *
  11. * 2. Redistributions in binary form must reproduce the above copyright notice,
  12. * this list of conditions and the following disclaimer in the documentation
  13. * and/or other materials provided with the distribution.
  14. *
  15. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  16. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  17. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  18. * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
  19. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  20. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  21. * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  22. * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  23. * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  24. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  25. */
  26. #include <AK/TemporaryChange.h>
  27. #include <Kernel/Debug.h>
  28. #include <Kernel/KSyms.h>
  29. #include <Kernel/Lock.h>
  30. #include <Kernel/Thread.h>
  31. namespace Kernel {
#if LOCK_DEBUG
void Lock::lock(Mode mode)
{
    // Debug builds route the plain overload through the file/line-tracking one.
    lock("unknown", 0, mode);
}
void Lock::lock(const char* file, int line, Mode mode)
#else
void Lock::lock(Mode mode)
#endif
{
    // Acquire this lock in the requested mode (Shared or Exclusive).
    // The internal m_lock spinlock guards the lock's bookkeeping; if the
    // requested mode is incompatible with the current one, the thread
    // blocks on m_queue and retries after being woken.
    // NOTE: This may be called from an interrupt handler (not an IRQ handler)
    // and also from within critical sections!
    ASSERT(!Processor::current().in_irq());
    ASSERT(mode != Mode::Unlocked);
    auto current_thread = Thread::current();
    ScopedCritical critical; // in case we're not in a critical section already
    for (;;) {
        if (m_lock.exchange(true, AK::memory_order_acq_rel) != false) {
            // I don't know *who* is using "m_lock", so just yield.
            Scheduler::yield_from_critical();
            continue;
        }
        // FIXME: Do not add new readers if writers are queued.
        Mode current_mode = m_mode;
        switch (current_mode) {
        case Mode::Unlocked: {
            // Nobody holds the lock: take it in the requested mode.
            dbgln<LOCK_TRACE_DEBUG>("Lock::lock @ ({}) {}: acquire {}, currently unlocked", this, m_name, mode_to_string(mode));
            m_mode = mode;
            ASSERT(!m_holder);
            ASSERT(m_shared_holders.is_empty());
            if (mode == Mode::Exclusive) {
                m_holder = current_thread;
            } else {
                ASSERT(mode == Mode::Shared);
                m_shared_holders.set(current_thread, 1);
            }
            ASSERT(m_times_locked == 0);
            m_times_locked++;
#if LOCK_DEBUG
            current_thread->holding_lock(*this, 1, file, line);
#endif
            // Future waiters must block until unlock() clears this again.
            m_queue.should_block(true);
            m_lock.store(false, AK::memory_order_release);
            return;
        }
        case Mode::Exclusive: {
            // Exclusively held: only the holder itself may re-enter
            // (recursive acquisition); anyone else falls through to wait.
            ASSERT(m_holder);
            if (m_holder != current_thread)
                break;
            ASSERT(m_shared_holders.is_empty());
            if constexpr (LOCK_TRACE_DEBUG) {
                if (mode == Mode::Exclusive)
                    dbgln("Lock::lock @ {} ({}): acquire {}, currently exclusive, holding: {}", this, m_name, mode_to_string(mode), m_times_locked);
                else
                    dbgln("Lock::lock @ {} ({}): acquire exclusive (requested {}), currently exclusive, holding: {}", this, m_name, mode_to_string(mode), m_times_locked);
            }
            // A Shared request by the current exclusive holder is granted
            // as one more exclusive reference.
            ASSERT(mode == Mode::Exclusive || mode == Mode::Shared);
            ASSERT(m_times_locked > 0);
            m_times_locked++;
#if LOCK_DEBUG
            current_thread->holding_lock(*this, 1, file, line);
#endif
            m_lock.store(false, AK::memory_order_release);
            return;
        }
        case Mode::Shared: {
            // Held shared: additional readers may join; a writer falls
            // through to wait.
            ASSERT(!m_holder);
            if (mode != Mode::Shared)
                break;
            dbgln<LOCK_TRACE_DEBUG>("Lock::lock @ {} ({}): acquire {}, currently shared, locks held {}", this, m_name, mode_to_string(mode), m_times_locked);
            ASSERT(m_times_locked > 0);
            m_times_locked++;
            ASSERT(!m_shared_holders.is_empty());
            // Bump this thread's per-thread recursion count, creating the
            // entry on first acquisition.
            auto it = m_shared_holders.find(current_thread);
            if (it != m_shared_holders.end())
                it->value++;
            else
                m_shared_holders.set(current_thread, 1);
#if LOCK_DEBUG
            current_thread->holding_lock(*this, 1, file, line);
#endif
            m_lock.store(false, AK::memory_order_release);
            return;
        }
        default:
            ASSERT_NOT_REACHED();
        }
        // Incompatible mode: release the spinlock, block until a holder
        // wakes us, then retry the whole acquisition from scratch.
        m_lock.store(false, AK::memory_order_release);
        dbgln<LOCK_TRACE_DEBUG>("Lock::lock @ {} ({}) waiting...", this, m_name);
        m_queue.wait_on({}, m_name);
        dbgln<LOCK_TRACE_DEBUG>("Lock::lock @ {} ({}) waited", this, m_name);
    }
}
void Lock::unlock()
{
    // Release one reference to this lock held by the calling thread.
    // When the last reference is dropped, the lock returns to Unlocked
    // and one blocked waiter (if any) is woken.
    // NOTE: This may be called from an interrupt handler (not an IRQ handler)
    // and also from within critical sections!
    ASSERT(!Processor::current().in_irq());
    auto current_thread = Thread::current();
    ScopedCritical critical; // in case we're not in a critical section already
    for (;;) {
        if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
            Mode current_mode = m_mode;
            if constexpr (LOCK_TRACE_DEBUG) {
                if (current_mode == Mode::Shared)
                    dbgln("Lock::unlock @ {} ({}): release {}, locks held: {}", this, m_name, mode_to_string(current_mode), m_times_locked);
                else
                    dbgln("Lock::unlock @ {} ({}): release {}, holding: {}", this, m_name, mode_to_string(current_mode), m_times_locked);
            }
            ASSERT(current_mode != Mode::Unlocked);
            ASSERT(m_times_locked > 0);
            m_times_locked--;
            switch (current_mode) {
            case Mode::Exclusive:
                ASSERT(m_holder == current_thread);
                ASSERT(m_shared_holders.is_empty());
                // Clear the holder only once all recursive references are gone.
                if (m_times_locked == 0)
                    m_holder = nullptr;
                break;
            case Mode::Shared: {
                ASSERT(!m_holder);
                // Decrement this thread's per-thread recursion count,
                // removing its entry once it reaches zero.
                auto it = m_shared_holders.find(current_thread);
                ASSERT(it != m_shared_holders.end());
                if (it->value > 1) {
                    it->value--;
                } else {
                    ASSERT(it->value > 0);
                    m_shared_holders.remove(it);
                }
                break;
            }
            default:
                ASSERT_NOT_REACHED();
            }
            bool unlocked_last = (m_times_locked == 0);
            if (unlocked_last) {
                ASSERT(current_mode == Mode::Exclusive ? !m_holder : m_shared_holders.is_empty());
                m_mode = Mode::Unlocked;
                // Allow new lock() callers to acquire without queueing.
                m_queue.should_block(false);
            }
#if LOCK_DEBUG
            current_thread->holding_lock(*this, -1);
#endif
            m_lock.store(false, AK::memory_order_release);
            if (unlocked_last) {
                // Fully released: hand the lock to one blocked waiter, if any.
                u32 did_wake = m_queue.wake_one();
                dbgln<LOCK_TRACE_DEBUG>("Lock::unlock @ {} ({}) wake one ({})", this, m_name, did_wake);
            }
            return;
        }
        // I don't know *who* is using "m_lock", so just yield.
        Scheduler::yield_from_critical();
    }
}
  186. auto Lock::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode
  187. {
  188. // NOTE: This may be called from an interrupt handler (not an IRQ handler)
  189. // and also from within critical sections!
  190. ASSERT(!Processor::current().in_irq());
  191. auto current_thread = Thread::current();
  192. ScopedCritical critical; // in case we're not in a critical section already
  193. for (;;) {
  194. if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
  195. Mode previous_mode;
  196. auto current_mode = m_mode.load(AK::MemoryOrder::memory_order_relaxed);
  197. switch (current_mode) {
  198. case Mode::Exclusive: {
  199. if (m_holder != current_thread) {
  200. m_lock.store(false, AK::MemoryOrder::memory_order_release);
  201. lock_count_to_restore = 0;
  202. return Mode::Unlocked;
  203. }
  204. dbgln<LOCK_RESTORE_DEBUG>("Lock::force_unlock_if_locked @ {}: unlocking exclusive with lock count: {}", this, m_times_locked);
  205. #if LOCK_DEBUG
  206. m_holder->holding_lock(*this, -(int)lock_count_to_restore);
  207. #endif
  208. m_holder = nullptr;
  209. ASSERT(m_times_locked > 0);
  210. lock_count_to_restore = m_times_locked;
  211. m_times_locked = 0;
  212. m_mode = Mode::Unlocked;
  213. m_queue.should_block(false);
  214. m_lock.store(false, AK::memory_order_release);
  215. previous_mode = Mode::Exclusive;
  216. break;
  217. }
  218. case Mode::Shared: {
  219. ASSERT(!m_holder);
  220. auto it = m_shared_holders.find(current_thread);
  221. if (it == m_shared_holders.end()) {
  222. m_lock.store(false, AK::MemoryOrder::memory_order_release);
  223. lock_count_to_restore = 0;
  224. return Mode::Unlocked;
  225. }
  226. dbgln<LOCK_RESTORE_DEBUG>("Lock::force_unlock_if_locked @ {}: unlocking exclusive with lock count: {}, total locks: {}",
  227. this, it->value, m_times_locked);
  228. ASSERT(it->value > 0);
  229. lock_count_to_restore = it->value;
  230. ASSERT(lock_count_to_restore > 0);
  231. #if LOCK_DEBUG
  232. m_holder->holding_lock(*this, -(int)lock_count_to_restore);
  233. #endif
  234. m_shared_holders.remove(it);
  235. ASSERT(m_times_locked >= lock_count_to_restore);
  236. m_times_locked -= lock_count_to_restore;
  237. if (m_times_locked == 0) {
  238. m_mode = Mode::Unlocked;
  239. m_queue.should_block(false);
  240. }
  241. m_lock.store(false, AK::memory_order_release);
  242. previous_mode = Mode::Shared;
  243. break;
  244. }
  245. case Mode::Unlocked: {
  246. m_lock.store(false, AK::memory_order_relaxed);
  247. lock_count_to_restore = 0;
  248. previous_mode = Mode::Unlocked;
  249. break;
  250. }
  251. default:
  252. ASSERT_NOT_REACHED();
  253. }
  254. m_queue.wake_one();
  255. return previous_mode;
  256. }
  257. // I don't know *who* is using "m_lock", so just yield.
  258. Scheduler::yield_from_critical();
  259. }
  260. }
  261. #if LOCK_DEBUG
  262. void Lock::restore_lock(Mode mode, u32 lock_count)
  263. {
  264. return restore_lock("unknown", 0, mode, lock_count);
  265. }
  266. void Lock::restore_lock(const char* file, int line, Mode mode, u32 lock_count)
  267. #else
  268. void Lock::restore_lock(Mode mode, u32 lock_count)
  269. #endif
  270. {
  271. ASSERT(mode != Mode::Unlocked);
  272. ASSERT(lock_count > 0);
  273. ASSERT(!Processor::current().in_irq());
  274. auto current_thread = Thread::current();
  275. ScopedCritical critical; // in case we're not in a critical section already
  276. for (;;) {
  277. if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
  278. switch (mode) {
  279. case Mode::Exclusive: {
  280. auto expected_mode = Mode::Unlocked;
  281. if (!m_mode.compare_exchange_strong(expected_mode, Mode::Exclusive))
  282. break;
  283. dbgln<LOCK_RESTORE_DEBUG>("Lock::restore_lock @ {}: restoring {} with lock count {}, was unlocked", this, mode_to_string(mode), lock_count);
  284. ASSERT(m_times_locked == 0);
  285. m_times_locked = lock_count;
  286. ASSERT(!m_holder);
  287. ASSERT(m_shared_holders.is_empty());
  288. m_holder = current_thread;
  289. m_queue.should_block(true);
  290. m_lock.store(false, AK::memory_order_release);
  291. #if LOCK_DEBUG
  292. m_holder->holding_lock(*this, (int)lock_count, file, line);
  293. #endif
  294. return;
  295. }
  296. case Mode::Shared: {
  297. auto expected_mode = Mode::Unlocked;
  298. if (!m_mode.compare_exchange_strong(expected_mode, Mode::Shared) && expected_mode != Mode::Shared)
  299. break;
  300. dbgln<LOCK_RESTORE_DEBUG>("Lock::restore_lock @ {}: restoring {} with lock count {}, was {}",
  301. this, mode_to_string(mode), lock_count, mode_to_string(expected_mode));
  302. ASSERT(expected_mode == Mode::Shared || m_times_locked == 0);
  303. m_times_locked += lock_count;
  304. ASSERT(!m_holder);
  305. ASSERT((expected_mode == Mode::Unlocked) == m_shared_holders.is_empty());
  306. auto set_result = m_shared_holders.set(current_thread, lock_count);
  307. // There may be other shared lock holders already, but we should not have an entry yet
  308. ASSERT(set_result == AK::HashSetResult::InsertedNewEntry);
  309. m_queue.should_block(true);
  310. m_lock.store(false, AK::memory_order_release);
  311. #if LOCK_DEBUG
  312. m_holder->holding_lock(*this, (int)lock_count, file, line);
  313. #endif
  314. return;
  315. }
  316. default:
  317. ASSERT_NOT_REACHED();
  318. }
  319. m_lock.store(false, AK::memory_order_relaxed);
  320. }
  321. // I don't know *who* is using "m_lock", so just yield.
  322. Scheduler::yield_from_critical();
  323. }
  324. }
// Wake every thread currently blocked on this lock's wait queue.
// Not permitted while the lock is held in Shared mode.
// NOTE(review): the reason Shared is disallowed isn't visible in this
// file — presumably waking all waiters of a shared lock would let
// incompatible acquisitions race; confirm with callers.
void Lock::clear_waiters()
{
    ASSERT(m_mode != Mode::Shared);
    m_queue.wake_all();
}
  330. }