Lock.cpp 5.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144
  1. /*
  2. * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
  3. * All rights reserved.
  4. *
  5. * Redistribution and use in source and binary forms, with or without
  6. * modification, are permitted provided that the following conditions are met:
  7. *
  8. * 1. Redistributions of source code must retain the above copyright notice, this
  9. * list of conditions and the following disclaimer.
  10. *
  11. * 2. Redistributions in binary form must reproduce the above copyright notice,
  12. * this list of conditions and the following disclaimer in the documentation
  13. * and/or other materials provided with the distribution.
  14. *
  15. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  16. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  17. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  18. * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
  19. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  20. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  21. * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  22. * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  23. * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  24. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  25. */
  26. #include <AK/TemporaryChange.h>
  27. #include <Kernel/KSyms.h>
  28. #include <Kernel/Lock.h>
  29. #include <Kernel/Thread.h>
  30. namespace Kernel {
  31. static bool modes_conflict(Lock::Mode mode1, Lock::Mode mode2)
  32. {
  33. if (mode1 == Lock::Mode::Unlocked || mode2 == Lock::Mode::Unlocked)
  34. return false;
  35. if (mode1 == Lock::Mode::Shared && mode2 == Lock::Mode::Shared)
  36. return false;
  37. return true;
  38. }
// Acquire this lock in the given mode, blocking on the wait queue until
// the requested mode no longer conflicts with the current holders.
// Exclusive locks are recursive for the holding thread (m_times_locked
// counts the nesting depth).
void Lock::lock(Mode mode)
{
    // Asking for Mode::Unlocked is meaningless.
    ASSERT(mode != Mode::Unlocked);

    // Blocking with interrupts off could never be woken up again, so
    // treat it as a fatal kernel bug: log, backtrace, and hang.
    if (!are_interrupts_enabled()) {
        klog() << "Interrupts disabled when trying to take Lock{" << m_name << "}";
        dump_backtrace();
        hang();
    }
    auto current_thread = Thread::current();
    for (;;) {
        // m_lock is a raw atomic flag guarding the Lock's internal state
        // (m_mode, m_holder, m_times_locked).
        bool expected = false;
        if (m_lock.compare_exchange_strong(expected, true, AK::memory_order_acq_rel)) {
            do {
                // FIXME: Do not add new readers if writers are queued.
                bool modes_dont_conflict = !modes_conflict(m_mode, mode);
                bool already_hold_exclusive_lock = m_mode == Mode::Exclusive && m_holder == current_thread;
                if (modes_dont_conflict || already_hold_exclusive_lock) {
                    // We got the lock!
                    // Recursive exclusive acquisition keeps the existing
                    // mode; otherwise record the newly granted mode.
                    if (!already_hold_exclusive_lock)
                        m_mode = mode;
                    m_holder = current_thread;
                    m_times_locked++;
                    // Release the internal state lock before returning.
                    m_lock.store(false, AK::memory_order_release);
                    return;
                }
                // Conflict: block on the wait queue. &m_lock is handed to
                // wait_on(), which presumably releases it while we sleep —
                // TODO confirm against Thread::wait_on. On NotBlocked we
                // retry the acquisition check immediately.
            } while (current_thread->wait_on(m_queue, m_name, nullptr, &m_lock, m_holder) == Thread::BlockResult::NotBlocked);
        } else if (Processor::current().in_critical()) {
            // If we're in a critical section and trying to lock, no context
            // switch will happen, so yield.
            // The assumption is that if we call this from a critical section
            // that we DO want to temporarily leave it
            u32 prev_flags;
            u32 prev_crit = Processor::current().clear_critical(prev_flags, !Processor::current().in_irq());
            Scheduler::yield();
            // Note, we may now be on a different CPU!
            Processor::current().restore_critical(prev_crit, prev_flags);
        }
    }
}
// Release one level of this lock. When the last shared holder or the
// outermost exclusive acquisition is released, the lock becomes
// Unlocked and one waiter is woken.
void Lock::unlock()
{
    auto current_thread = Thread::current();
    for (;;) {
        // Spin to acquire the internal state lock guarding
        // m_mode/m_holder/m_times_locked.
        bool expected = false;
        if (m_lock.compare_exchange_strong(expected, true, AK::memory_order_acq_rel)) {
            // Unlocking an unheld lock is a bug.
            ASSERT(m_times_locked);
            --m_times_locked;
            ASSERT(m_mode != Mode::Unlocked);
            // Only the holding thread may release an exclusive lock.
            if (m_mode == Mode::Exclusive)
                ASSERT(m_holder == current_thread);
            // Drop holder tracking when this thread no longer needs it:
            // always for shared mode, or once the exclusive nesting
            // count reaches zero.
            if (m_holder == current_thread && (m_mode == Mode::Shared || m_times_locked == 0))
                m_holder = nullptr;
            if (m_times_locked > 0) {
                // Still held (other shared holders, or recursive
                // exclusive depth remains): just release the state lock.
                m_lock.store(false, AK::memory_order_release);
                return;
            }
            // Fully released: mark Unlocked and wake one waiter.
            // &m_lock is handed to wake_one(), which presumably releases
            // it as part of the wakeup — TODO confirm against WaitQueue;
            // note there is no explicit m_lock.store(false) on this path.
            m_mode = Mode::Unlocked;
            m_queue.wake_one(&m_lock);
            return;
        }
        // I don't know *who* is using "m_lock", so just yield.
        // The assumption is that if we call this from a critical section
        // that we DO want to temporarily leave it
        u32 prev_flags;
        u32 prev_crit = Processor::current().clear_critical(prev_flags, false);
        Scheduler::yield();
        // Note, we may now be on a different CPU!
        Processor::current().restore_critical(prev_crit, prev_flags);
    }
}
// Forcibly release this lock if (and only if) the current thread holds
// it exclusively with a nesting depth of exactly 1. Returns true if the
// lock was released, false if the current thread was not the holder.
// NOTE(review): this mutates m_holder/m_mode/m_times_locked without
// acquiring the m_lock spinlock, relying only on ScopedCritical
// (disabled preemption on this CPU) — confirm this is safe against
// lock()/unlock() running concurrently on another CPU.
bool Lock::force_unlock_if_locked()
{
    // Shared locks are never force-unlocked here.
    ASSERT(m_mode != Mode::Shared);
    ScopedCritical critical;
    if (m_holder != Thread::current())
        return false;
    // Only a single (non-recursive) exclusive hold may be force-released.
    ASSERT(m_times_locked == 1);
    m_holder = nullptr;
    m_mode = Mode::Unlocked;
    m_times_locked = 0;
    // Wake one waiter; unlike unlock(), no &m_lock is passed to wake_one().
    m_queue.wake_one();
    return true;
}
// Discard all threads currently blocked on this lock's wait queue
// without waking them through the normal unlock path.
void Lock::clear_waiters()
{
    // Clearing waiters on a shared lock is not supported.
    ASSERT(m_mode != Mode::Shared);
    // Disable preemption on this CPU while mutating the queue.
    ScopedCritical critical;
    m_queue.clear();
}
  128. }