Spinlock.h

/*
 * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Atomic.h>
#include <AK/Types.h>
#include <Kernel/Arch/x86/Processor.h>
#include <Kernel/Forward.h>

namespace Kernel {
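
// Spinlock: a simple busy-waiting lock. Acquiring it disables interrupts on
// the current CPU and enters a critical section, so the holder cannot be
// preempted; the caller's previous interrupt state is returned from lock()
// and restored by unlock().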
template<typename BaseType = u32>
class Spinlock {
    AK_MAKE_NONCOPYABLE(Spinlock);
    AK_MAKE_NONMOVABLE(Spinlock);

public:
    Spinlock() = default;

    ALWAYS_INLINE u32 lock()
    {
        // Save the caller's EFLAGS, then disable interrupts for the
        // duration of the critical section.
        u32 prev_flags = cpu_flags();
        Processor::enter_critical();
        cli();
        // Spin until we atomically swap the lock word from 0 to 1.
        while (m_lock.exchange(1, AK::memory_order_acquire) != 0) {
            Processor::wait_check();
        }
        return prev_flags;
    }

    ALWAYS_INLINE void unlock(u32 prev_flags)
    {
        VERIFY(is_locked());
        m_lock.store(0, AK::memory_order_release);
        // 0x200 is the IF (interrupt enable) bit in EFLAGS; restore the
        // interrupt state the caller had before lock().
        if (prev_flags & 0x200)
            sti();
        else
            cli();
        Processor::leave_critical();
    }

    [[nodiscard]] ALWAYS_INLINE bool is_locked() const
    {
        return m_lock.load(AK::memory_order_relaxed) != 0;
    }

    ALWAYS_INLINE void initialize()
    {
        m_lock.store(0, AK::memory_order_relaxed);
    }

private:
    Atomic<BaseType> m_lock { 0 };
};
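
// RecursiveSpinlock: like Spinlock, but the lock word stores the address of
// the owning Processor, so the same CPU may acquire it repeatedly; the lock
// is only released once the matching number of unlock() calls has been made.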
class RecursiveSpinlock {
    AK_MAKE_NONCOPYABLE(RecursiveSpinlock);
    AK_MAKE_NONMOVABLE(RecursiveSpinlock);

public:
    RecursiveSpinlock() = default;

    ALWAYS_INLINE u32 lock()
    {
        u32 prev_flags = cpu_flags();
        cli();
        Processor::enter_critical();
        auto& proc = Processor::current();
        FlatPtr cpu = FlatPtr(&proc);
        FlatPtr expected = 0;
        // Try to swap the lock word from 0 to our Processor's address. If it
        // already holds our address, we own the lock and may re-enter.
        while (!m_lock.compare_exchange_strong(expected, cpu, AK::memory_order_acq_rel)) {
            if (expected == cpu)
                break;
            Processor::wait_check();
            expected = 0;
        }
        m_recursions++;
        return prev_flags;
    }

    ALWAYS_INLINE void unlock(u32 prev_flags)
    {
        VERIFY(m_recursions > 0);
        VERIFY(m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current()));
        // Only release the lock word once the outermost unlock() is reached.
        if (--m_recursions == 0)
            m_lock.store(0, AK::memory_order_release);
        // Restore the caller's interrupt state (IF is bit 9 of EFLAGS).
        if (prev_flags & 0x200)
            sti();
        else
            cli();
        Processor::leave_critical();
    }

    [[nodiscard]] ALWAYS_INLINE bool is_locked() const
    {
        return m_lock.load(AK::memory_order_relaxed) != 0;
    }

    [[nodiscard]] ALWAYS_INLINE bool is_locked_by_current_processor() const
    {
        return m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current());
    }

    ALWAYS_INLINE void initialize()
    {
        m_lock.store(0, AK::memory_order_relaxed);
    }

private:
    Atomic<FlatPtr> m_lock { 0 };
    u32 m_recursions { 0 };
};
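
// SpinlockLocker: RAII guard for either lock type. It acquires the lock on
// construction and releases it (restoring the saved interrupt state) on
// destruction, unless ownership was moved away or unlock() was called.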
template<typename LockType>
class [[nodiscard]] SpinlockLocker {
    AK_MAKE_NONCOPYABLE(SpinlockLocker);

public:
    SpinlockLocker() = delete;
    SpinlockLocker& operator=(SpinlockLocker&&) = delete;

    SpinlockLocker(LockType& lock)
        : m_lock(&lock)
    {
        VERIFY(m_lock);
        m_prev_flags = m_lock->lock();
        m_have_lock = true;
    }

    // Move construction transfers lock ownership to the new locker and
    // leaves the source in a disarmed state.
    SpinlockLocker(SpinlockLocker&& from)
        : m_lock(from.m_lock)
        , m_prev_flags(from.m_prev_flags)
        , m_have_lock(from.m_have_lock)
    {
        from.m_lock = nullptr;
        from.m_prev_flags = 0;
        from.m_have_lock = false;
    }

    ~SpinlockLocker()
    {
        if (m_lock && m_have_lock) {
            m_lock->unlock(m_prev_flags);
        }
    }

    ALWAYS_INLINE void lock()
    {
        VERIFY(m_lock);
        VERIFY(!m_have_lock);
        m_prev_flags = m_lock->lock();
        m_have_lock = true;
    }

    ALWAYS_INLINE void unlock()
    {
        VERIFY(m_lock);
        VERIFY(m_have_lock);
        m_lock->unlock(m_prev_flags);
        m_prev_flags = 0;
        m_have_lock = false;
    }

    [[nodiscard]] ALWAYS_INLINE bool have_lock() const
    {
        return m_have_lock;
    }

private:
    LockType* m_lock { nullptr };
    u32 m_prev_flags { 0 };
    bool m_have_lock { false };
};
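
// A minimal usage sketch (illustrative only; `s_lock`, `s_count`, and
// `increment_count` are hypothetical names, not part of this header):
//
//     static Spinlock s_lock;
//     static size_t s_count { 0 };
//
//     void increment_count()
//     {
//         SpinlockLocker locker(s_lock);
//         ++s_count;
//     } // locker's destructor releases s_lock and restores interrupt state
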
}