
Kernel: Move spinlock into Arch

Spinlocks are tied to the platform they are built for, which is why they
have been moved into the Arch folder. They are still available via
"Locking/Spinlock.h".

An AArch64 stub has been created.
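
Since the new dispatch header simply selects the per-architecture implementation, existing call sites should not need to change. A minimal usage sketch, assuming SpinlockLocker's constructor takes the lock by reference and calls lock()/unlock() around the guarded scope (the g_example_lock name and example_critical_section function are purely illustrative and not part of this commit):

    // Consumers keep including <Kernel/Locking/Spinlock.h>, which now pulls in
    // the architecture-specific class via <Kernel/Arch/Spinlock.h>.
    #include <Kernel/Locking/Spinlock.h>

    namespace Kernel {

    // Illustrative lock; LockRank::None matches the constructor's default argument.
    static Spinlock g_example_lock { LockRank::None };

    void example_critical_section()
    {
        // SpinlockLocker stores the flags returned by lock() and hands them
        // back to unlock() when it goes out of scope.
        SpinlockLocker locker(g_example_lock);
        // ... work that must not run concurrently on another processor ...
    }

    }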
James Mintram 3 years ago
parent
commit
e8f09279d3
4 changed files with 229 additions and 118 deletions
  1. Kernel/Arch/Spinlock.h (+17, -0)
  2. Kernel/Arch/aarch64/Spinlock.h (+78, -0)
  3. Kernel/Arch/x86/Spinlock.h (+133, -0)
  4. Kernel/Locking/Spinlock.h (+1, -118)

+ 17 - 0
Kernel/Arch/Spinlock.h

@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <AK/Platform.h>
+
+#if ARCH(X86_64) || ARCH(I386)
+#    include <Kernel/Arch/x86/Spinlock.h>
+#elif ARCH(AARCH64)
+#    include <Kernel/Arch/aarch64/Spinlock.h>
+#else
+#    error "Unknown architecture"
+#endif

+ 78 - 0
Kernel/Arch/aarch64/Spinlock.h

@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <AK/Noncopyable.h>
+#include <AK/Types.h>
+#include <Kernel/Locking/LockRank.h>
+
+namespace Kernel {
+
+class Spinlock {
+    AK_MAKE_NONCOPYABLE(Spinlock);
+    AK_MAKE_NONMOVABLE(Spinlock);
+
+public:
+    Spinlock(LockRank rank = LockRank::None)
+    {
+        (void)rank;
+    }
+
+    ALWAYS_INLINE u32 lock()
+    {
+        return 0;
+    }
+
+    ALWAYS_INLINE void unlock(u32 /*prev_flags*/)
+    {
+    }
+
+    [[nodiscard]] ALWAYS_INLINE bool is_locked() const
+    {
+        return false;
+    }
+
+    ALWAYS_INLINE void initialize()
+    {
+    }
+};
+
+class RecursiveSpinlock {
+    AK_MAKE_NONCOPYABLE(RecursiveSpinlock);
+    AK_MAKE_NONMOVABLE(RecursiveSpinlock);
+
+public:
+    RecursiveSpinlock(LockRank rank = LockRank::None)
+    {
+        (void)rank;
+    }
+
+    ALWAYS_INLINE u32 lock()
+    {
+        return 0;
+    }
+
+    ALWAYS_INLINE void unlock(u32 /*prev_flags*/)
+    {
+    }
+
+    [[nodiscard]] ALWAYS_INLINE bool is_locked() const
+    {
+        return false;
+    }
+
+    [[nodiscard]] ALWAYS_INLINE bool is_locked_by_current_processor() const
+    {
+        return false;
+    }
+
+    ALWAYS_INLINE void initialize()
+    {
+    }
+};
+
+}

+ 133 - 0
Kernel/Arch/x86/Spinlock.h

@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <Kernel/Arch/Processor.h>
+#include <Kernel/Locking/LockRank.h>
+
+namespace Kernel {
+
+class Spinlock {
+    AK_MAKE_NONCOPYABLE(Spinlock);
+    AK_MAKE_NONMOVABLE(Spinlock);
+
+public:
+    Spinlock(LockRank rank = LockRank::None)
+        : m_rank(rank)
+    {
+    }
+
+    ALWAYS_INLINE u32 lock()
+    {
+        u32 prev_flags = cpu_flags();
+        Processor::enter_critical();
+        cli();
+        while (m_lock.exchange(1, AK::memory_order_acquire) != 0) {
+            Processor::wait_check();
+        }
+        track_lock_acquire(m_rank);
+        return prev_flags;
+    }
+
+    ALWAYS_INLINE void unlock(u32 prev_flags)
+    {
+        VERIFY(is_locked());
+        track_lock_release(m_rank);
+        m_lock.store(0, AK::memory_order_release);
+        if (prev_flags & 0x200)
+            sti();
+        else
+            cli();
+
+        Processor::leave_critical();
+    }
+
+    [[nodiscard]] ALWAYS_INLINE bool is_locked() const
+    {
+        return m_lock.load(AK::memory_order_relaxed) != 0;
+    }
+
+    ALWAYS_INLINE void initialize()
+    {
+        m_lock.store(0, AK::memory_order_relaxed);
+    }
+
+private:
+    Atomic<u8> m_lock { 0 };
+    const LockRank m_rank;
+};
+
+class RecursiveSpinlock {
+    AK_MAKE_NONCOPYABLE(RecursiveSpinlock);
+    AK_MAKE_NONMOVABLE(RecursiveSpinlock);
+
+public:
+    RecursiveSpinlock(LockRank rank = LockRank::None)
+        : m_rank(rank)
+    {
+    }
+
+    ALWAYS_INLINE u32 lock()
+    {
+        u32 prev_flags = cpu_flags();
+        cli();
+        Processor::enter_critical();
+        auto& proc = Processor::current();
+        FlatPtr cpu = FlatPtr(&proc);
+        FlatPtr expected = 0;
+        while (!m_lock.compare_exchange_strong(expected, cpu, AK::memory_order_acq_rel)) {
+            if (expected == cpu)
+                break;
+            Processor::wait_check();
+            expected = 0;
+        }
+        if (m_recursions == 0)
+            track_lock_acquire(m_rank);
+        m_recursions++;
+        return prev_flags;
+    }
+
+    ALWAYS_INLINE void unlock(u32 prev_flags)
+    {
+        VERIFY(m_recursions > 0);
+        VERIFY(m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current()));
+        if (--m_recursions == 0) {
+            track_lock_release(m_rank);
+            m_lock.store(0, AK::memory_order_release);
+        }
+        if (prev_flags & 0x200)
+            sti();
+        else
+            cli();
+
+        Processor::leave_critical();
+    }
+
+    [[nodiscard]] ALWAYS_INLINE bool is_locked() const
+    {
+        return m_lock.load(AK::memory_order_relaxed) != 0;
+    }
+
+    [[nodiscard]] ALWAYS_INLINE bool is_locked_by_current_processor() const
+    {
+        return m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current());
+    }
+
+    ALWAYS_INLINE void initialize()
+    {
+        m_lock.store(0, AK::memory_order_relaxed);
+    }
+
+private:
+    Atomic<FlatPtr> m_lock { 0 };
+    u32 m_recursions { 0 };
+    const LockRank m_rank;
+};
+
+}

+ 1 - 118
Kernel/Locking/Spinlock.h

@@ -8,128 +8,11 @@
 
 #include <AK/Atomic.h>
 #include <AK/Types.h>
-#include <Kernel/Arch/Processor.h>
+#include <Kernel/Arch/Spinlock.h>
 #include <Kernel/Locking/LockRank.h>
 
 namespace Kernel {
 
-class Spinlock {
-    AK_MAKE_NONCOPYABLE(Spinlock);
-    AK_MAKE_NONMOVABLE(Spinlock);
-
-public:
-    Spinlock(LockRank rank = LockRank::None)
-        : m_rank(rank)
-    {
-    }
-
-    ALWAYS_INLINE u32 lock()
-    {
-        u32 prev_flags = cpu_flags();
-        Processor::enter_critical();
-        cli();
-        while (m_lock.exchange(1, AK::memory_order_acquire) != 0) {
-            Processor::wait_check();
-        }
-        track_lock_acquire(m_rank);
-        return prev_flags;
-    }
-
-    ALWAYS_INLINE void unlock(u32 prev_flags)
-    {
-        VERIFY(is_locked());
-        track_lock_release(m_rank);
-        m_lock.store(0, AK::memory_order_release);
-        if (prev_flags & 0x200)
-            sti();
-        else
-            cli();
-
-        Processor::leave_critical();
-    }
-
-    [[nodiscard]] ALWAYS_INLINE bool is_locked() const
-    {
-        return m_lock.load(AK::memory_order_relaxed) != 0;
-    }
-
-    ALWAYS_INLINE void initialize()
-    {
-        m_lock.store(0, AK::memory_order_relaxed);
-    }
-
-private:
-    Atomic<u8> m_lock { 0 };
-    const LockRank m_rank;
-};
-
-class RecursiveSpinlock {
-    AK_MAKE_NONCOPYABLE(RecursiveSpinlock);
-    AK_MAKE_NONMOVABLE(RecursiveSpinlock);
-
-public:
-    RecursiveSpinlock(LockRank rank = LockRank::None)
-        : m_rank(rank)
-    {
-    }
-
-    ALWAYS_INLINE u32 lock()
-    {
-        u32 prev_flags = cpu_flags();
-        cli();
-        Processor::enter_critical();
-        auto& proc = Processor::current();
-        FlatPtr cpu = FlatPtr(&proc);
-        FlatPtr expected = 0;
-        while (!m_lock.compare_exchange_strong(expected, cpu, AK::memory_order_acq_rel)) {
-            if (expected == cpu)
-                break;
-            Processor::wait_check();
-            expected = 0;
-        }
-        if (m_recursions == 0)
-            track_lock_acquire(m_rank);
-        m_recursions++;
-        return prev_flags;
-    }
-
-    ALWAYS_INLINE void unlock(u32 prev_flags)
-    {
-        VERIFY(m_recursions > 0);
-        VERIFY(m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current()));
-        if (--m_recursions == 0) {
-            track_lock_release(m_rank);
-            m_lock.store(0, AK::memory_order_release);
-        }
-        if (prev_flags & 0x200)
-            sti();
-        else
-            cli();
-
-        Processor::leave_critical();
-    }
-
-    [[nodiscard]] ALWAYS_INLINE bool is_locked() const
-    {
-        return m_lock.load(AK::memory_order_relaxed) != 0;
-    }
-
-    [[nodiscard]] ALWAYS_INLINE bool is_locked_by_current_processor() const
-    {
-        return m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current());
-    }
-
-    ALWAYS_INLINE void initialize()
-    {
-        m_lock.store(0, AK::memory_order_relaxed);
-    }
-
-private:
-    Atomic<FlatPtr> m_lock { 0 };
-    u32 m_recursions { 0 };
-    const LockRank m_rank;
-};
-
 template<typename LockType>
 class [[nodiscard]] SpinlockLocker {
     AK_MAKE_NONCOPYABLE(SpinlockLocker);