
Kernel: Add formal Processor::verify_no_spinlocks_held() API

In a few places we check `!Processor::in_critical()` to validate
that the current processor doesn't hold any kernel spinlocks.

Instead, let's give it a first-class name for readability.
I'll also be adding more of these checks, so I would rather
add usages of a nice API than more of this implicit/assumed logic.
Brian Gianforcaro, 2 years ago
commit 2079728a74
3 files changed, 20 insertions(+), 10 deletions(-)
  1. Kernel/Arch/aarch64/Processor.h (+5, -0)
  2. Kernel/Arch/x86/Processor.h (+5, -0)
  3. Kernel/Heap/kmalloc.cpp (+10, -10)

Kernel/Arch/aarch64/Processor.h (+5, -0)

@@ -147,6 +147,11 @@ public:
         return current().m_in_critical;
     }
 
+    ALWAYS_INLINE static void verify_no_spinlocks_held()
+    {
+        VERIFY(!Processor::in_critical());
+    }
+
     // FIXME: Actually return the idle thread once aarch64 supports threading.
     ALWAYS_INLINE static Thread* idle_thread()
     {

Kernel/Arch/x86/Processor.h (+5, -0)

@@ -370,6 +370,11 @@ public:
         return read_gs_ptr(__builtin_offsetof(Processor, m_in_critical));
     }
 
+    ALWAYS_INLINE static void verify_no_spinlocks_held()
+    {
+        VERIFY(!Processor::in_critical());
+    }
+
     ALWAYS_INLINE static FPUState const& clean_fpu_state() { return s_clean_fpu_state; }
 
     static void smp_enable();

Kernel/Heap/kmalloc.cpp (+10, -10)

@@ -410,14 +410,6 @@ void kmalloc_enable_expand()
     g_kmalloc_global->enable_expansion();
 }
 
-static inline void kmalloc_verify_nospinlock_held()
-{
-    // Catch bad callers allocating under spinlock.
-    if constexpr (KMALLOC_VERIFY_NO_SPINLOCK_HELD) {
-        VERIFY(!Processor::in_critical());
-    }
-}
-
 UNMAP_AFTER_INIT void kmalloc_init()
 {
     // Zero out heap since it's placed after end_of_kernel_bss.
@@ -429,7 +421,11 @@ UNMAP_AFTER_INIT void kmalloc_init()
 
 void* kmalloc(size_t size)
 {
-    kmalloc_verify_nospinlock_held();
+    // Catch bad callers allocating under spinlock.
+    if constexpr (KMALLOC_VERIFY_NO_SPINLOCK_HELD) {
+        Processor::verify_no_spinlocks_held();
+    }
+
     SpinlockLocker lock(s_lock);
     ++g_kmalloc_call_count;
 
@@ -472,7 +468,11 @@ void kfree_sized(void* ptr, size_t size)
 
     VERIFY(size > 0);
 
-    kmalloc_verify_nospinlock_held();
+    // Catch bad callers freeing under spinlock.
+    if constexpr (KMALLOC_VERIFY_NO_SPINLOCK_HELD) {
+        Processor::verify_no_spinlocks_held();
+    }
+
     SpinlockLocker lock(s_lock);
     ++g_kfree_call_count;
     ++g_nested_kfree_calls;
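
Worth noting: KMALLOC_VERIFY_NO_SPINLOCK_HELD gates the check with if constexpr, so when the flag is false the branch is discarded at compile time and the call sites cost nothing. Below is a standalone sketch of that gating; the names mirror the kernel's, but the definitions here are illustrative stand-ins, not the real kernel code.

    #include <cassert>
    #include <cstddef>

    // Stand-ins for the kernel's names, for illustration only.
    static constexpr bool KMALLOC_VERIFY_NO_SPINLOCK_HELD = true;
    #define VERIFY(expr) assert(expr)
    static bool in_critical() { return false; } // stand-in for Processor::in_critical()

    void* sketch_kmalloc(size_t size)
    {
        // When the constant is false, this statement is discarded:
        // it is still type-checked, but generates no code.
        if constexpr (KMALLOC_VERIFY_NO_SPINLOCK_HELD) {
            VERIFY(!in_critical());
        }
        // ... the real allocation would happen here ...
        (void)size;
        return nullptr;
    }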