
Kernel: Make entering and leaving critical sections atomic

We also need to store m_in_critical in the Thread upon switching,
and we need to restore it. This solves a problem where threads
moving between different processors could end up with an unexpected
value.
Tom, 4 years ago
commit f88a8b16d7
3 changed files with 25 additions and 13 deletions
  1. Kernel/Arch/i386/CPU.cpp (+3 -0)
  2. Kernel/Arch/i386/CPU.h (+18 -13)
  3. Kernel/Thread.h (+4 -0)
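
Before the per-file diffs, here is a minimal standalone sketch of the idea the commit message describes, with Cpu and Task standing in for Processor and Thread, and std::atomic in place of AK::Atomic (simplified, not the kernel's actual code): the per-CPU critical-section depth is saved into the outgoing thread and restored from the incoming one, so a thread that resumes on a different processor gets back its own count instead of inheriting whatever that processor last held.

    // Simplified sketch only; the kernel uses AK::Atomic, this uses std::atomic.
    #include <atomic>
    #include <cstdint>

    struct Task {
        uint32_t saved_critical { 1 }; // mirrors Thread::m_saved_critical's default in the diff
    };

    struct Cpu {
        std::atomic<uint32_t> in_critical { 0 }; // per-CPU critical-section depth

        void switch_context(Task& from, Task& to)
        {
            // Remember how deeply nested the outgoing task was on this CPU...
            from.saved_critical = in_critical.load(std::memory_order_relaxed);
            // ...(registers and stacks would be switched here)...
            // ...and hand the incoming task back its own depth, which may have
            // been recorded on a different CPU entirely.
            in_critical.store(to.saved_critical, std::memory_order_relaxed);
        }
    };

    int main()
    {
        Cpu cpu;
        Task a, b;
        cpu.in_critical.store(2, std::memory_order_relaxed); // 'a' holds two nested critical sections
        cpu.switch_context(a, b); // a's depth (2) saved, b's depth (1) restored
        cpu.switch_context(b, a); // and back again
        return cpu.in_critical.load(std::memory_order_relaxed) == 2 ? 0 : 1;
    }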

Kernel/Arch/i386/CPU.cpp (+3 -0)

@@ -1291,6 +1291,7 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
         write_cr3(to_tss.cr3);
 
     to_thread->set_cpu(processor.id());
+    processor.restore_in_critical(to_thread->saved_critical());
 
     asm volatile("fxrstor %0"
         ::"m"(to_thread->fpu_state()));
@@ -1308,6 +1309,7 @@ void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
     ASSERT(is_kernel_mode());
 
     dbgln<CONTEXT_SWITCH_DEBUG>("switch_context --> switching out of: {} {}", VirtualAddress(from_thread), *from_thread);
+    from_thread->save_critical(m_in_critical);
 
     // Switch to new thread context, passing from_thread and to_thread
     // through to the new context using registers edx and eax
@@ -1347,6 +1349,7 @@ void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
           [to_eip] "c" (to_thread->tss().eip),
           [from_thread] "d" (from_thread),
           [to_thread] "a" (to_thread)
+        : "memory"
     );
 
     dbgln<CONTEXT_SWITCH_DEBUG>("switch_context <-- from {} {} to {} {}", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
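
One detail worth noting in the hunk above: the added "memory" clobber on the switch_context inline asm acts as a compiler-level barrier, so the compiler may not keep memory values cached in registers across the switch, during which another thread (possibly on another CPU) may have changed them. A tiny illustration of the effect, unrelated to the kernel's code:

    #include <cstdio>

    int shared_counter = 0;

    int read_around_barrier()
    {
        int before = shared_counter;
        // An empty asm with a "memory" clobber tells the compiler that memory may
        // have changed: it must reload values instead of reusing cached copies.
        asm volatile("" ::: "memory");
        int after = shared_counter; // reloaded from memory, not folded into 'before'
        return before + after;
    }

    int main()
    {
        std::printf("%d\n", read_around_barrier());
        return 0;
    }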

Kernel/Arch/i386/CPU.h (+18 -13)

@@ -708,7 +708,7 @@ class Processor {
 
     u32 m_cpu;
     u32 m_in_irq;
-    u32 m_in_critical;
+    Atomic<u32, AK::MemoryOrder::memory_order_relaxed> m_in_critical;
 
     TSS32 m_tss;
     static FPUState s_clean_fpu_state;
@@ -876,15 +876,16 @@ public:
     {
         ASSERT(prev_irq <= m_in_irq);
         if (!prev_irq) {
-            if (m_in_critical == 0) {
-                auto prev_critical = m_in_critical++;
+            u32 prev_critical = 0;
+            if (m_in_critical.compare_exchange_strong(prev_critical, 1)) {
                 m_in_irq = prev_irq;
                 deferred_call_execute_pending();
-                ASSERT(m_in_critical == prev_critical + 1);
-                m_in_critical = prev_critical;
-            }
-            if (!m_in_critical)
+                auto prev_raised = m_in_critical.exchange(prev_critical);
+                ASSERT(prev_raised == prev_critical + 1);
+                check_invoke_scheduler();
+            } else if (prev_critical == 0) {
                 check_invoke_scheduler();
+            }
         } else {
             m_in_irq = prev_irq;
         }
@@ -895,11 +896,16 @@ public:
         return m_in_irq;
     }
 
+    ALWAYS_INLINE void restore_in_critical(u32 critical)
+    {
+        m_in_critical = critical;
+    }
+
     ALWAYS_INLINE void enter_critical(u32& prev_flags)
     {
-        m_in_critical++;
         prev_flags = cpu_flags();
         cli();
+        m_in_critical++;
     }
 
     ALWAYS_INLINE void leave_critical(u32 prev_flags)
@@ -925,9 +931,8 @@ public:
 
     ALWAYS_INLINE u32 clear_critical(u32& prev_flags, bool enable_interrupts)
     {
-        u32 prev_crit = m_in_critical;
-        m_in_critical = 0;
         prev_flags = cpu_flags();
+        u32 prev_crit = m_in_critical.exchange(0, AK::MemoryOrder::memory_order_acquire);
         if (!m_in_irq)
             check_invoke_scheduler();
         if (enable_interrupts)
@@ -937,15 +942,15 @@ public:
 
     ALWAYS_INLINE void restore_critical(u32 prev_crit, u32 prev_flags)
     {
-        ASSERT(m_in_critical == 0);
-        m_in_critical = prev_crit;
+        m_in_critical.store(prev_crit, AK::MemoryOrder::memory_order_release);
+        ASSERT(!prev_crit || !(prev_flags & 0x200));
         if (prev_flags & 0x200)
             sti();
         else
             cli();
     }
 
-    ALWAYS_INLINE u32& in_critical() { return m_in_critical; }
+    ALWAYS_INLINE u32 in_critical() { return m_in_critical.load(); }
 
     ALWAYS_INLINE const FPUState& clean_fpu_state() const
     {
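
The IRQ-exit hunk above replaces a plain check-then-increment with a compare-and-swap, so only the path that successfully raises the count from 0 to 1 drains deferred calls. A standalone sketch of that pattern, assuming std::atomic and stubbed-out kernel calls (deferred_call_execute_pending and check_invoke_scheduler below are placeholders, not the kernel's functions):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    static std::atomic<uint32_t> in_critical { 0 };

    static void deferred_call_execute_pending() { std::puts("draining deferred calls"); }
    static void check_invoke_scheduler() { std::puts("maybe reschedule"); }

    static void exit_irq_path()
    {
        uint32_t prev_critical = 0;
        // Atomically raise the count from 0 to 1; only one path can win this race.
        if (in_critical.compare_exchange_strong(prev_critical, 1)) {
            deferred_call_execute_pending();
            // Drop back to the previous depth; the kernel asserts the counter
            // was not changed underneath it in the meantime.
            in_critical.exchange(prev_critical);
            check_invoke_scheduler();
        } else if (prev_critical == 0) {
            // On failure, compare_exchange_strong writes the observed count into
            // prev_critical; the scheduler is only poked when the CPU was not
            // already inside a critical section.
            check_invoke_scheduler();
        }
    }

    int main()
    {
        exit_irq_path();
        return 0;
    }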

Kernel/Thread.h (+4 -0)

@@ -1065,6 +1065,9 @@ public:
         m_is_active.store(active, AK::memory_order_release);
     }
 
+    u32 saved_critical() const { return m_saved_critical; }
+    void save_critical(u32 critical) { m_saved_critical = critical; }
+
     [[nodiscard]] bool is_active() const
     {
         return m_is_active.load(AK::MemoryOrder::memory_order_acquire);
@@ -1239,6 +1242,7 @@ private:
     ThreadID m_tid { -1 };
     TSS32 m_tss;
     TrapFrame* m_current_trap { nullptr };
+    u32 m_saved_critical { 1 };
     Atomic<u32> m_cpu { 0 };
     u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
     u32 m_ticks_left { 0 };
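
A note on the default value: m_saved_critical starts at 1 rather than 0, presumably because a brand-new thread is first switched to from inside a critical section held by the scheduler, so the restore in enter_thread_context() should report a depth of 1 for the switch still in progress. A hypothetical illustration of the accessors the diff adds (Task below is a stand-in, not Kernel::Thread):

    #include <cstdint>

    struct Task { // stand-in for Kernel::Thread
        uint32_t m_saved_critical { 1 }; // mirrors the diff's default

        uint32_t saved_critical() const { return m_saved_critical; }
        void save_critical(uint32_t critical) { m_saved_critical = critical; }
    };

    int main()
    {
        Task fresh_task;
        // First switch-in: the restored depth is 1, matching the critical
        // section the context switch itself is running under.
        return fresh_task.saved_critical() == 1 ? 0 : 1;
    }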