
Kernel/SMP: Change critical sections to not disable interrupts

Leave interrupts enabled so that we can still process IRQs. Critical
sections should only prevent preemption by another thread.

Co-authored-by: Tom <tomut@yahoo.com>
Andreas Kling 4 years ago
parent
commit
0a02496f04
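
The practical effect on callers is that the flags juggling disappears from the generic critical-section API. A rough before/after sketch of the caller side (based on the hunks below, not a complete listing):

    // Before this commit: entering a critical section also saved EFLAGS and disabled IRQs.
    u32 prev_flags;
    Processor::enter_critical(prev_flags);   // cpu_flags() + cli() + counter increment
    // ... work that must not be preempted ...
    Processor::leave_critical(prev_flags);   // counter decrement, then sti()/cli() from prev_flags

    // After this commit: only the per-CPU m_in_critical counter changes; IRQs keep firing.
    Processor::enter_critical();             // counter increment via a gs-relative write
    // ... work that must not be preempted ...
    Processor::leave_critical();             // counter decrement (deferred reschedule check at depth 0)

Code that genuinely needs interrupts off (SpinLock, do_exec) now saves cpu_flags() and calls cli()/sti() itself, as the later hunks show.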

+ 8 - 33
Kernel/Arch/x86/Processor.h

@@ -339,17 +339,13 @@ public:
         write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), critical);
     }
 
-    ALWAYS_INLINE static void enter_critical(u32& prev_flags)
+    ALWAYS_INLINE static void enter_critical()
     {
-        prev_flags = cpu_flags();
-        cli();
-        // NOTE: Up until this point we *could* have been preempted.
-        //       Now interrupts are disabled, so calling current() is safe.
-        AK::atomic_fetch_add(&current().m_in_critical, 1u, AK::MemoryOrder::memory_order_relaxed);
+        write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), in_critical() + 1);
     }
 
 private:
-    ALWAYS_INLINE void do_leave_critical(u32 prev_flags)
+    ALWAYS_INLINE void do_leave_critical()
     {
         VERIFY(m_in_critical > 0);
         if (m_in_critical == 1) {
@@ -363,52 +359,31 @@ private:
         } else {
             m_in_critical = m_in_critical - 1;
         }
-        if (prev_flags & 0x200)
-            sti();
-        else
-            cli();
     }
 
 public:
-    ALWAYS_INLINE static void leave_critical(u32 prev_flags)
+    ALWAYS_INLINE static void leave_critical()
     {
-        cli(); // Need to prevent IRQs from interrupting us here!
-        // NOTE: Up until this point we *could* have been preempted!
-        // Now interrupts are disabled, so calling current() is safe
-        current().do_leave_critical(prev_flags);
+        current().do_leave_critical();
     }
 
-    ALWAYS_INLINE static u32 clear_critical(u32& prev_flags, bool enable_interrupts)
+    ALWAYS_INLINE static u32 clear_critical()
     {
-        cli();
-        // NOTE: Up until this point we *could* have been preempted!
-        // Now interrupts are disabled, so calling current() is safe
-        // This doesn't have to be atomic, and it's also fine if we
-        // were to be preempted in between these steps (which should
-        // not happen due to the cli call), but if we moved to another
-        // processors m_in_critical would move along with us
-        auto prev_critical = read_gs_ptr(__builtin_offsetof(Processor, m_in_critical));
+        auto prev_critical = in_critical();
         write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), 0);
         auto& proc = current();
         if (!proc.m_in_irq)
             proc.check_invoke_scheduler();
-        if (enable_interrupts || (prev_flags & 0x200))
-            sti();
         return prev_critical;
     }
 
-    ALWAYS_INLINE static void restore_critical(u32 prev_critical, u32 prev_flags)
+    ALWAYS_INLINE static void restore_critical(u32 prev_critical)
     {
         // NOTE: This doesn't have to be atomic, and it's also fine if we
         // get preempted in between these steps. If we move to another
         // processors m_in_critical will move along with us. And if we
         // are preempted, we would resume with the same flags.
         write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), prev_critical);
-        VERIFY(!prev_critical || !(prev_flags & 0x200));
-        if (prev_flags & 0x200)
-            sti();
-        else
-            cli();
     }
 
     ALWAYS_INLINE static u32 in_critical()

+ 3 - 6
Kernel/Arch/x86/ScopedCritical.h

@@ -28,15 +28,13 @@ public:
     }
 
     ScopedCritical(ScopedCritical&& from)
-        : m_prev_flags(exchange(from.m_prev_flags, 0))
-        , m_valid(exchange(from.m_valid, false))
+        : m_valid(exchange(from.m_valid, false))
     {
     }
 
     ScopedCritical& operator=(ScopedCritical&& from)
     {
         if (&from != this) {
-            m_prev_flags = exchange(from.m_prev_flags, 0);
             m_valid = exchange(from.m_valid, false);
         }
         return *this;
@@ -46,18 +44,17 @@ public:
     {
         VERIFY(m_valid);
         m_valid = false;
-        Processor::leave_critical(m_prev_flags);
+        Processor::leave_critical();
     }
 
     void enter()
     {
         VERIFY(!m_valid);
         m_valid = true;
-        Processor::enter_critical(m_prev_flags);
+        Processor::enter_critical();
     }
 
 private:
-    u32 m_prev_flags { 0 };
     bool m_valid { false };
 };
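
With the saved-flags member gone, ScopedCritical is a pure RAII guard over the counter. An illustrative use (hypothetical function; the guard behaves the same for callers as before):

    #include <Kernel/Arch/x86/ScopedCritical.h>

    void do_work_without_preemption() // hypothetical caller inside the Kernel namespace
    {
        ScopedCritical critical;   // Processor::enter_critical()
        // preemption by another thread is prevented here, but IRQs are still delivered
    }                              // Processor::leave_critical() when `critical` goes out of scope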
 

+ 4 - 5
Kernel/Arch/x86/common/Processor.cpp

@@ -658,9 +658,9 @@ void Processor::exit_trap(TrapFrame& trap)
 
 void Processor::check_invoke_scheduler()
 {
+    InterruptDisabler disabler;
     VERIFY(!m_in_irq);
     VERIFY(!m_in_critical);
-    VERIFY_INTERRUPTS_DISABLED();
     VERIFY(&Processor::current() == this);
     if (m_invoke_scheduler_async && m_scheduler_initialized) {
         m_invoke_scheduler_async = false;
@@ -732,7 +732,7 @@ ProcessorMessage& Processor::smp_get_from_pool()
 
 u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
 {
-    VERIFY(Processor::in_critical());
+    VERIFY_INTERRUPTS_DISABLED();
     VERIFY(wake_count > 0);
     if (!s_smp_enabled)
         return 0;
@@ -817,8 +817,7 @@ bool Processor::smp_process_pending_messages()
     VERIFY(s_smp_enabled);
 
     bool did_process = false;
-    u32 prev_flags;
-    enter_critical(prev_flags);
+    enter_critical();
 
     if (auto pending_msgs = m_message_queue.exchange(nullptr, AK::MemoryOrder::memory_order_acq_rel)) {
         // We pulled the stack of pending messages in LIFO order, so we need to reverse the list first
@@ -882,7 +881,7 @@ bool Processor::smp_process_pending_messages()
         halt_this();
     }
 
-    leave_critical(prev_flags);
+    leave_critical();
     return did_process;
 }
 

+ 16 - 6
Kernel/Locking/SpinLock.h

@@ -23,8 +23,9 @@ public:
 
     ALWAYS_INLINE u32 lock()
     {
-        u32 prev_flags;
-        Processor::enter_critical(prev_flags);
+        u32 prev_flags = cpu_flags();
+        Processor::enter_critical();
+        cli();
         while (m_lock.exchange(1, AK::memory_order_acquire) != 0) {
             Processor::wait_check();
         }
@@ -35,7 +36,11 @@ public:
     {
         VERIFY(is_locked());
         m_lock.store(0, AK::memory_order_release);
-        Processor::leave_critical(prev_flags);
+        if (prev_flags & 0x200)
+            sti();
+        else
+            cli();
+        Processor::leave_critical();
     }
 
     [[nodiscard]] ALWAYS_INLINE bool is_locked() const
@@ -63,8 +68,9 @@ public:
     {
         auto& proc = Processor::current();
         FlatPtr cpu = FlatPtr(&proc);
-        u32 prev_flags;
-        Processor::enter_critical(prev_flags);
+        u32 prev_flags = cpu_flags();
+        Processor::enter_critical();
+        cli();
         FlatPtr expected = 0;
         while (!m_lock.compare_exchange_strong(expected, cpu, AK::memory_order_acq_rel)) {
             if (expected == cpu)
@@ -82,7 +88,11 @@ public:
         VERIFY(m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current()));
         if (--m_recursions == 0)
             m_lock.store(0, AK::memory_order_release);
-        Processor::leave_critical(prev_flags);
+        if (prev_flags & 0x200)
+            sti();
+        else
+            cli();
+        Processor::leave_critical();
     }
 
     [[nodiscard]] ALWAYS_INLINE bool is_locked() const
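
Interrupt masking has not disappeared; it has moved into the spinlock itself. lock() records cpu_flags() and then cli()s after entering the critical section, and unlock() restores the interrupt flag before leaving it. A caller-side sketch (hypothetical lock instance and function; assumes lock() returns the saved flags, matching the u32 return type above):

    SpinLock<u8> s_example_lock; // hypothetical instance

    void touch_shared_state()
    {
        u32 prev_flags = s_example_lock.lock();  // enter_critical(), then cli()
        // ... state protected from both other CPUs and local IRQs ...
        s_example_lock.unlock(prev_flags);       // sti() or cli() per prev_flags, then leave_critical()
    }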

+ 6 - 2
Kernel/Syscalls/execve.cpp

@@ -630,7 +630,9 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
     // We enter a critical section here because we don't want to get interrupted between do_exec()
     // and Processor::assume_context() or the next context switch.
     // If we used an InterruptDisabler that sti()'d on exit, we might timer tick'd too soon in exec().
-    Processor::current().enter_critical(prev_flags);
+    Processor::enter_critical();
+    prev_flags = cpu_flags();
+    cli();
 
     // NOTE: Be careful to not trigger any page faults below!
 
@@ -927,7 +929,9 @@ KResult Process::exec(String path, Vector<String> arguments, Vector<String> envi
         VERIFY_NOT_REACHED();
     }
 
-    Processor::leave_critical(prev_flags);
+    if (prev_flags & 0x200)
+        sti();
+    Processor::leave_critical();
     return KSuccess;
 }
 

+ 5 - 8
Kernel/Thread.cpp

@@ -388,8 +388,7 @@ void Thread::die_if_needed()
 
     // Now leave the critical section so that we can also trigger the
     // actual context switch
-    u32 prev_flags;
-    Processor::clear_critical(prev_flags, false);
+    Processor::clear_critical();
     dbgln("die_if_needed returned from clear_critical!!! in irq: {}", Processor::current().in_irq());
     // We should never get here, but the scoped scheduler lock
     // will be released by Scheduler::context_switch again
@@ -420,10 +419,9 @@ void Thread::yield_assuming_not_holding_big_lock()
     // Disable interrupts here. This ensures we don't accidentally switch contexts twice
     InterruptDisabler disable;
     Scheduler::yield(); // flag a switch
-    u32 prev_flags;
-    u32 prev_crit = Processor::clear_critical(prev_flags, true);
+    u32 prev_critical = Processor::clear_critical();
     // NOTE: We may be on a different CPU now!
-    Processor::restore_critical(prev_crit, prev_flags);
+    Processor::restore_critical(prev_critical);
 }
 
 void Thread::yield_and_release_relock_big_lock()
@@ -451,13 +449,12 @@ void Thread::relock_process(LockMode previous_locked, u32 lock_count_to_restore)
     // flagged by calling Scheduler::yield above.
     // We have to do it this way because we intentionally
     // leave the critical section here to be able to switch contexts.
-    u32 prev_flags;
-    u32 prev_crit = Processor::clear_critical(prev_flags, true);
+    u32 prev_critical = Processor::clear_critical();
 
     // CONTEXT SWITCH HAPPENS HERE!
 
     // NOTE: We may be on a different CPU now!
-    Processor::restore_critical(prev_crit, prev_flags);
+    Processor::restore_critical(prev_critical);
 
     if (previous_locked != LockMode::Unlocked) {
         // We've unblocked, relock the process if needed and carry on.
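
clear_critical() and restore_critical() lose their flags handling as well; Thread.cpp now simply drops the critical depth so the scheduler can switch away and reinstates it once the thread resumes. Schematically (the pattern from the hunks above, not new code):

    u32 prev_critical = Processor::clear_critical();   // depth -> 0, may invoke the scheduler
    // CONTEXT SWITCH CAN HAPPEN HERE; we may resume on a different CPU
    Processor::restore_critical(prev_critical);        // reinstate the previous nesting depth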