Browse source code

Kernel: Move "in-scheduler" flag from SchedulerData to Processor

This avoids a race between getting the processor-specific SchedulerData
and accessing it. (Switching to a different CPU in that window means
that we're operating on the wrong SchedulerData.)

Co-authored-by: Tom <tomut@yahoo.com>
Andreas Kling 3 years ago
parent
commit
d9da513959
2 changed files with 24 additions and 22 deletions
  1. +11 −1
      Kernel/Arch/x86/Processor.h
  2. +13 −21
      Kernel/Scheduler.cpp

+ 11 - 1
Kernel/Arch/x86/Processor.h

@@ -25,7 +25,6 @@ struct ProcessorMessageEntry;
 
 enum class ProcessorSpecificDataID {
     MemoryManager,
-    Scheduler,
     __Count,
 };
 
@@ -138,6 +137,7 @@ class Processor {
 
     bool m_invoke_scheduler_async;
     bool m_scheduler_initialized;
+    bool m_in_scheduler { true };
     Atomic<bool> m_halt_requested;
 
     DeferredCallEntry* m_pending_deferred_calls; // in reverse order
@@ -344,6 +344,16 @@ public:
         write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), in_critical() + 1);
     }
 
+    ALWAYS_INLINE static bool current_in_scheduler()
+    {
+        return read_gs_value<decltype(m_in_scheduler)>(__builtin_offsetof(Processor, m_in_scheduler));
+    }
+
+    ALWAYS_INLINE static void set_current_in_scheduler(bool value)
+    {
+        write_gs_value<decltype(m_in_scheduler)>(__builtin_offsetof(Processor, m_in_scheduler), value);
+    }
+
 private:
     ALWAYS_INLINE void do_leave_critical()
     {

+ 13 - 21
Kernel/Scheduler.cpp

@@ -22,12 +22,6 @@
 
 namespace Kernel {
 
-struct SchedulerData {
-    static ProcessorSpecificDataID processor_specific_data_id() { return ProcessorSpecificDataID::Scheduler; }
-
-    bool in_scheduler { true };
-};
-
 RecursiveSpinlock g_scheduler_lock;
 
 static u32 time_slice_for(const Thread& thread)
@@ -195,7 +189,6 @@ UNMAP_AFTER_INIT void Scheduler::start()
     g_scheduler_lock.lock();
 
     auto& processor = Processor::current();
-    ProcessorSpecific<SchedulerData>::initialize();
     VERIFY(processor.is_initialized());
     auto& idle_thread = *Processor::idle_thread();
     VERIFY(processor.current_thread() == &idle_thread);
@@ -217,14 +210,13 @@ bool Scheduler::pick_next()
     // prevents a recursive call into Scheduler::invoke_async upon
     // leaving the scheduler lock.
     ScopedCritical critical;
-    ProcessorSpecific<SchedulerData>::get().in_scheduler = true;
+    Processor::set_current_in_scheduler(true);
     ScopeGuard guard(
         []() {
             // We may be on a different processor after we got switched
             // back to this thread!
-            auto& scheduler_data = ProcessorSpecific<SchedulerData>::get();
-            VERIFY(scheduler_data.in_scheduler);
-            scheduler_data.in_scheduler = false;
+            VERIFY(Processor::current_in_scheduler());
+            Processor::set_current_in_scheduler(false);
         });
 
     SpinlockLocker lock(g_scheduler_lock);
@@ -361,9 +353,9 @@ void Scheduler::leave_on_first_switch(u32 flags)
     // Scheduler::context_switch is not in the call stack we need to
     // clean up and release locks manually here
     g_scheduler_lock.unlock(flags);
-    auto& scheduler_data = ProcessorSpecific<SchedulerData>::get();
-    VERIFY(scheduler_data.in_scheduler);
-    scheduler_data.in_scheduler = false;
+
+    VERIFY(Processor::current_in_scheduler());
+    Processor::set_current_in_scheduler(false);
 }
 
 void Scheduler::prepare_after_exec()
@@ -371,9 +363,9 @@ void Scheduler::prepare_after_exec()
     // This is called after exec() when doing a context "switch" into
     // the new process. This is called from Processor::assume_context
     VERIFY(g_scheduler_lock.own_lock());
-    auto& scheduler_data = ProcessorSpecific<SchedulerData>::get();
-    VERIFY(!scheduler_data.in_scheduler);
-    scheduler_data.in_scheduler = true;
+
+    VERIFY(!Processor::current_in_scheduler());
+    Processor::set_current_in_scheduler(true);
 }
 
 void Scheduler::prepare_for_idle_loop()
@@ -382,9 +374,9 @@ void Scheduler::prepare_for_idle_loop()
     // and is about to run it. We need to acquire the scheduler lock
     VERIFY(!g_scheduler_lock.own_lock());
     g_scheduler_lock.lock();
-    auto& scheduler_data = ProcessorSpecific<SchedulerData>::get();
-    VERIFY(!scheduler_data.in_scheduler);
-    scheduler_data.in_scheduler = true;
+
+    VERIFY(!Processor::current_in_scheduler());
+    Processor::set_current_in_scheduler(true);
 }
 
 Process* Scheduler::colonel()
@@ -517,7 +509,7 @@ void Scheduler::invoke_async()
     // Since this function is called when leaving critical sections (such
     // as a Spinlock), we need to check if we're not already doing this
     // to prevent recursion
-    if (!ProcessorSpecific<SchedulerData>::get().in_scheduler)
+    if (!Processor::current_in_scheduler())
         pick_next();
 }