Kernel: Fix Thread::relock_process leaving critical section

We don't want to explicitly enable interrupts when leaving the
critical section to trigger a context switch.
Tom, 4 years ago
commit e9e76b8074
2 changed files with 11 additions and 9 deletions:
  Kernel/Scheduler.cpp  +6 -3
  Kernel/Thread.cpp     +5 -6
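
For orientation, this is the shape of the affected code after the change, pieced together from the hunks below (surrounding kernel code is omitted, so treat it as an illustrative excerpt rather than a compilable unit). Thread::relock_process no longer clears and restores the critical section itself (the old path asked clear_critical() to explicitly re-enable interrupts); instead it delegates to Scheduler::yield_from_critical(), which clears the critical section while asking clear_critical() not to enable interrupts, lets the context switch happen, and restores the previous critical-section depth afterwards:

void Scheduler::yield_from_critical()
{
    auto& proc = Processor::current();
    auto before_critical = proc.in_critical();
    ASSERT(before_critical);
    ASSERT(!proc.in_irq());

    yield(); // Flag a context switch

    u32 prev_flags;
    // Second argument 'false': leave interrupts alone (see commit message).
    u32 prev_crit = proc.clear_critical(prev_flags, false);

    // Note, we may now be on a different CPU!
    auto& new_proc = Processor::current();
    new_proc.restore_critical(prev_crit, prev_flags);
    ASSERT(before_critical == new_proc.in_critical());
}

void Thread::relock_process(LockMode previous_locked, u32 lock_count_to_restore)
{
    // ... (earlier part of the function unchanged and omitted here)
    auto critical_before = Processor::current().in_critical();
    ASSERT(critical_before);

    Scheduler::yield_from_critical();

    ASSERT(Processor::current().in_critical() == critical_before);
    if (previous_locked != LockMode::Unlocked) {
        // We've unblocked, relock the process if needed and carry on.
        RESTORE_LOCK(process().big_lock(), previous_locked, lock_count_to_restore);
        ASSERT(Processor::current().in_critical() == critical_before);
    }
}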

Kernel/Scheduler.cpp (+6 -3)

@@ -583,16 +583,19 @@ void Scheduler::invoke_async()
 void Scheduler::yield_from_critical()
 {
     auto& proc = Processor::current();
-    ASSERT(proc.in_critical());
+    auto before_critical = proc.in_critical();
+    ASSERT(before_critical);
     ASSERT(!proc.in_irq());
 
     yield(); // Flag a context switch
 
     u32 prev_flags;
-    u32 prev_crit = Processor::current().clear_critical(prev_flags, false);
+    u32 prev_crit = proc.clear_critical(prev_flags, false);
 
     // Note, we may now be on a different CPU!
-    Processor::current().restore_critical(prev_crit, prev_flags);
+    auto& new_proc = Processor::current();
+    new_proc.restore_critical(prev_crit, prev_flags);
+    ASSERT(before_critical == new_proc.in_critical());
 }
 
 void Scheduler::notify_finalizer()

Kernel/Thread.cpp (+5 -6)

@@ -305,17 +305,16 @@ void Thread::relock_process(LockMode previous_locked, u32 lock_count_to_restore)
     // flagged by calling Scheduler::donate_to or Scheduler::yield
     // above. We have to do it this way because we intentionally
     // leave the critical section here to be able to switch contexts.
-    u32 prev_flags;
-    u32 prev_crit = Processor::current().clear_critical(prev_flags, true);
-
-    // CONTEXT SWITCH HAPPENS HERE!
+    auto critical_before = Processor::current().in_critical();
+    ASSERT(critical_before);
 
-    // NOTE: We may be on a different CPU now!
-    Processor::current().restore_critical(prev_crit, prev_flags);
+    Scheduler::yield_from_critical();
 
+    ASSERT(Processor::current().in_critical() == critical_before);
     if (previous_locked != LockMode::Unlocked) {
         // We've unblocked, relock the process if needed and carry on.
         RESTORE_LOCK(process().big_lock(), previous_locked, lock_count_to_restore);
+        ASSERT(Processor::current().in_critical() == critical_before);
     }
 }