diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index 04f9d0a18eb..98f6000d7a0 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -175,8 +175,6 @@ bool Scheduler::pick_next()
 {
     VERIFY_INTERRUPTS_DISABLED();
 
-    auto current_thread = Thread::current();
-
     // Set the m_in_scheduler flag before acquiring the spinlock. This
     // prevents a recursive call into Scheduler::invoke_async upon
     // leaving the scheduler lock.
@@ -194,22 +192,6 @@ bool Scheduler::pick_next()
 
     ScopedSpinLock lock(g_scheduler_lock);
 
-    if (current_thread->should_die() && current_thread->state() == Thread::Running) {
-        // Rather than immediately killing threads, yanking the kernel stack
-        // away from them (which can lead to e.g. reference leaks), we always
-        // allow Thread::wait_on to return. This allows the kernel stack to
-        // clean up and eventually we'll get here shortly before transitioning
-        // back to user mode (from Processor::exit_trap). At this point we
-        // no longer want to schedule this thread. We can't wait until
-        // Scheduler::enter_current because we don't want to allow it to
-        // transition back to user mode.
-
-        if constexpr (SCHEDULER_DEBUG)
-            dbgln("Scheduler[{}]: Thread {} is dying", Processor::id(), *current_thread);
-
-        current_thread->set_state(Thread::Dying);
-    }
-
     if constexpr (SCHEDULER_RUNNABLE_DEBUG) {
         dump_thread_list();
     }
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index dc5910354c8..e25377aacb7 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -241,6 +241,16 @@ void Thread::die_if_needed()
     u32 unlock_count;
     [[maybe_unused]] auto rc = unlock_process_if_locked(unlock_count);
 
+    dbgln_if(THREAD_DEBUG, "Thread {} is dying", *this);
+
+    {
+        ScopedSpinLock lock(g_scheduler_lock);
+        // It's possible that we don't reach the code after this block if the
+        // scheduler is invoked and FinalizerTask cleans up this thread, however
+        // that doesn't matter because we're trying to invoke the scheduler anyway
+        set_state(Thread::Dying);
+    }
+
     ScopedCritical critical;
 
     // Flag a context switch. Because we're in a critical section,
@@ -316,6 +326,8 @@ LockMode Thread::unlock_process_if_locked(u32& lock_count_to_restore)
 
 void Thread::relock_process(LockMode previous_locked, u32 lock_count_to_restore)
 {
+    VERIFY(state() == Thread::Running);
+
     // Clearing the critical section may trigger the context switch
     // flagged by calling Scheduler::donate_to or Scheduler::yield
     // above. We have to do it this way because we intentionally
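
Note on the ordering this patch establishes: the dying thread now marks itself Thread::Dying under g_scheduler_lock in Thread::die_if_needed(), before it invokes the scheduler, so Scheduler::pick_next() no longer needs its own "current thread should die" special case. Below is a minimal standalone sketch of that ordering, not SerenityOS code: the Thread struct, State enum, die_if_needed/pick_next helpers, and the std::mutex are simplified stand-ins for the kernel's own Thread class and the g_scheduler_lock spinlock.

// Standalone sketch (plain C++); types and names are simplified stand-ins,
// not the kernel's real scheduler.
#include <cstdio>
#include <mutex>
#include <vector>

enum class State { Runnable, Running, Dying };

struct Thread {
    int id;
    State state;
};

std::mutex g_scheduler_lock; // stand-in for the kernel's g_scheduler_lock spinlock

// Analogue of Thread::die_if_needed(): the thread transitions itself to Dying
// while holding the scheduler lock, then yields. The scheduler may never
// return control to this thread afterwards.
void die_if_needed(Thread& current)
{
    {
        std::lock_guard<std::mutex> lock(g_scheduler_lock);
        current.state = State::Dying;
    }
    // ...then invoke the scheduler (yield).
}

// Analogue of Scheduler::pick_next(): with the state already set by the dying
// thread itself, picking is just "take the next Runnable thread" with no
// should_die() special case.
Thread* pick_next(std::vector<Thread>& threads)
{
    std::lock_guard<std::mutex> lock(g_scheduler_lock);
    for (auto& t : threads) {
        if (t.state == State::Runnable)
            return &t;
    }
    return nullptr;
}

int main()
{
    std::vector<Thread> threads { { 1, State::Running }, { 2, State::Runnable } };
    die_if_needed(threads[0]);           // thread 1 marks itself Dying
    if (auto* next = pick_next(threads))
        std::printf("next thread: %d\n", next->id); // prints 2; thread 1 is skipped
}

The added VERIFY(state() == Thread::Running) in Thread::relock_process() then documents the invariant that only a thread which was actually rescheduled (i.e. not one that went Dying and was finalized) ever reaches that path.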