
Kernel: Dequeue dying threads from WaitQueue

If a thread is waiting but gets killed, we need to dequeue
the thread from the WaitQueue so that it cannot be woken
again before finalization.
Tom 4 years ago
parent
commit
41d2a0e9f7
4 changed files with 37 additions and 7 deletions
  1. Kernel/Thread.cpp (+24 -7)
  2. Kernel/Thread.h (+1 -0)
  3. Kernel/WaitQueue.cpp (+10 -0)
  4. Kernel/WaitQueue.h (+2 -0)
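
A minimal standalone sketch of the race this commit closes, before the
diffs (toy types only, std::mutex standing in for the kernel's SpinLock;
this is not SerenityOS code): without the dequeue in set_state(), a wake
arriving after the thread is marked Dying could flip it back to Runnable
before the finalizer reclaims it.

#include <cassert>
#include <list>
#include <mutex>

enum class State { Runnable, Queued, Dying };

struct Thread;

struct WaitQueue {
    std::mutex lock;
    std::list<Thread*> threads;

    // Remove the thread if it is still queued; report whether we did.
    bool dequeue(Thread& thread)
    {
        std::lock_guard<std::mutex> guard(lock);
        for (auto it = threads.begin(); it != threads.end(); ++it) {
            if (*it == &thread) {
                threads.erase(it);
                return true;
            }
        }
        return false;
    }

    // A waker pops a thread and marks it Runnable -- exactly what must
    // not happen to a thread that is already Dying.
    Thread* wake_one();
};

struct Thread {
    State state { State::Runnable };
    WaitQueue* wait_queue { nullptr }; // mirrors the new Thread::m_wait_queue

    void set_state(State new_state)
    {
        auto previous_state = state;
        state = new_state;
        if (state == State::Dying && previous_state == State::Queued) {
            // The fix: detach the dying thread so no later wake sees it.
            assert(wait_queue != nullptr);
            wait_queue->dequeue(*this);
            wait_queue = nullptr;
        }
    }
};

Thread* WaitQueue::wake_one()
{
    std::lock_guard<std::mutex> guard(lock);
    if (threads.empty())
        return nullptr;
    Thread* thread = threads.front();
    threads.pop_front();
    thread->state = State::Runnable;
    return thread;
}

int main()
{
    WaitQueue queue;
    Thread thread;
    thread.state = State::Queued;
    thread.wait_queue = &queue;
    queue.threads.push_back(&thread);

    thread.set_state(State::Dying);       // the thread is killed while queued
    assert(queue.wake_one() == nullptr);  // a late wake now finds nobody
    assert(thread.state == State::Dying); // and the thread stays dead
}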

+ 24 - 7
Kernel/Thread.cpp

@@ -714,10 +714,20 @@ void Thread::set_state(State new_state)
         ASSERT(g_scheduler_data->has_thread(*this));
     }
 
-    if (m_state == Dying && this != Thread::current() && is_finalizable()) {
-        // Some other thread set this thread to Dying, notify the
-        // finalizer right away as it can be cleaned up now
-        Scheduler::notify_finalizer();
+    if (m_state == Dying) {
+        if (previous_state == Queued) {
+            // Holding the scheduler lock, we need to dequeue this thread
+            ASSERT(m_wait_queue != nullptr);
+            m_wait_queue->dequeue(*this);
+            m_wait_queue = nullptr;
+        }
+
+        if (this != Thread::current() && is_finalizable()) {
+            // Some other thread set this thread to Dying, notify the
+            // finalizer right away as it can be cleaned up now
+            Scheduler::notify_finalizer();
+        }
     }
 }
 
 
@@ -854,6 +864,7 @@ const LogStream& operator<<(const LogStream& stream, const Thread& value)

 Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeval* timeout, Atomic<bool>* lock, Thread* beneficiary)
 {
+    auto* current_thread = Thread::current();
     TimerId timer_id {};
     bool did_unlock;
 
@@ -865,7 +876,7 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeva
         // we need to wait until the scheduler lock is released again
         {
             ScopedSpinLock sched_lock(g_scheduler_lock);
-            if (!queue.enqueue(*Thread::current())) {
+            if (!queue.enqueue(*current_thread)) {
                 // The WaitQueue was already requested to wake someone when
                 // nobody was waiting. So return right away as we shouldn't
                 // be waiting
@@ -876,6 +887,7 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeva

                 return BlockResult::NotBlocked;
             }
+            current_thread->m_wait_queue = &queue;

             did_unlock = unlock_process_if_locked();
             if (lock)
@@ -885,7 +897,6 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeva

             if (timeout) {
                 timer_id = TimerQueue::the().add_timer(*timeout, [&]() {
-                    ScopedSpinLock sched_lock(g_scheduler_lock);
                     wake_from_queue();
                 });
             }
@@ -921,7 +932,12 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeva
         // scheduler lock, which is held when we insert into the queue
         ScopedSpinLock sched_lock(g_scheduler_lock);
 
 
-        if (m_wait_queue_node.is_in_list())
+        // m_wait_queue should have been cleared either by the timeout
+        // or by being woken
+        ASSERT(m_wait_queue == nullptr);
+
+        // If our thread was still in the queue, we timed out
+        if (queue.dequeue(*current_thread))
             result = BlockResult::InterruptedByTimeout;
 
 
         // Make sure we cancel the timer if woke normally.
@@ -940,6 +956,7 @@ void Thread::wake_from_queue()
     ScopedSpinLock lock(g_scheduler_lock);
     ASSERT(state() == State::Queued);
     m_wait_reason = nullptr;
+    m_wait_queue = nullptr;
     if (this != Thread::current())
         set_state(State::Runnable);
     else
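
A standalone model of wait_on()'s timeout discrimination above (toy
types, not kernel code): real wakes via wake_one()/wake_all() remove the
thread from the queue's list, while the timer's wake_from_queue() only
resumes it, so a thread that is still enqueued after resuming knows the
timer fired, and the dequeue() call doubles as its cleanup.

#include <algorithm>
#include <cassert>
#include <vector>

struct Waiter { };

struct Queue {
    std::vector<Waiter*> waiters;

    // Mirrors WaitQueue::dequeue(): true only if the waiter was still listed.
    bool dequeue(Waiter& waiter)
    {
        auto it = std::find(waiters.begin(), waiters.end(), &waiter);
        if (it == waiters.end())
            return false;
        waiters.erase(it);
        return true;
    }

    // A real wake removes the waiter from the list itself.
    void wake_one()
    {
        if (!waiters.empty())
            waiters.erase(waiters.begin());
    }
};

int main()
{
    Queue queue;
    Waiter waiter;

    // Timer path: nothing removed us, so dequeue() returns true,
    // which wait_on() maps to BlockResult::InterruptedByTimeout.
    queue.waiters.push_back(&waiter);
    assert(queue.dequeue(waiter));

    // Normal path: wake_one() already dequeued us; dequeue() is a no-op.
    queue.waiters.push_back(&waiter);
    queue.wake_one();
    assert(!queue.dequeue(waiter));
}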

+ 1 - 0
Kernel/Thread.h

@@ -521,6 +521,7 @@ public:
 private:
     IntrusiveListNode m_runnable_list_node;
     IntrusiveListNode m_wait_queue_node;
+    WaitQueue* m_wait_queue { nullptr };
 
 
 private:
     friend class SchedulerData;
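
The new member above carries a lifecycle invariant: m_wait_queue is set
while enqueuing under the scheduler lock and cleared on every exit path
(real wake, timer wake, or death), which is what lets wait_on() assert
that it is null after resuming. A compact standalone model of those
paths (all names are this sketch's own, not kernel code):

#include <cassert>

struct WaitQueue { };

struct Thread {
    WaitQueue* m_wait_queue { nullptr };

    void enqueue_on(WaitQueue& queue) { m_wait_queue = &queue; }

    // Every way out of the Queued state clears the back-pointer:
    void wake_from_queue() { m_wait_queue = nullptr; }  // real or timer wake
    void die_while_queued() { m_wait_queue = nullptr; } // after dequeuing

    void resume_from_wait() const
    {
        // Mirrors the new ASSERT(m_wait_queue == nullptr) in wait_on().
        assert(m_wait_queue == nullptr);
    }
};

int main()
{
    WaitQueue queue;
    Thread thread;

    thread.enqueue_on(queue);
    thread.wake_from_queue();
    thread.resume_from_wait(); // the invariant holds after a wake
}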

+ 10 - 0
Kernel/WaitQueue.cpp

@@ -56,6 +56,16 @@ bool WaitQueue::enqueue(Thread& thread)
     return true;
 }
 
 
+bool WaitQueue::dequeue(Thread& thread)
+{
+    ScopedSpinLock queue_lock(m_lock);
+    if (m_threads.contains(thread)) {
+        m_threads.remove(thread);
+        return true;
+    }
+    return false;
+}
+
 void WaitQueue::wake_one(Atomic<bool>* lock)
 {
     ScopedSpinLock queue_lock(m_lock);
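
Because dequeue() does its check-and-remove under m_lock, its return
value is reliable even against concurrent wakers: whatever the
interleaving, exactly one party removes the thread. A toy demonstration
(std::mutex and std::thread in place of the kernel primitives; names
invented for the sketch):

#include <algorithm>
#include <cassert>
#include <list>
#include <mutex>
#include <thread>

struct Waiter { };

struct Queue {
    std::mutex lock; // stands in for WaitQueue::m_lock
    std::list<Waiter*> waiters;

    // As in the patch: check-and-remove atomically, and report whether
    // this caller was the one that removed the waiter.
    bool dequeue(Waiter& waiter)
    {
        std::lock_guard<std::mutex> guard(lock);
        auto it = std::find(waiters.begin(), waiters.end(), &waiter);
        if (it == waiters.end())
            return false;
        waiters.erase(it);
        return true;
    }

    bool wake_one()
    {
        std::lock_guard<std::mutex> guard(lock);
        if (waiters.empty())
            return false;
        waiters.pop_front();
        return true;
    }
};

int main()
{
    Queue queue;
    Waiter waiter;
    queue.waiters.push_back(&waiter);

    bool woke = false;
    bool dequeued = false;
    std::thread waker([&] { woke = queue.wake_one(); });
    std::thread killer([&] { dequeued = queue.dequeue(waiter); });
    waker.join();
    killer.join();

    // Exactly one side won the race to remove the waiter.
    assert(woke != dequeued);
}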

+ 2 - 0
Kernel/WaitQueue.h

@@ -38,7 +38,9 @@ public:
     WaitQueue();
     ~WaitQueue();
 
 
+    SpinLock<u32>& get_lock() { return m_lock; }
     bool enqueue(Thread&);
+    bool dequeue(Thread&);
     void wake_one(Atomic<bool>* lock = nullptr);
     void wake_n(u32 wake_count);
     void wake_all();