
Kernel: Enhance WaitQueue to remember pending wakes

If WaitQueue::wake_all, WaitQueue::wake_one, or WaitQueue::wake_n
is called but nobody is currently waiting, we should remember that
fact and prevent someone from waiting after such a request. This
solves a race condition where the Finalizer thread is notified
to finalize a thread, but it is not (yet) waiting on this queue.

Fixes #2693
Tom · 5 years ago · revision 9725bda63e
5 files changed, 93 insertions(+), 27 deletions(-)
  1. Kernel/Lock.cpp (+14 -14)
  2. Kernel/Thread.cpp (+7 -1)
  3. Kernel/Thread.h (+1 -0)
  4. Kernel/WaitQueue.cpp (+68 -10)
  5. Kernel/WaitQueue.h (+3 -2)
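
The fix turns a lost wakeup into a remembered one: any wake_* call that finds the queue empty sets a flag, and the next enqueue() consumes that flag instead of blocking. A minimal userspace sketch of the idea, using std::mutex in place of the kernel's SpinLock (SketchWaitQueue and Waiter are illustrative names, not the kernel API):

#include <list>
#include <mutex>

struct Waiter { /* stand-in for Kernel::Thread */ };

class SketchWaitQueue {
public:
    // Returns false if a wake was already requested while the queue
    // was empty; the caller must not block in that case.
    bool enqueue(Waiter& w)
    {
        std::lock_guard guard(m_lock);
        if (m_wake_requested) {
            m_wake_requested = false; // consume the pending wake
            return false;
        }
        m_waiters.push_back(&w);
        return true;
    }

    void wake_one()
    {
        std::lock_guard guard(m_lock);
        if (m_waiters.empty()) {
            // Nobody is waiting yet: remember the wake instead of
            // dropping it, which is the race this commit fixes.
            m_wake_requested = true;
            return;
        }
        m_waiters.pop_front(); // in the kernel: thread->wake_from_queue()
        m_wake_requested = false;
    }

private:
    std::mutex m_lock;
    std::list<Waiter*> m_waiters;
    bool m_wake_requested { false };
};

Kernel::WaitQueue applies the same pattern in the diffs below, except that waking a thread also calls wake_from_queue() and yields the scheduler.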

+ 14 - 14
Kernel/Lock.cpp

@@ -54,20 +54,20 @@ void Lock::lock(Mode mode)
     for (;;) {
         bool expected = false;
         if (m_lock.compare_exchange_strong(expected, true, AK::memory_order_acq_rel)) {
-            // FIXME: Do not add new readers if writers are queued.
-            bool modes_dont_conflict = !modes_conflict(m_mode, mode);
-            bool already_hold_exclusive_lock = m_mode == Mode::Exclusive && m_holder == current_thread;
-            if (modes_dont_conflict || already_hold_exclusive_lock) {
-                // We got the lock!
-                if (!already_hold_exclusive_lock)
-                    m_mode = mode;
-                m_holder = current_thread;
-                m_times_locked++;
-                m_lock.store(false, AK::memory_order_release);
-                return;
-            }
-            timeval* timeout = nullptr;
-            current_thread->wait_on(m_queue, m_name, timeout, &m_lock, m_holder);
+            do {
+                // FIXME: Do not add new readers if writers are queued.
+                bool modes_dont_conflict = !modes_conflict(m_mode, mode);
+                bool already_hold_exclusive_lock = m_mode == Mode::Exclusive && m_holder == current_thread;
+                if (modes_dont_conflict || already_hold_exclusive_lock) {
+                    // We got the lock!
+                    if (!already_hold_exclusive_lock)
+                        m_mode = mode;
+                    m_holder = current_thread;
+                    m_times_locked++;
+                    m_lock.store(false, AK::memory_order_release);
+                    return;
+                }
+            } while (current_thread->wait_on(m_queue, m_name, nullptr, &m_lock, m_holder) == Thread::BlockResult::NotBlocked);
         } else if (Processor::current().in_critical()) {
             // If we're in a critical section and trying to lock, no context
             // switch will happen, so yield.
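
The do/while above retries the whole mode check whenever wait_on() reports NotBlocked, i.e. when a pending wake meant the thread never actually slept. Schematically, the new control flow looks like this (a self-contained sketch; try_acquire() and wait_for_unlock() are hypothetical stand-ins for the CAS/mode checks and Thread::wait_on(), not the real kernel functions):

#include <atomic>

// Illustrative only: models the retry shape of the new Lock::lock().
enum class BlockResult { WokeNormally, NotBlocked };

std::atomic<bool> g_locked { false };

// Stand-in for the compare_exchange + mode checks in Lock::lock().
bool try_acquire()
{
    bool expected = false;
    return g_locked.compare_exchange_strong(expected, true);
}

// Stand-in for Thread::wait_on(); the real call returns NotBlocked
// when the queue already had a wake request pending, meaning the
// caller never slept at all.
BlockResult wait_for_unlock() { return BlockResult::WokeNormally; }

void lock_sketch()
{
    for (;;) {
        do {
            if (try_acquire())
                return; // got the lock
            // On NotBlocked, the unlock we would have waited for has
            // already happened, so re-test the lock state immediately
            // instead of going back to sleep.
        } while (wait_for_unlock() == BlockResult::NotBlocked);
    }
}
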

+ 7 - 1
Kernel/Thread.cpp

@@ -872,12 +872,18 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeva
         // we need to wait until the scheduler lock is released again
         {
             ScopedSpinLock sched_lock(g_scheduler_lock);
+            if (!queue.enqueue(*Thread::current())) {
+                // The WaitQueue was already requested to wake someone when
+                // nobody was waiting. So return right away as we shouldn't
+                // be waiting
+                return BlockResult::NotBlocked;
+            }
+
             did_unlock = unlock_process_if_locked();
             if (lock)
                 *lock = false;
             set_state(State::Queued);
             m_wait_reason = reason;
-            queue.enqueue(*Thread::current());
 
     
             if (timeout) {
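
Because wait_on() can now return without ever blocking, callers must treat BlockResult::NotBlocked as "loop and re-check the guarded condition", which is exactly what the Lock::lock() hunk above does. A hedged caller-side sketch (condition() and wait_on_stub() are placeholders, not kernel functions):

// Illustrative only; BlockResult is abbreviated from Kernel/Thread.h.
enum class BlockResult { WokeNormally, NotBlocked };

// Placeholder for the predicate the wait queue guards.
bool condition() { return true; }

// Placeholder for Thread::current()->wait_on(queue, reason, ...).
BlockResult wait_on_stub() { return BlockResult::WokeNormally; }

void wait_until_condition()
{
    while (!condition()) {
        if (wait_on_stub() == BlockResult::NotBlocked) {
            // A wake was already pending, so we never slept;
            // re-check the condition before deciding to wait again.
            continue;
        }
        // WokeNormally (or an interruption value in the real enum):
        // the loop re-checks the condition on the next iteration.
    }
}
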

+ 1 - 0
Kernel/Thread.h

@@ -301,6 +301,7 @@ public:
 
     enum class BlockResult {
         WokeNormally,
+        NotBlocked,
         InterruptedBySignal,
         InterruptedByDeath,
         InterruptedByTimeout,

+ 68 - 10
Kernel/WaitQueue.cpp

@@ -27,8 +27,11 @@
 #include <Kernel/Thread.h>
 #include <Kernel/WaitQueue.h>
 
+//#define WAITQUEUE_DEBUG
+
 namespace Kernel {
 
+
 WaitQueue::WaitQueue()
 {
 }
@@ -37,10 +40,20 @@ WaitQueue::~WaitQueue()
 {
 }
 
-void WaitQueue::enqueue(Thread& thread)
+bool WaitQueue::enqueue(Thread& thread)
 {
     ScopedSpinLock queue_lock(m_lock);
+    if (m_wake_requested) {
+        // wake_* was called when no threads were in the queue
+        // we shouldn't wait at all
+        m_wake_requested = false;
+#ifdef WAITQUEUE_DEBUG
+        dbg() << "WaitQueue " << VirtualAddress(this) << ": enqueue: wake_all pending";
+#endif
+        return false;
+    }
     m_threads.append(thread);
+    return true;
 }
 
 void WaitQueue::wake_one(Atomic<bool>* lock)
@@ -48,42 +61,87 @@ void WaitQueue::wake_one(Atomic<bool>* lock)
     ScopedSpinLock queue_lock(m_lock);
     if (lock)
         *lock = false;
-    if (m_threads.is_empty())
+    if (m_threads.is_empty()) {
+        // Save the fact that a wake was requested
+        m_wake_requested = true;
+#ifdef WAITQUEUE_DEBUG
+        dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_one: nobody to wake, mark as pending";
+#endif
         return;
-    if (auto* thread = m_threads.take_first())
-        thread->wake_from_queue();
+    }
+#ifdef WAITQUEUE_DEBUG
+    dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_one:";
+#endif
+    auto* thread = m_threads.take_first();
+#ifdef WAITQUEUE_DEBUG
+    dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_one: wake thread " << *thread;
+#endif
+    thread->wake_from_queue();
+    m_wake_requested = false;
     Scheduler::yield();
 }
 
-void WaitQueue::wake_n(i32 wake_count)
+void WaitQueue::wake_n(u32 wake_count)
 {
     ScopedSpinLock queue_lock(m_lock);
-    if (m_threads.is_empty())
+    if (m_threads.is_empty()) {
+        // Save the fact that a wake was requested
+        m_wake_requested = true;
+#ifdef WAITQUEUE_DEBUG
+        dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_n: nobody to wake, mark as pending";
+#endif
         return;
+    }
 
-    for (i32 i = 0; i < wake_count; ++i) {
+#ifdef WAITQUEUE_DEBUG
+    dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_n: " << wake_count;
+#endif
+    for (u32 i = 0; i < wake_count; ++i) {
         Thread* thread = m_threads.take_first();
         if (!thread)
             break;
+#ifdef WAITQUEUE_DEBUG
+        dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_n: wake thread " << *thread;
+#endif
         thread->wake_from_queue();
     }
+    m_wake_requested = false;
     Scheduler::yield();
 }
 
 void WaitQueue::wake_all()
 {
     ScopedSpinLock queue_lock(m_lock);
-    if (m_threads.is_empty())
+    if (m_threads.is_empty()) {
+        // Save the fact that a wake was requested
+        m_wake_requested = true;
+#ifdef WAITQUEUE_DEBUG
+        dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_all: nobody to wake, mark as pending";
+#endif
         return;
-    while (!m_threads.is_empty())
-        m_threads.take_first()->wake_from_queue();
+    }
+#ifdef WAITQUEUE_DEBUG
+    dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_all: ";
+#endif
+    while (!m_threads.is_empty()) {
+        Thread* thread = m_threads.take_first();
+#ifdef WAITQUEUE_DEBUG
+        dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_all: wake thread " << *thread;
+#endif
+        thread->wake_from_queue();
+    }
+    m_wake_requested = false;
     Scheduler::yield();
 }
 
 void WaitQueue::clear()
 {
     ScopedSpinLock queue_lock(m_lock);
+#ifdef WAITQUEUE_DEBUG
+    dbg() << "WaitQueue " << VirtualAddress(this) << ": clear";
+#endif
     m_threads.clear();
+    m_wake_requested = false;
 }
 
 }
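
The Finalizer race in the commit message is the classic lost-wakeup problem: the notification fires before the consumer starts waiting, and without m_wake_requested it simply evaporates. The flag plays the same role a predicate plays with a userspace condition variable, as in this runnable analogy (standard C++, not kernel code):

#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

// Userspace analogue of the Finalizer race: the notifier may run to
// completion before the waiter ever starts waiting. work_pending plays
// the role of m_wake_requested, remembering the wake so the waiter
// does not sleep on a notification it already missed.
std::mutex mtx;
std::condition_variable cv;
bool work_pending = false;

int main()
{
    std::thread notifier([] {
        std::lock_guard guard(mtx);
        work_pending = true; // may happen before the waiter waits
        cv.notify_one();
    });

    std::thread waiter([] {
        std::unique_lock lock(mtx);
        // Without the predicate, a notify_one() issued before this
        // wait() would be lost and the waiter would hang forever.
        cv.wait(lock, [] { return work_pending; });
        std::cout << "woken, finalizing\n";
    });

    notifier.join();
    waiter.join();
}
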

+ 3 - 2
Kernel/WaitQueue.h

@@ -38,9 +38,9 @@ public:
     WaitQueue();
     ~WaitQueue();
 
-    void enqueue(Thread&);
+    bool enqueue(Thread&);
     void wake_one(Atomic<bool>* lock = nullptr);
-    void wake_n(i32 wake_count);
+    void wake_n(u32 wake_count);
     void wake_all();
     void clear();
 
@@ -48,6 +48,7 @@ private:
     typedef IntrusiveList<Thread, &Thread::m_wait_queue_node> ThreadList;
     ThreadList m_threads;
     SpinLock<u32> m_lock;
+    bool m_wake_requested { false };
 };
 
 }