
Kernel: Fix blocking relock of the big_lock while unlocking other Lock

When a Thread is being unblocked and needs to re-lock the process
big_lock, that re-lock may itself block, in which case we end up in
Thread::block again while still servicing the original lock's
Thread::block. So permit recursion, as long as the only lock we block
on again is the big_lock.

Fixes #8822
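
Conceptually, the fix encodes the rule "recursion into Thread::block is
allowed exactly once, and only for the big_lock". A minimal standalone
sketch of that predicate (plain C++; only big_lock and m_blocking_lock
mirror the kernel's names, everything else is illustrative):

    #include <cassert>

    struct Mutex {};

    struct Thread {
        Mutex* m_blocking_lock { nullptr }; // mutex this thread is currently blocked on, if any

        // Mirrors the new VERIFY: blocking is legal if we are not already
        // blocked on anything, or if we are re-entering for the big_lock
        // while blocked on some *other* mutex.
        bool may_block_on(Mutex& lock, Mutex& big_lock) const
        {
            return (&lock == &big_lock && m_blocking_lock != &big_lock)
                || m_blocking_lock == nullptr;
        }
    };

    int main()
    {
        Mutex big_lock, other_lock;
        Thread thread;

        assert(thread.may_block_on(other_lock, big_lock));  // fresh block: allowed
        thread.m_blocking_lock = &other_lock;
        assert(thread.may_block_on(big_lock, big_lock));    // recurse on big_lock: allowed
        assert(!thread.may_block_on(other_lock, big_lock)); // recurse on another mutex: forbidden
        thread.m_blocking_lock = &big_lock;
        assert(!thread.may_block_on(big_lock, big_lock));   // blocked on big_lock twice: forbidden
    }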
Tom committed 4 years ago
parent commit ae8472f9ca
1 changed file with 9 additions and 6 deletions

Kernel/Thread.cpp (+9 -6)

@@ -180,8 +180,6 @@ void Thread::block(Kernel::Mutex& lock, ScopedSpinLock<SpinLock<u8>>& lock_lock,
     VERIFY(!s_mm_lock.own_lock());
 
     ScopedSpinLock block_lock(m_block_lock);
-    VERIFY(!m_in_block);
-    m_in_block = true;
 
     ScopedSpinLock scheduler_lock(g_scheduler_lock);
 
@@ -195,7 +193,14 @@ void Thread::block(Kernel::Mutex& lock, ScopedSpinLock<SpinLock<u8>>& lock_lock,
     default:
         VERIFY_NOT_REACHED();
     }
-    VERIFY(!m_blocking_lock);
+
+    // If we're blocking on the big-lock we may actually be in the process
+    // of unblocking from another lock. If that's the case m_blocking_lock
+    // is already set
+    auto& big_lock = process().big_lock();
+    VERIFY((&lock == &big_lock && m_blocking_lock != &big_lock) || !m_blocking_lock);
+
+    auto previous_blocking_lock = m_blocking_lock;
     m_blocking_lock = &lock;
     m_lock_requested_count = lock_count;
 
@@ -208,7 +213,6 @@ void Thread::block(Kernel::Mutex& lock, ScopedSpinLock<SpinLock<u8>>& lock_lock,
 
     dbgln_if(THREAD_DEBUG, "Thread {} blocking on Mutex {}", *this, &lock);
 
-    auto& big_lock = process().big_lock();
     for (;;) {
         // Yield to the scheduler, and wait for us to resume unblocked.
         VERIFY(!g_scheduler_lock.own_lock());
@@ -230,8 +234,7 @@ void Thread::block(Kernel::Mutex& lock, ScopedSpinLock<SpinLock<u8>>& lock_lock,
         }
 
         VERIFY(!m_blocking_lock);
-        VERIFY(m_in_block);
-        m_in_block = false;
+        m_blocking_lock = previous_blocking_lock;
         break;
     }
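
The save/restore of m_blocking_lock is what keeps the single level of
recursion coherent: the inner big_lock frame must not clobber the outer
frame's record of which mutex it was blocked on. A rough sketch of that
shape (hypothetical, heavily simplified; only block, big_lock, and
m_blocking_lock echo the kernel's names, and the per-thread field is a
global here for brevity):

    #include <cassert>

    struct Mutex {};
    Mutex big_lock;
    Mutex* m_blocking_lock = nullptr; // per-thread in the kernel

    // Simplified shape of Thread::block() after this commit: save the
    // outer frame's lock, install ours, restore the outer one on exit.
    void block(Mutex& lock)
    {
        auto* previous_blocking_lock = m_blocking_lock;
        m_blocking_lock = &lock;
        // ... yield to the scheduler until the mutex is handed to us ...
        m_blocking_lock = previous_blocking_lock;
    }

    int main()
    {
        Mutex other_mutex;
        // Outer frame: thread is blocked on some other mutex.
        m_blocking_lock = &other_mutex; // as the outer block(other_mutex) would do
        // Wakeup path re-takes the big_lock, contends, and re-enters block():
        block(big_lock);
        // The outer frame's bookkeeping survived the recursion:
        assert(m_blocking_lock == &other_mutex);
    }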