
Kernel: Move Thread::block<BlockerType>() out of the Thread.h header

This function is large and unwieldy and forces Thread.h to #include
a bunch of things. The only reason it was in the header is that we
need to instantiate a blocker based on the templated BlockerType.

We actually keep block<BlockerType>() in the header, but move the
bulk of the function body out of line into Thread::block_impl().

To preserve destructor ordering, we add Blocker::finalize() which is
called where we'd previously destroy the Blocker.
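
The change is an instance of the "thin template" technique: the header
keeps only a tiny templated trampoline whose job is to construct the
type-dependent Blocker, while the heavy, type-independent logic is
compiled once, out of line. A minimal sketch of the split, with
hypothetical names (Runner, run_impl) standing in for Thread and
block_impl:

    // A hand-rolled illustration of the pattern, not the kernel code itself.
    #include <cstdio>
    #include <utility> // std::forward

    class Base {
    public:
        virtual ~Base() = default;
        virtual char const* name() const { return "Base"; }
    };

    class Runner {
    public:
        // Header part: the only templated job is constructing the right
        // derived type from the forwarded arguments.
        template<typename DerivedType, class... Args>
        int run(Args&&... args)
        {
            DerivedType object(std::forward<Args>(args)...);
            return run_impl(object); // the bulk, compiled once
        }

    private:
        int run_impl(Base&); // would live in a .cpp with the heavy #includes
    };

    // .cpp part: type-independent bulk, written against the base class only.
    int Runner::run_impl(Base& object)
    {
        std::printf("running a %s\n", object.name());
        return 0;
    }

Only the one-line trampoline is stamped out per BlockerType, so Thread.h
no longer has to pull in the scheduler, timer, and memory-manager headers
that the bulk of the blocking logic depends on.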
Andreas Kling, 3 years ago
commit f469fb47b8
3 changed files with 138 additions and 118 deletions
  1. Kernel/Thread.cpp (+123 -0)
  2. Kernel/Thread.h (+6 -117)
  3. Kernel/ThreadBlockers.cpp (+9 -1)

Kernel/Thread.cpp (+123 -0)

@@ -147,6 +147,129 @@ Thread::~Thread()
     }
 }
 
+Thread::BlockResult Thread::block_impl(BlockTimeout const& timeout, Blocker& blocker)
+{
+    VERIFY(!Processor::current_in_irq());
+    VERIFY(this == Thread::current());
+    ScopedCritical critical;
+    VERIFY(!Memory::s_mm_lock.is_locked_by_current_processor());
+
+    SpinlockLocker block_lock(m_block_lock);
+    // We need to hold m_block_lock so that nobody can unblock a blocker as soon
+    // as it is constructed and registered elsewhere
+    VERIFY(!m_in_block);
+    TemporaryChange in_block_change(m_in_block, true);
+
+    ScopeGuard finalize_guard([&] {
+        blocker.finalize();
+    });
+
+    if (!blocker.setup_blocker()) {
+        blocker.will_unblock_immediately_without_blocking(Blocker::UnblockImmediatelyReason::UnblockConditionAlreadyMet);
+        return BlockResult::NotBlocked;
+    }
+
+    SpinlockLocker scheduler_lock(g_scheduler_lock);
+    // Relaxed semantics are fine for timeout_unblocked because we
+    // synchronize on the spin locks already.
+    Atomic<bool, AK::MemoryOrder::memory_order_relaxed> timeout_unblocked(false);
+    bool timer_was_added = false;
+
+    switch (state()) {
+    case Thread::Stopped:
+        // It's possible that we were requested to be stopped!
+        break;
+    case Thread::Running:
+        VERIFY(m_blocker == nullptr);
+        break;
+    default:
+        VERIFY_NOT_REACHED();
+    }
+
+    m_blocker = &blocker;
+
+    if (auto& block_timeout = blocker.override_timeout(timeout); !block_timeout.is_infinite()) {
+        // Process::kill_all_threads may be called at any time, which will mark all
+        // threads to die. In that case
+        timer_was_added = TimerQueue::the().add_timer_without_id(*m_block_timer, block_timeout.clock_id(), block_timeout.absolute_time(), [&]() {
+            VERIFY(!Processor::current_in_irq());
+            VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
+            VERIFY(!m_block_lock.is_locked_by_current_processor());
+            // NOTE: this may execute on the same or any other processor!
+            SpinlockLocker scheduler_lock(g_scheduler_lock);
+            SpinlockLocker block_lock(m_block_lock);
+            if (m_blocker && !timeout_unblocked.exchange(true))
+                unblock();
+        });
+        if (!timer_was_added) {
+            // Timeout is already in the past
+            blocker.will_unblock_immediately_without_blocking(Blocker::UnblockImmediatelyReason::TimeoutInThePast);
+            m_blocker = nullptr;
+            return BlockResult::InterruptedByTimeout;
+        }
+    }
+
+    blocker.begin_blocking({});
+
+    set_state(Thread::Blocked);
+
+    scheduler_lock.unlock();
+    block_lock.unlock();
+
+    dbgln_if(THREAD_DEBUG, "Thread {} blocking on {} ({}) -->", *this, &blocker, blocker.state_string());
+    bool did_timeout = false;
+    u32 lock_count_to_restore = 0;
+    auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
+    for (;;) {
+        // Yield to the scheduler, and wait for us to resume unblocked.
+        VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
+        VERIFY(Processor::in_critical());
+        yield_without_releasing_big_lock();
+        VERIFY(Processor::in_critical());
+
+        SpinlockLocker block_lock2(m_block_lock);
+        if (m_blocker && !m_blocker->can_be_interrupted() && !m_should_die) {
+            block_lock2.unlock();
+            dbgln("Thread should not be unblocking, current state: {}", state_string());
+            set_state(Thread::Blocked);
+            continue;
+        }
+        // Prevent the timeout from unblocking this thread if it happens to
+        // be in the process of firing already
+        did_timeout |= timeout_unblocked.exchange(true);
+        if (m_blocker) {
+            // Remove ourselves...
+            VERIFY(m_blocker == &blocker);
+            m_blocker = nullptr;
+        }
+        dbgln_if(THREAD_DEBUG, "<-- Thread {} unblocked from {} ({})", *this, &blocker, blocker.state_string());
+        break;
+    }
+
+    if (blocker.was_interrupted_by_signal()) {
+        SpinlockLocker scheduler_lock(g_scheduler_lock);
+        SpinlockLocker lock(m_lock);
+        dispatch_one_pending_signal();
+    }
+
+    // Notify the blocker that we are no longer blocking. It may need
+    // to clean up now while we're still holding m_lock
+    auto result = blocker.end_blocking({}, did_timeout); // calls was_unblocked internally
+
+    if (timer_was_added && !did_timeout) {
+        // Cancel the timer while not holding any locks. This allows
+        // the timer function to complete before we remove it
+        // (e.g. if it's on another processor)
+        TimerQueue::the().cancel_timer(*m_block_timer);
+    }
+    if (previous_locked != LockMode::Unlocked) {
+        // NOTE: this may trigger another call to Thread::block(), so
+        // we need to do this after we're all done and restored m_in_block!
+        relock_process(previous_locked, lock_count_to_restore);
+    }
+    return result;
+}
+
 void Thread::block(Kernel::Mutex& lock, SpinlockLocker<Spinlock>& lock_lock, u32 lock_count)
 {
     VERIFY(!Processor::current_in_irq());
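
The ScopeGuard at the top of block_impl() is what lets the cleanup move
out of the template: finalize() now fires on every exit path, including
the two early returns (NotBlocked and InterruptedByTimeout), at the same
point where the old inline block() would have destroyed the Blocker. A
stand-alone sketch of the RAII idea, using a hand-rolled stand-in for
AK::ScopeGuard and a hypothetical block_like_function():

    #include <cstdio>
    #include <utility>

    // Minimal scope guard: runs the stored callable when the scope ends.
    template<typename Callback>
    class ScopeGuard {
    public:
        explicit ScopeGuard(Callback callback)
            : m_callback(std::move(callback))
        {
        }
        ~ScopeGuard() { m_callback(); }

    private:
        Callback m_callback;
    };

    int block_like_function(bool condition_already_met)
    {
        ScopeGuard guard([] { std::puts("finalize() fires here"); });
        if (condition_already_met)
            return 0; // early return: the guard still fires
        // ... the long blocking logic would run here ...
        return 1; // normal return: the guard fires here too
    }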

Kernel/Thread.h (+6 -117)

@@ -300,6 +300,7 @@ public:
         virtual const BlockTimeout& override_timeout(const BlockTimeout& timeout) { return timeout; }
         virtual bool can_be_interrupted() const { return true; }
         virtual bool setup_blocker();
+        virtual void finalize();
 
         Thread& thread() { return m_thread; }
 
@@ -690,6 +691,7 @@ public:
         virtual void was_unblocked(bool) override;
         virtual StringView state_string() const override { return "Selecting"sv; }
         virtual bool setup_blocker() override;
+        virtual void finalize() override;
 
     private:
         size_t collect_unblocked_flags();
@@ -870,125 +872,10 @@ public:
     void block(Kernel::Mutex&, SpinlockLocker<Spinlock>&, u32);
 
     template<typename BlockerType, class... Args>
-    [[nodiscard]] BlockResult block(const BlockTimeout& timeout, Args&&... args)
+    BlockResult block(BlockTimeout const& timeout, Args&&... args)
     {
-        VERIFY(!Processor::current_in_irq());
-        VERIFY(this == Thread::current());
-        ScopedCritical critical;
-        VERIFY(!Memory::s_mm_lock.is_locked_by_current_processor());
-
-        SpinlockLocker block_lock(m_block_lock);
-        // We need to hold m_block_lock so that nobody can unblock a blocker as soon
-        // as it is constructed and registered elsewhere
-        VERIFY(!m_in_block);
-        TemporaryChange in_block_change(m_in_block, true);
-
         BlockerType blocker(forward<Args>(args)...);
-
-        if (!blocker.setup_blocker()) {
-            blocker.will_unblock_immediately_without_blocking(Blocker::UnblockImmediatelyReason::UnblockConditionAlreadyMet);
-            return BlockResult::NotBlocked;
-        }
-
-        SpinlockLocker scheduler_lock(g_scheduler_lock);
-        // Relaxed semantics are fine for timeout_unblocked because we
-        // synchronize on the spin locks already.
-        Atomic<bool, AK::MemoryOrder::memory_order_relaxed> timeout_unblocked(false);
-        bool timer_was_added = false;
-
-        switch (state()) {
-        case Thread::Stopped:
-            // It's possible that we were requested to be stopped!
-            break;
-        case Thread::Running:
-            VERIFY(m_blocker == nullptr);
-            break;
-        default:
-            VERIFY_NOT_REACHED();
-        }
-
-        m_blocker = &blocker;
-
-        if (auto& block_timeout = blocker.override_timeout(timeout); !block_timeout.is_infinite()) {
-            // Process::kill_all_threads may be called at any time, which will mark all
-            // threads to die. In that case
-            timer_was_added = TimerQueue::the().add_timer_without_id(*m_block_timer, block_timeout.clock_id(), block_timeout.absolute_time(), [&]() {
-                VERIFY(!Processor::current_in_irq());
-                VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
-                VERIFY(!m_block_lock.is_locked_by_current_processor());
-                // NOTE: this may execute on the same or any other processor!
-                SpinlockLocker scheduler_lock(g_scheduler_lock);
-                SpinlockLocker block_lock(m_block_lock);
-                if (m_blocker && !timeout_unblocked.exchange(true))
-                    unblock();
-            });
-            if (!timer_was_added) {
-                // Timeout is already in the past
-                blocker.will_unblock_immediately_without_blocking(Blocker::UnblockImmediatelyReason::TimeoutInThePast);
-                m_blocker = nullptr;
-                return BlockResult::InterruptedByTimeout;
-            }
-        }
-
-        blocker.begin_blocking({});
-
-        set_state(Thread::Blocked);
-
-        scheduler_lock.unlock();
-        block_lock.unlock();
-
-        dbgln_if(THREAD_DEBUG, "Thread {} blocking on {} ({}) -->", *this, &blocker, blocker.state_string());
-        bool did_timeout = false;
-        u32 lock_count_to_restore = 0;
-        auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
-        for (;;) {
-            // Yield to the scheduler, and wait for us to resume unblocked.
-            VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
-            VERIFY(Processor::in_critical());
-            yield_without_releasing_big_lock();
-            VERIFY(Processor::in_critical());
-
-            SpinlockLocker block_lock2(m_block_lock);
-            if (m_blocker && !m_blocker->can_be_interrupted() && !m_should_die) {
-                block_lock2.unlock();
-                dbgln("Thread should not be unblocking, current state: {}", state_string());
-                set_state(Thread::Blocked);
-                continue;
-            }
-            // Prevent the timeout from unblocking this thread if it happens to
-            // be in the process of firing already
-            did_timeout |= timeout_unblocked.exchange(true);
-            if (m_blocker) {
-                // Remove ourselves...
-                VERIFY(m_blocker == &blocker);
-                m_blocker = nullptr;
-            }
-            dbgln_if(THREAD_DEBUG, "<-- Thread {} unblocked from {} ({})", *this, &blocker, blocker.state_string());
-            break;
-        }
-
-        if (blocker.was_interrupted_by_signal()) {
-            SpinlockLocker scheduler_lock(g_scheduler_lock);
-            SpinlockLocker lock(m_lock);
-            dispatch_one_pending_signal();
-        }
-
-        // Notify the blocker that we are no longer blocking. It may need
-        // to clean up now while we're still holding m_lock
-        auto result = blocker.end_blocking({}, did_timeout); // calls was_unblocked internally
-
-        if (timer_was_added && !did_timeout) {
-            // Cancel the timer while not holding any locks. This allows
-            // the timer function to complete before we remove it
-            // (e.g. if it's on another processor)
-            TimerQueue::the().cancel_timer(*m_block_timer);
-        }
-        if (previous_locked != LockMode::Unlocked) {
-            // NOTE: this may trigger another call to Thread::block(), so
-            // we need to do this after we're all done and restored m_in_block!
-            relock_process(previous_locked, lock_count_to_restore);
-        }
-        return result;
+        return block_impl(timeout, blocker);
     }
 
     u32 unblock_from_lock(Kernel::Mutex&);
@@ -1251,6 +1138,8 @@ public:
 private:
     Thread(NonnullRefPtr<Process>, NonnullOwnPtr<Memory::Region>, NonnullRefPtr<Timer>, NonnullOwnPtr<KString>);
 
+    BlockResult block_impl(BlockTimeout const&, Blocker&);
+
     IntrusiveListNode<Thread> m_process_thread_list_node;
     int m_runnable_priority { -1 };
 

Kernel/ThreadBlockers.cpp (+9 -1)

@@ -39,7 +39,10 @@ bool Thread::Blocker::add_to_blocker_set(Thread::BlockerSet& blocker_set, void*
 
 Thread::Blocker::~Blocker()
 {
-    VERIFY(!m_lock.is_locked());
+}
+
+void Thread::Blocker::finalize()
+{
     if (m_blocker_set)
         m_blocker_set->remove_blocker(*this);
 }
@@ -371,6 +374,11 @@ bool Thread::SelectBlocker::setup_blocker()
 
 Thread::SelectBlocker::~SelectBlocker()
 {
+}
+
+void Thread::SelectBlocker::finalize()
+{
+    Thread::FileBlocker::finalize();
     for (auto& fd_entry : m_fds)
         fd_entry.description->blocker_set().remove_blocker(*this);
 }
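
Moving the cleanup from the destructors into finalize() also explains the
chaining above: a virtual call made inside a destructor does not dispatch
to derived overrides, which is why ~Blocker() and ~SelectBlocker() each
had to do their own unregistration. finalize() is an ordinary virtual call
on a still-alive object, so one call from block_impl() reaches the most
derived override, which then chains to its base explicitly, as
SelectBlocker::finalize() does. A reduced sketch of the dispatch, reusing
the class names from the diff with puts() standing in for the real
cleanup:

    #include <cstdio>

    class Blocker {
    public:
        virtual ~Blocker() = default;
        virtual void finalize() { std::puts("Blocker: leave blocker set"); }
    };

    class SelectBlocker : public Blocker {
    public:
        void finalize() override
        {
            Blocker::finalize(); // chain to the base explicitly...
            std::puts("SelectBlocker: drop per-fd registrations");
        }
    };

    int main()
    {
        SelectBlocker blocker;
        Blocker& base = blocker;
        base.finalize(); // ...so both layers of cleanup run while the
                         // object is still fully alive
        return 0;
    }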