Mirror of https://github.com/LadybirdBrowser/ladybird.git (synced 2024-11-22 15:40:19 +00:00)
LibThreading: Adjust ThreadPoolLooper m_busy_count sections
The `ThreadPoolLooper` should increment `m_busy_count` before it attempts to access the global queue. Otherwise there is a race window in which `wait_for_all` checks its exit conditions after the looper has emptied the `ThreadPool` queue but before the looper has incremented `m_busy_count`, so `wait_for_all` can return while a job is still in flight.

In addition, incrementing and decrementing `m_busy_count` is now entirely the responsibility of `ThreadPoolLooper`. Previously, the caller of `Looper::next` decremented `m_busy_count` after every call, which could underflow the counter whenever `Looper::next` returned without ever having incremented it.
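For context, the exit condition that `wait_for_all` checks presumably has the following shape. This is a hedged sketch based only on the identifiers visible in the diff below (`m_work_queue`, `m_busy_count`, `m_mutex`, `m_work_done`); it is not the verbatim LibThreading source.

// Hedged sketch of the presumed wait_for_all() exit check; member names are
// taken from the diff where possible, everything else is an assumption.
void wait_for_all()
{
    auto pending_work = [this] {
        // Keep waiting while anything is queued OR any looper is busy.
        return m_work_queue.with_locked([](auto& queue) { return !queue.is_empty(); })
            || m_busy_count > 0;
    };

    while (pending_work()) {
        m_mutex.lock();
        m_work_done.wait();
        m_mutex.unlock();
    }
    // Race window before this commit: a looper dequeues the last item (queue is
    // now empty) but has not yet incremented m_busy_count, so both conditions
    // read false here and wait_for_all() returns while that job is in flight.
}

Incrementing `m_busy_count` before the queue is touched closes that window: while a looper is anywhere between claiming a job and finishing it, at least one of the two conditions above stays true.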
Parent: a0fd7cf371
Commit: 8d336d2a25

Notes (github-actions[bot], 2024-08-19 01:09:04 +00:00):
Author: https://github.com/braydnm
Commit: https://github.com/LadybirdBrowser/ladybird/commit/8d336d2a25b
Pull-request: https://github.com/LadybirdBrowser/ladybird/pull/929
Reviewed-by: https://github.com/alimpfard
1 changed file with 5 additions and 3 deletions
@@ -22,6 +22,7 @@ struct ThreadPoolLooper {
     {
         Optional<typename Pool::Work> entry;
         while (true) {
+            pool.m_busy_count++;
             entry = pool.m_work_queue.with_locked([&](auto& queue) -> Optional<typename Pool::Work> {
                 if (queue.is_empty())
                     return {};
@@ -29,6 +30,8 @@ struct ThreadPoolLooper {
             });
             if (entry.has_value())
                 break;
+
+            pool.m_busy_count--;
             if (pool.m_should_exit)
                 return IterationDecision::Break;
 
@@ -47,8 +50,9 @@ struct ThreadPoolLooper {
             pool.m_mutex.unlock();
         }
 
-        pool.m_busy_count++;
         pool.m_handler(entry.release_value());
+        pool.m_busy_count--;
+        pool.m_work_done.signal();
         return IterationDecision::Continue;
     }
 };
@@ -124,8 +128,6 @@ private:
         Looper<ThreadPool> thread_looper;
        for (; !m_should_exit;) {
             auto result = thread_looper.next(*this, true);
-            m_busy_count--;
-            m_work_done.signal();
             if (result == IterationDecision::Break)
                 break;
         }
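Putting the hunks together, `ThreadPoolLooper::next` after this change reads roughly as follows. The lines that do not appear in the hunks above are filled in from context and should be treated as assumptions rather than verbatim source; this applies in particular to the function signature, the dequeue, the `wait` early-return, and the wait on `m_work_available`.

// Reconstructed from the diff hunks above; lines marked "assumption" are not
// shown in the diff and are inferred from the surrounding context.
template<typename Pool>
struct ThreadPoolLooper {
    IterationDecision next(Pool& pool, bool wait) // assumption: signature inferred from next(*this, true)
    {
        Optional<typename Pool::Work> entry;
        while (true) {
            // Mark this looper busy *before* touching the queue, so wait_for_all
            // can never see "queue empty" and "nobody busy" while a job is being claimed.
            pool.m_busy_count++;
            entry = pool.m_work_queue.with_locked([&](auto& queue) -> Optional<typename Pool::Work> {
                if (queue.is_empty())
                    return {};
                return queue.dequeue(); // assumption
            });
            if (entry.has_value())
                break;

            // Nothing was claimed: undo the optimistic increment before exiting or waiting.
            pool.m_busy_count--;
            if (pool.m_should_exit)
                return IterationDecision::Break;
            if (!wait) // assumption
                return IterationDecision::Continue;

            pool.m_mutex.lock();
            pool.m_work_available.wait(); // assumption
            pool.m_mutex.unlock();
        }

        pool.m_handler(entry.release_value());
        // The looper now owns both halves of the bookkeeping: drop the counter and
        // wake anyone blocked in wait_for_all, instead of leaving that to the caller.
        pool.m_busy_count--;
        pool.m_work_done.signal();
        return IterationDecision::Continue;
    }
};

With this shape, the worker loop in the last hunk no longer touches `m_busy_count` or `m_work_done` at all, so a call to `next` that bails out early, for example because `m_should_exit` was set before any work was claimed, can no longer make the caller decrement a counter that was never incremented.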