Kernel: Track big lock blocked threads in separate list

When we lock a mutex, eventually `Thread::block` is invoked which could
in turn invoke `Process::big_lock().restore_exclusive_lock()`. This
would then try to add the current thread to a different blocked thread
list than the one in use for the original mutex being locked, and
because it's an intrusive list, the thread is removed from its original
list during the `.append()`. When the original mutex eventually
unblocks, we no longer have the thread in the intrusive blocked threads
list and we panic.

Solve this by making the big lock mutex special and giving it its own
blocked thread list. Because the process big lock is temporary and is
being actively removed from e.g. syscalls, it's a matter of time before
we can also remove the fix introduced by this commit.

Fixes issue. (issue reference lost in extraction)
This commit is contained in:
Jelle Raaijmakers 2022-04-06 01:49:30 +02:00 committed by Andreas Kling
parent be26818448
commit 7826729ab2
Notes: sideshowbarker 2024-07-17 14:20:06 +09:00
4 changed files with 48 additions and 13 deletions

View file

@ -90,6 +90,7 @@ void Mutex::lock(Mode mode, [[maybe_unused]] LockLocation const& location)
return;
}
case Mode::Shared: {
VERIFY(m_behavior == MutexBehavior::Regular);
VERIFY(!m_holder);
if (mode == Mode::Exclusive) {
dbgln_if(LOCK_TRACE_DEBUG, "Mutex::lock @ {} ({}): blocking for exclusive access, currently shared, locks held {}", this, m_name, m_times_locked);
@ -207,9 +208,15 @@ void Mutex::block(Thread& current_thread, Mode mode, SpinlockLocker<Spinlock>& l
if constexpr (LOCK_IN_CRITICAL_DEBUG)
VERIFY_INTERRUPTS_ENABLED();
m_blocked_thread_lists.with([&](auto& lists) {
auto& list = lists.list_for_mode(mode);
VERIFY(!list.contains(current_thread));
list.append(current_thread);
auto append_to_list = [&]<typename L>(L& list) {
VERIFY(!list.contains(current_thread));
list.append(current_thread);
};
if (m_behavior == MutexBehavior::BigLock)
append_to_list(lists.exclusive_big_lock);
else
append_to_list(lists.list_for_mode(mode));
});
dbgln_if(LOCK_TRACE_DEBUG, "Mutex::lock @ {} ({}) waiting...", this, m_name);
@ -217,9 +224,15 @@ void Mutex::block(Thread& current_thread, Mode mode, SpinlockLocker<Spinlock>& l
dbgln_if(LOCK_TRACE_DEBUG, "Mutex::lock @ {} ({}) waited", this, m_name);
m_blocked_thread_lists.with([&](auto& lists) {
auto& list = lists.list_for_mode(mode);
VERIFY(list.contains(current_thread));
list.remove(current_thread);
auto remove_from_list = [&]<typename L>(L& list) {
VERIFY(list.contains(current_thread));
list.remove(current_thread);
};
if (m_behavior == MutexBehavior::BigLock)
remove_from_list(lists.exclusive_big_lock);
else
remove_from_list(lists.list_for_mode(mode));
});
}
@ -235,6 +248,7 @@ void Mutex::unblock_waiters(Mode previous_mode)
auto unblock_shared = [&]() {
if (lists.shared.is_empty())
return false;
VERIFY(m_behavior == MutexBehavior::Regular);
m_mode = Mode::Shared;
for (auto& thread : lists.shared) {
auto requested_locks = thread.unblock_from_mutex(*this);
@ -247,8 +261,8 @@ void Mutex::unblock_waiters(Mode previous_mode)
}
return true;
};
auto unblock_exclusive = [&]() {
if (auto* next_exclusive_thread = lists.exclusive.first()) {
auto unblock_exclusive = [&]<typename L>(L& list) {
if (auto* next_exclusive_thread = list.first()) {
m_mode = Mode::Exclusive;
m_times_locked = next_exclusive_thread->unblock_from_mutex(*this);
m_holder = next_exclusive_thread;
@ -257,11 +271,13 @@ void Mutex::unblock_waiters(Mode previous_mode)
return false;
};
if (previous_mode == Mode::Exclusive) {
if (m_behavior == MutexBehavior::BigLock) {
unblock_exclusive(lists.exclusive_big_lock);
} else if (previous_mode == Mode::Exclusive) {
if (!unblock_shared())
unblock_exclusive();
unblock_exclusive(lists.exclusive);
} else {
if (!unblock_exclusive())
if (!unblock_exclusive(lists.exclusive))
unblock_shared();
}
});

View file

@ -27,8 +27,15 @@ class Mutex {
public:
using Mode = LockMode;
Mutex(StringView name = {})
// FIXME: remove this after annihilating Process::m_big_lock
enum class MutexBehavior {
Regular,
BigLock,
};
Mutex(StringView name = {}, MutexBehavior behavior = MutexBehavior::Regular)
: m_name(name)
, m_behavior(behavior)
{
}
~Mutex() = default;
@ -72,12 +79,18 @@ public:
private:
using BlockedThreadList = IntrusiveList<&Thread::m_blocked_threads_list_node>;
// FIXME: remove this after annihilating Process::m_big_lock
using BigLockBlockedThreadList = IntrusiveList<&Thread::m_big_lock_blocked_threads_list_node>;
void block(Thread&, Mode, SpinlockLocker<Spinlock>&, u32);
void unblock_waiters(Mode);
StringView m_name;
Mode m_mode { Mode::Unlocked };
// FIXME: remove this after annihilating Process::m_big_lock
MutexBehavior m_behavior;
// When locked exclusively, only the thread already holding the lock can
// lock it again. When locked in shared mode, any thread can do that.
u32 m_times_locked { 0 };
@ -94,6 +107,9 @@ private:
BlockedThreadList exclusive;
BlockedThreadList shared;
// FIXME: remove this after annihilating Process::m_big_lock
BigLockBlockedThreadList exclusive_big_lock;
ALWAYS_INLINE BlockedThreadList& list_for_mode(Mode mode)
{
VERIFY(mode == Mode::Exclusive || mode == Mode::Shared);

View file

@ -809,7 +809,7 @@ private:
size_t m_master_tls_size { 0 };
size_t m_master_tls_alignment { 0 };
Mutex m_big_lock { "Process" };
Mutex m_big_lock { "Process", Mutex::MutexBehavior::BigLock };
Mutex m_ptrace_lock { "ptrace" };
RefPtr<Timer> m_alarm_timer;

View file

@ -1228,6 +1228,9 @@ private:
LockRank m_lock_rank_mask { LockRank::None };
bool m_allocation_enabled { true };
// FIXME: remove this after annihilating Process::m_big_lock
IntrusiveListNode<Thread> m_big_lock_blocked_threads_list_node;
#if LOCK_DEBUG
struct HoldingLockInfo {
Mutex* lock;