2020-01-18 08:38:21 +00:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
|
2022-01-28 23:47:18 +00:00
|
|
|
* Copyright (c) 2022, Idan Horowitz <idan.horowitz@serenityos.org>
|
2020-01-18 08:38:21 +00:00
|
|
|
*
|
2021-04-22 08:24:48 +00:00
|
|
|
* SPDX-License-Identifier: BSD-2-Clause
|
2020-01-18 08:38:21 +00:00
|
|
|
*/
|
|
|
|
|
2021-01-25 15:07:10 +00:00
|
|
|
#include <Kernel/Debug.h>
|
2020-02-16 00:27:42 +00:00
|
|
|
#include <Kernel/KSyms.h>
|
2021-08-07 11:19:39 +00:00
|
|
|
#include <Kernel/Locking/LockLocation.h>
|
2021-07-18 07:10:27 +00:00
|
|
|
#include <Kernel/Locking/Mutex.h>
|
2021-08-21 23:37:17 +00:00
|
|
|
#include <Kernel/Locking/Spinlock.h>
|
2020-12-14 23:36:22 +00:00
|
|
|
#include <Kernel/Thread.h>
|
|
|
|
|
2022-06-17 21:58:01 +00:00
|
|
|
extern bool g_in_early_boot;
|
|
|
|
|
2020-02-16 00:27:42 +00:00
|
|
|
namespace Kernel {
|
|
|
|
|
2021-08-07 11:58:07 +00:00
|
|
|
// Acquires this mutex in the requested mode (Shared or Exclusive), blocking the
// calling thread if the mutex is currently held incompatibly by another thread.
// `location` is only consumed when LOCK_DEBUG is enabled.
void Mutex::lock(Mode mode, [[maybe_unused]] LockLocation const& location)
{
    // NOTE: This may be called from an interrupt handler (not an IRQ handler)
    // and also from within critical sections!
    VERIFY(!Processor::current_in_irq());
    if constexpr (LOCK_IN_CRITICAL_DEBUG) {
        // There are no interrupts enabled in early boot.
        if (!g_in_early_boot)
            VERIFY_INTERRUPTS_ENABLED();
    }
    VERIFY(mode != Mode::Unlocked);
    auto* current_thread = Thread::current();

    // All mutex state (m_mode, m_holder, counts) is protected by this spinlock.
    SpinlockLocker lock(m_lock);
    bool did_block = false;
    Mode current_mode = m_mode;
    switch (current_mode) {
    case Mode::Unlocked: {
        // Fast path: nobody holds the mutex, take it immediately in the requested mode.
        dbgln_if(LOCK_TRACE_DEBUG, "Mutex::lock @ ({}) {}: acquire {}, currently unlocked", this, m_name, mode_to_string(mode));
        m_mode = mode;
        VERIFY(!m_holder);
        VERIFY(m_shared_holders == 0);
        if (mode == Mode::Exclusive) {
            m_holder = current_thread;
        } else {
            VERIFY(mode == Mode::Shared);
            ++m_shared_holders;
#if LOCK_SHARED_UPGRADE_DEBUG
            m_shared_holders_map.set(current_thread, 1);
#endif
        }
        VERIFY(m_times_locked == 0);
        m_times_locked++;

#if LOCK_DEBUG
        if (current_thread) {
            current_thread->holding_lock(*this, 1, location);
        }
#endif
        return;
    }
    case Mode::Exclusive: {
        VERIFY(m_holder);
        if (m_holder != current_thread) {
            // Held exclusively by someone else: sleep until unblock_waiters() hands it to us.
            // block() drops m_lock while sleeping and re-acquires it before returning.
            block(*current_thread, mode, lock, 1);
            did_block = true;
            // If we blocked then m_mode should have been updated to what we requested
            VERIFY(m_mode == mode);
        }

        if (m_mode == Mode::Exclusive) {
            VERIFY(m_holder == current_thread);
            VERIFY(m_shared_holders == 0);
        } else if (did_block && mode == Mode::Shared) {
            // Only if we blocked trying to acquire a shared lock the lock would have been converted
            VERIFY(!m_holder);
            VERIFY(m_shared_holders > 0);
        }

        if constexpr (LOCK_TRACE_DEBUG) {
            if (mode == Mode::Exclusive)
                dbgln("Mutex::lock @ {} ({}): acquire {}, currently exclusive, holding: {}", this, m_name, mode_to_string(mode), m_times_locked);
            else
                dbgln("Mutex::lock @ {} ({}): acquire exclusive (requested {}), currently exclusive, holding: {}", this, m_name, mode_to_string(mode), m_times_locked);
        }

        VERIFY(m_times_locked > 0);
        if (!did_block) {
            // if we didn't block we must still be an exclusive lock
            // (recursive acquisition by the holder; waking from block() already
            // counted our lock via unblock_waiters, so only bump here).
            VERIFY(m_mode == Mode::Exclusive);
            m_times_locked++;
        }

#if LOCK_DEBUG
        current_thread->holding_lock(*this, 1, location);
#endif
        return;
    }
    case Mode::Shared: {
        // Big locks are exclusive-only, so a Shared current mode implies a regular mutex.
        VERIFY(m_behavior == MutexBehavior::Regular);
        VERIFY(!m_holder);
        if (mode == Mode::Exclusive) {
            dbgln_if(LOCK_TRACE_DEBUG, "Mutex::lock @ {} ({}): blocking for exclusive access, currently shared, locks held {}", this, m_name, m_times_locked);
#if LOCK_SHARED_UPGRADE_DEBUG
            VERIFY(m_shared_holders_map.size() != 1 || m_shared_holders_map.begin()->key != current_thread);
#endif
            // WARNING: The following block will deadlock if the current thread is the only shared locker of this Mutex
            // and is asking to upgrade the lock to be exclusive without first releasing the shared lock. We have no
            // allocation-free way to detect such a scenario, so if you suspect that this is the cause of your deadlock,
            // try turning on LOCK_SHARED_UPGRADE_DEBUG.
            block(*current_thread, mode, lock, 1);
            did_block = true;
            VERIFY(m_mode == mode);
        }

        dbgln_if(LOCK_TRACE_DEBUG, "Mutex::lock @ {} ({}): acquire {}, currently shared, locks held {}", this, m_name, mode_to_string(mode), m_times_locked);

        VERIFY(m_times_locked > 0);
        if (m_mode == Mode::Shared) {
            VERIFY(!m_holder);
            VERIFY(!did_block);
        } else if (did_block) {
            // We slept for an exclusive upgrade and were handed the lock exclusively.
            VERIFY(mode == Mode::Exclusive);
            VERIFY(m_holder == current_thread);
            VERIFY(m_shared_holders == 0);
        }

        if (!did_block) {
            // if we didn't block we must still be a shared lock
            VERIFY(m_mode == Mode::Shared);
            m_times_locked++;
            VERIFY(m_shared_holders > 0);
            ++m_shared_holders;
#if LOCK_SHARED_UPGRADE_DEBUG
            auto it = m_shared_holders_map.find(current_thread);
            if (it != m_shared_holders_map.end())
                it->value++;
            else
                m_shared_holders_map.set(current_thread, 1);
#endif
        }

#if LOCK_DEBUG
        current_thread->holding_lock(*this, 1, location);
#endif
        return;
    }
    default:
        VERIFY_NOT_REACHED();
    }
}
|
|
|
|
|
2021-07-17 19:09:51 +00:00
|
|
|
// Releases one level of this mutex held by the calling thread. When the lock
// count drops to zero the mutex transitions to Unlocked and any blocked
// waiters are handed the lock via unblock_waiters().
void Mutex::unlock()
{
    // NOTE: This may be called from an interrupt handler (not an IRQ handler)
    // and also from within critical sections!
    VERIFY(!Processor::current_in_irq());
    if constexpr (LOCK_IN_CRITICAL_DEBUG) {
        // There are no interrupts enabled in early boot.
        if (!g_in_early_boot)
            VERIFY_INTERRUPTS_ENABLED();
    }
    auto* current_thread = Thread::current();
    SpinlockLocker lock(m_lock);
    Mode current_mode = m_mode;
    if constexpr (LOCK_TRACE_DEBUG) {
        if (current_mode == Mode::Shared)
            dbgln("Mutex::unlock @ {} ({}): release {}, locks held: {}", this, m_name, mode_to_string(current_mode), m_times_locked);
        else
            dbgln("Mutex::unlock @ {} ({}): release {}, holding: {}", this, m_name, mode_to_string(current_mode), m_times_locked);
    }

    // Unlocking an unlocked mutex is a caller bug.
    VERIFY(current_mode != Mode::Unlocked);

    VERIFY(m_times_locked > 0);
    m_times_locked--;

    switch (current_mode) {
    case Mode::Exclusive:
        VERIFY(m_holder == current_thread);
        VERIFY(m_shared_holders == 0);
        // Keep m_holder set while recursive levels remain.
        if (m_times_locked == 0)
            m_holder = nullptr;
        break;
    case Mode::Shared: {
        VERIFY(!m_holder);
        VERIFY(m_shared_holders > 0);
        --m_shared_holders;
#if LOCK_SHARED_UPGRADE_DEBUG
        // Drop (or decrement) this thread's entry in the per-thread shared count map.
        auto it = m_shared_holders_map.find(current_thread);
        if (it->value > 1)
            it->value--;
        else
            m_shared_holders_map.remove(it);
#endif
        break;
    }
    default:
        VERIFY_NOT_REACHED();
    }

#if LOCK_DEBUG
    if (current_thread) {
        current_thread->holding_lock(*this, -1, {});
    }
#endif

    if (m_times_locked == 0) {
        // Fully released: no residual holder state may remain.
        VERIFY(current_mode == Mode::Exclusive ? !m_holder : m_shared_holders == 0);

        m_mode = Mode::Unlocked;
        unblock_waiters(current_mode);
    }
}
|
|
|
|
|
2021-09-05 17:02:03 +00:00
|
|
|
// Puts the calling thread on the appropriate blocked-thread list for this
// mutex, sleeps until the mutex is handed to us (temporarily releasing the
// given spinlock while asleep), then removes the thread from that list again.
void Mutex::block(Thread& current_thread, Mode mode, SpinlockLocker<Spinlock>& lock, u32 requested_locks)
{
    if constexpr (LOCK_IN_CRITICAL_DEBUG) {
        // Interrupts stay disabled throughout early boot, so only verify afterwards.
        if (!g_in_early_boot)
            VERIFY_INTERRUPTS_ENABLED();
    }

    // Register ourselves as a waiter before going to sleep.
    m_blocked_thread_lists.with([&](auto& lists) {
        auto enqueue = [&]<typename L>(L& waiters) {
            VERIFY(!waiters.contains(current_thread));
            waiters.append(current_thread);
        };

        if (m_behavior != MutexBehavior::BigLock)
            enqueue(lists.list_for_mode(mode));
        else
            enqueue(lists.exclusive_big_lock);
    });

    dbgln_if(LOCK_TRACE_DEBUG, "Mutex::lock @ {} ({}) waiting...", this, m_name);
    current_thread.block(*this, lock, requested_locks);
    dbgln_if(LOCK_TRACE_DEBUG, "Mutex::lock @ {} ({}) waited", this, m_name);

    // We were woken up; take ourselves off the waiter list we joined above.
    m_blocked_thread_lists.with([&](auto& lists) {
        auto dequeue = [&]<typename L>(L& waiters) {
            VERIFY(waiters.contains(current_thread));
            waiters.remove(current_thread);
        };

        if (m_behavior != MutexBehavior::BigLock)
            dequeue(lists.list_for_mode(mode));
        else
            dequeue(lists.exclusive_big_lock);
    });
}
|
|
|
|
|
2021-07-17 19:09:51 +00:00
|
|
|
// Hands a fully-released mutex (m_times_locked == 0, m_mode == Unlocked) to
// blocked waiters. To avoid starvation, the previously-held mode decides who
// goes first: after Exclusive, shared waiters are preferred; otherwise an
// exclusive waiter is preferred. Big locks only ever have exclusive waiters.
void Mutex::unblock_waiters(Mode previous_mode)
{
    VERIFY(m_times_locked == 0);
    VERIFY(m_mode == Mode::Unlocked);

    m_blocked_thread_lists.with([&](auto& lists) {
        // Wake ALL shared waiters at once; each contributes its requested
        // lock count to m_shared_holders/m_times_locked. Returns false if
        // there were no shared waiters.
        auto unblock_shared = [&]() {
            if (lists.shared.is_empty())
                return false;
            VERIFY(m_behavior == MutexBehavior::Regular);
            m_mode = Mode::Shared;
            for (auto& thread : lists.shared) {
                auto requested_locks = thread.unblock_from_mutex(*this);
                m_shared_holders += requested_locks;
#if LOCK_SHARED_UPGRADE_DEBUG
                auto set_result = m_shared_holders_map.set(&thread, requested_locks);
                VERIFY(set_result == AK::HashSetResult::InsertedNewEntry);
#endif
                m_times_locked += requested_locks;
            }
            return true;
        };
        // Wake exactly ONE exclusive waiter (the head of the given list) and
        // make it the new holder. Returns false if the list was empty.
        auto unblock_exclusive = [&]<typename L>(L& list) {
            if (auto* next_exclusive_thread = list.first()) {
                m_mode = Mode::Exclusive;
                m_times_locked = next_exclusive_thread->unblock_from_mutex(*this);
                m_holder = next_exclusive_thread;
                return true;
            }
            return false;
        };

        if (m_behavior == MutexBehavior::BigLock) {
            unblock_exclusive(lists.exclusive_big_lock);
        } else if (previous_mode == Mode::Exclusive) {
            if (!unblock_shared())
                unblock_exclusive(lists.exclusive);
        } else {
            if (!unblock_exclusive(lists.exclusive))
                unblock_shared();
        }
    });
}
|
|
|
|
|
2022-01-28 23:47:18 +00:00
|
|
|
// Strips ALL exclusive lock levels held by the calling thread on this big
// lock in one go, storing the stripped count in `lock_count_to_restore` so it
// can later be re-applied (see restore_exclusive_lock). Returns the mode the
// mutex was in before unlocking, or Mode::Unlocked if the caller held nothing.
auto Mutex::force_unlock_exclusive_if_locked(u32& lock_count_to_restore) -> Mode
{
    VERIFY(m_behavior == MutexBehavior::BigLock);
    // NOTE: This may be called from an interrupt handler (not an IRQ handler)
    // and also from within critical sections!
    VERIFY(!Processor::current_in_irq());

    auto* current_thread = Thread::current();
    SpinlockLocker lock(m_lock);
    auto previous_mode = m_mode;

    if (previous_mode == Mode::Exclusive) {
        // Some other thread owns the lock — we hold nothing to strip.
        if (m_holder != current_thread) {
            lock_count_to_restore = 0;
            return Mode::Unlocked;
        }

        dbgln_if(LOCK_RESTORE_DEBUG, "Mutex::force_unlock_exclusive_if_locked @ {}: unlocking exclusive with lock count: {}", this, m_times_locked);
#if LOCK_DEBUG
        m_holder->holding_lock(*this, -(int)m_times_locked, {});
#endif
        // Remember the full recursion depth, then release everything at once
        // and hand the lock to the next waiter.
        m_holder = nullptr;
        VERIFY(m_times_locked > 0);
        lock_count_to_restore = m_times_locked;
        m_times_locked = 0;
        m_mode = Mode::Unlocked;
        unblock_waiters(Mode::Exclusive);
    } else if (previous_mode == Mode::Unlocked) {
        lock_count_to_restore = 0;
    } else {
        // Big locks are never held in Shared mode.
        VERIFY_NOT_REACHED();
    }
    return previous_mode;
}
|
|
|
|
|
2022-01-28 23:47:18 +00:00
|
|
|
// Re-acquires this big lock exclusively with `lock_count` recursion levels in
// one step — presumably the counterpart to force_unlock_exclusive_if_locked,
// re-applying the count it stripped. Blocks if another thread currently holds
// the lock. `location` is only consumed when LOCK_DEBUG is enabled.
void Mutex::restore_exclusive_lock(u32 lock_count, [[maybe_unused]] LockLocation const& location)
{
    VERIFY(m_behavior == MutexBehavior::BigLock);
    VERIFY(lock_count > 0);
    VERIFY(!Processor::current_in_irq());

    auto* current_thread = Thread::current();
    bool did_block = false;
    SpinlockLocker lock(m_lock);
    [[maybe_unused]] auto previous_mode = m_mode;
    if (m_mode == Mode::Exclusive && m_holder != current_thread) {
        // Another thread holds the lock: sleep until unblock_waiters() hands
        // it to us with our requested lock_count already applied.
        block(*current_thread, Mode::Exclusive, lock, lock_count);
        did_block = true;
        // If we blocked then m_mode should have been updated to what we requested
        VERIFY(m_mode == Mode::Exclusive);
    }

    dbgln_if(LOCK_RESTORE_DEBUG, "Mutex::restore_exclusive_lock @ {}: restoring exclusive with lock count {}, was {}", this, lock_count, mode_to_string(previous_mode));

    VERIFY(m_mode != Mode::Shared);
    VERIFY(m_shared_holders == 0);
    if (did_block) {
        // unblock_waiters() made us the holder and set the count for us.
        VERIFY(m_times_locked > 0);
        VERIFY(m_holder == current_thread);
    } else {
        if (m_mode == Mode::Unlocked) {
            // Lock was free: take it and install the full count directly.
            m_mode = Mode::Exclusive;
            VERIFY(m_times_locked == 0);
            m_times_locked = lock_count;
            VERIFY(!m_holder);
            m_holder = current_thread;
        } else {
            // We already hold it exclusively: stack the restored levels on top.
            VERIFY(m_mode == Mode::Exclusive);
            VERIFY(m_holder == current_thread);
            VERIFY(m_times_locked > 0);
            m_times_locked += lock_count;
        }
    }

#if LOCK_DEBUG
    // m_holder is guaranteed to be current_thread in all branches above.
    m_holder->holding_lock(*this, (int)lock_count, location);
#endif
}
|
|
|
|
|
2020-02-16 00:27:42 +00:00
|
|
|
}
|