Kernel: Fix some problems with Thread::wait_on and Lock
This changes Thread::wait_on to no longer enable interrupts upon leaving; the old behavior caused problems with page fault handlers and in other situations. It may now be called from critical sections, with interrupts enabled or disabled, and it returns in the same state. This also requires some fixes to Lock. To aid debugging, a new define, LOCK_DEBUG, is added that enables checking for Lock leaks upon finalization of a Thread.
This commit is contained in:
parent 9e32d79e02
commit 78f1b5e359
Notes: sideshowbarker 2024-07-19 01:08:28 +09:00
Author: https://github.com/tomuta
Commit: https://github.com/SerenityOS/serenity/commit/78f1b5e359f
Pull-request: https://github.com/SerenityOS/serenity/pull/4281
10 changed files with 178 additions and 83 deletions
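
To illustrate the LOCK_DEBUG idea described above, here is a minimal, self-contained sketch of a lock-leak check: every acquisition records the lock together with the file/line that took it, every release decrements the record, and thread teardown asserts nothing is still held. In the actual commit this lives in Thread::holding_lock(), Thread::lock_count() and Thread::finalize() (see the Thread.h/Thread.cpp hunks below); all names here (LockTracker, HeldLockInfo, TRACK_LOCK) are illustrative, not the kernel's API.

// Hypothetical user-space sketch of a LOCK_DEBUG-style leak check.
// Not SerenityOS kernel code; names and types are illustrative only.
#include <cassert>
#include <cstddef>
#include <cstdio>
#include <vector>

struct HeldLockInfo {
    const void* lock; // identity of the lock object
    const char* file; // acquisition site, forwarded from a LOCKER-style macro
    int line;
    unsigned count;   // recursive acquisitions of the same lock
};

class LockTracker {
public:
    void did_lock(const void* lock, const char* file, int line)
    {
        for (auto& info : m_held) {
            if (info.lock == lock) {
                ++info.count;
                return;
            }
        }
        m_held.push_back({ lock, file, line, 1 });
    }

    void did_unlock(const void* lock)
    {
        for (std::size_t i = 0; i < m_held.size(); ++i) {
            if (m_held[i].lock == lock) {
                if (--m_held[i].count == 0)
                    m_held.erase(m_held.begin() + i);
                return;
            }
        }
        assert(false && "unlocking a lock we do not hold");
    }

    // Call when the owning thread is finalized: report and trap on leaks.
    void check_for_leaks() const
    {
        for (auto& info : m_held)
            std::fprintf(stderr, "leaked lock %p taken at %s:%d (count %u)\n",
                info.lock, info.file, info.line, info.count);
        assert(m_held.empty() && "thread finalized while still holding locks");
    }

private:
    std::vector<HeldLockInfo> m_held;
};

// A LOCKER-style macro forwards __FILE__/__LINE__ into the tracker.
#define TRACK_LOCK(tracker, lock) (tracker).did_lock(&(lock), __FILE__, __LINE__)

int main()
{
    LockTracker tracker;
    int some_lock = 0; // stand-in for a real lock object
    TRACK_LOCK(tracker, some_lock);
    // Forgetting tracker.did_unlock(&some_lock) here would trip the leak check.
    tracker.did_unlock(&some_lock);
    tracker.check_for_leaks();
    return 0;
}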
@@ -62,7 +62,7 @@ if (ALL_THE_DEBUG_MACROS)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DICMP_DEBUG -DICO_DEBUG -DImage_DEBUG -DIMAGE_DECODER_CLIENT_DEBUG -DIMAGE_DECODER_DEBUG -DIMAGE_LOADER_DEBUG -DINTERPRETER_DEBUG -DINTERRUPT_DEBUG -DIOAPIC_DEBUG -DIPC_DEBUG -DIPV4_DEBUG -DIPV4_SOCKET_DEBUG -DIRC_DEBUG -DIRQ_DEBUG")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DJOB_DEBUG")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DKEYBOARD_DEBUG -DKEYBOARD_SHORTCUTS_DEBUG -DKMALLOC_DEBUG_LARGE_ALLOCATIONS")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DLEXER_DEBUG -DLoader_DEBUG")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DLEXER_DEBUG -DLoader_DEBUG -DLOCK_DEBUG")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DMALLOC_DEBUG -DMASTERPTY_DEBUG -DMBR_DEBUG -DMEMORY_DEBUG -DMENU_DEBUG -DMINIMIZE_ANIMATION_DEBUG -DMM_DEBUG -DMOVE_DEBUG -DMULTIPROCESSOR_DEBUG")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DNETWORK_TASK_DEBUG -DNT_DEBUG")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DOBJECT_DEBUG -DOCCLUSIONS_DEBUG -DOFFD_DEBUG")

@@ -1052,14 +1052,6 @@ public:
        return *this;
    }

    void set_interrupt_flag_on_destruction(bool flag)
    {
        if (flag)
            m_prev_flags |= 0x200;
        else
            m_prev_flags &= ~0x200;
    }

    void leave()
    {
        ASSERT(m_valid);

@@ -44,6 +44,7 @@ class InodeWatcher;
class KBuffer;
class KResult;
class LocalSocket;
class Lock;
class MappedROM;
class MasterPTY;
class PageDirectory;

Kernel/Lock.cpp (114 lines changed)
@@ -27,78 +27,87 @@
#include <AK/TemporaryChange.h>
#include <Kernel/KSyms.h>
#include <Kernel/Lock.h>
#include <Kernel/Thread.h>

namespace Kernel {

static bool modes_conflict(Lock::Mode mode1, Lock::Mode mode2)
{
    if (mode1 == Lock::Mode::Unlocked || mode2 == Lock::Mode::Unlocked)
        return false;

    if (mode1 == Lock::Mode::Shared && mode2 == Lock::Mode::Shared)
        return false;

    return true;
}

#ifdef LOCK_DEBUG
void Lock::lock(Mode mode)
{
    ASSERT(mode != Mode::Unlocked);
    if (!are_interrupts_enabled()) {
        klog() << "Interrupts disabled when trying to take Lock{" << m_name << "}";
        dump_backtrace();
        Processor::halt();
    lock("unknown", 0, mode);
    }

void Lock::lock(const char* file, int line, Mode mode)
#else
void Lock::lock(Mode mode)
#endif
{
    // NOTE: This may be called from an interrupt handler (not an IRQ handler)
    // and also from within critical sections!
    ASSERT(!Processor::current().in_irq());
    ASSERT(mode != Mode::Unlocked);
    auto current_thread = Thread::current();
    ScopedCritical critical;
    for (;;) {
        if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
            do {
                // FIXME: Do not add new readers if writers are queued.
                bool modes_dont_conflict = !modes_conflict(m_mode, mode);
                bool already_hold_exclusive_lock = m_mode == Mode::Exclusive && m_holder == current_thread;
                if (modes_dont_conflict || already_hold_exclusive_lock) {
                bool can_lock;
                switch (m_mode) {
                case Lock::Mode::Unlocked:
                    can_lock = true;
                    break;
                case Lock::Mode::Shared:
                    can_lock = (mode == Lock::Mode::Shared);
                    break;
                case Lock::Mode::Exclusive:
                    can_lock = (m_holder == current_thread);
                    break;
                }
                if (can_lock) {
                    // We got the lock!
                    if (!already_hold_exclusive_lock)
                    if (m_mode == Lock::Mode::Unlocked) {
                        m_mode = mode;
                        ASSERT(m_times_locked == 0);
                        if (mode == Mode::Exclusive)
                            m_holder = current_thread;
                    }
#ifdef LOCK_DEBUG
                    current_thread->holding_lock(*this, true, file, line);
#endif
                    m_times_locked++;
                    m_lock.store(false, AK::memory_order_release);
                    return;
                }
            } while (current_thread->wait_on(m_queue, m_name, nullptr, &m_lock, m_holder) == Thread::BlockResult::NotBlocked);
        } else if (Processor::current().in_critical()) {
            // If we're in a critical section and trying to lock, no context
            // switch will happen, so yield.
            // The assumption is that if we call this from a critical section
            // that we DO want to temporarily leave it
            u32 prev_flags;
            u32 prev_crit = Processor::current().clear_critical(prev_flags, !Processor::current().in_irq());

            Scheduler::yield();

            // Note, we may now be on a different CPU!
            Processor::current().restore_critical(prev_crit, prev_flags);
        } else {
            // We need to process e.g. smp messages
            Processor::wait_check();
            // I don't know *who* is using "m_lock", so just yield.
            Scheduler::yield_from_critical();
        }
    }
}

void Lock::unlock()
{
    // NOTE: This may be called from an interrupt handler (not an IRQ handler)
    // and also from within critical sections!
    ASSERT(!Processor::current().in_irq());
    auto current_thread = Thread::current();
    ScopedCritical critical;
    for (;;) {
        if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
            ASSERT(m_times_locked);
            --m_times_locked;

            ASSERT(m_mode != Mode::Unlocked);
            if (m_mode == Mode::Exclusive)

            if (m_mode == Mode::Exclusive) {
                ASSERT(m_holder == current_thread);
                if (m_holder == current_thread && (m_mode == Mode::Shared || m_times_locked == 0))
                if (m_times_locked == 0)
                    m_holder = nullptr;
            }
#ifdef LOCK_DEBUG
            current_thread->holding_lock(*this, false);
#endif

            if (m_times_locked > 0) {
                m_lock.store(false, AK::memory_order_release);

@@ -109,29 +118,36 @@ void Lock::unlock()
            return;
        }
        // I don't know *who* is using "m_lock", so just yield.
        // The assumption is that if we call this from a critical section
        // that we DO want to temporarily leave it
        u32 prev_flags;
        u32 prev_crit = Processor::current().clear_critical(prev_flags, false);

        Scheduler::yield();

        // Note, we may now be on a different CPU!
        Processor::current().restore_critical(prev_crit, prev_flags);
        Scheduler::yield_from_critical();
    }
}

bool Lock::force_unlock_if_locked()
{
    ASSERT(m_mode != Mode::Shared);
    // NOTE: This may be called from an interrupt handler (not an IRQ handler)
    // and also from within critical sections!
    ASSERT(!Processor::current().in_irq());
    ScopedCritical critical;
    if (m_holder != Thread::current())
    for (;;) {
        if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
            if (m_holder != Thread::current()) {
                m_lock.store(false, AK::MemoryOrder::memory_order_release);
                return false;
            }
            ASSERT(m_mode != Mode::Shared);
            ASSERT(m_times_locked == 1);
#ifdef LOCK_DEBUG
            m_holder->holding_lock(*this, false);
#endif
            m_holder = nullptr;
            m_mode = Mode::Unlocked;
            m_times_locked = 0;
            m_queue.wake_one();
            m_queue.wake_one(&m_lock);
            break;
        }
        // I don't know *who* is using "m_lock", so just yield.
        Scheduler::yield_from_critical();
    }
    return true;
}

@@ -31,6 +31,7 @@
#include <AK/Types.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Forward.h>
#include <Kernel/Thread.h>
#include <Kernel/WaitQueue.h>

namespace Kernel {

@@ -50,6 +51,9 @@ public:
    };

    void lock(Mode = Mode::Exclusive);
#ifdef LOCK_DEBUG
    void lock(const char* file, int line, Mode mode = Mode::Exclusive);
#endif
    void unlock();
    bool force_unlock_if_locked();
    bool is_locked() const { return m_holder; }

@@ -77,6 +81,13 @@ private:

class Locker {
public:
#ifdef LOCK_DEBUG
    ALWAYS_INLINE explicit Locker(const char* file, int line, Lock& l, Lock::Mode mode = Lock::Mode::Exclusive)
        : m_lock(l)
    {
        m_lock.lock(file, line, mode);
    }
#endif
    ALWAYS_INLINE explicit Locker(Lock& l, Lock::Mode mode = Lock::Mode::Exclusive)
        : m_lock(l)
    {

@@ -90,7 +101,11 @@ private:
    Lock& m_lock;
};

#ifdef LOCK_DEBUG
#    define LOCKER(...) Locker locker(__FILE__, __LINE__, __VA_ARGS__)
#else
#    define LOCKER(...) Locker locker(__VA_ARGS__)
#endif

template<typename T>
class Lockable {

@@ -511,6 +511,21 @@ void Scheduler::invoke_async()
    pick_next();
}

void Scheduler::yield_from_critical()
{
    auto& proc = Processor::current();
    ASSERT(proc.in_critical());
    ASSERT(!proc.in_irq());

    yield(); // Flag a context switch

    u32 prev_flags;
    u32 prev_crit = Processor::current().clear_critical(prev_flags, false);

    // Note, we may now be on a different CPU!
    Processor::current().restore_critical(prev_crit, prev_flags);
}

void Scheduler::notify_finalizer()
{
    if (g_finalizer_has_work.exchange(true, AK::MemoryOrder::memory_order_acq_rel) == false)

@@ -57,6 +57,7 @@ public:
    [[noreturn]] static void start();
    static bool pick_next();
    static bool yield();
    static void yield_from_critical();
    static bool donate_to_and_switch(Thread*, const char* reason);
    static bool donate_to(RefPtr<Thread>&, const char* reason);
    static bool context_switch(Thread*);

@@ -316,7 +316,16 @@ void Thread::finalize()
    ASSERT(Thread::current() == g_finalizer);
    ASSERT(Thread::current() != this);

#ifdef LOCK_DEBUG
    ASSERT(!m_lock.own_lock());
    if (lock_count() > 0) {
        dbg() << "Thread " << *this << " leaking " << lock_count() << " Locks!";
        ScopedSpinLock list_lock(m_holding_locks_lock);
        for (auto& info : m_holding_locks_list)
            dbg() << " - " << info.lock->name() << " @ " << info.lock << " locked at " << info.file << ":" << info.line << " count: " << info.count;
        ASSERT_NOT_REACHED();
    }
#endif

    {
        ScopedSpinLock lock(g_scheduler_lock);

@@ -1080,10 +1089,9 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, const
            }
        });
        if (!timer) {
            if (lock)
                *lock = false;
            // We timed out already, don't block
            // The API contract guarantees we return with interrupts enabled,
            // regardless of how we got called
            critical.set_interrupt_flag_on_destruction(true);
            return BlockResult::InterruptedByTimeout;
        }
    }

@@ -1094,16 +1102,13 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, const
            // The WaitQueue was already requested to wake someone when
            // nobody was waiting. So return right away as we shouldn't
            // be waiting

            // The API contract guarantees we return with interrupts enabled,
            // regardless of how we got called
            critical.set_interrupt_flag_on_destruction(true);
            // NOTE: Do not set lock to false in this case!
            return BlockResult::NotBlocked;
        }

        did_unlock = unlock_process_if_locked();
        if (lock)
            *lock = false;
        did_unlock = unlock_process_if_locked();
        set_state(State::Queued);
        m_wait_reason = reason;

@@ -1158,9 +1163,6 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, const
        TimerQueue::the().cancel_timer(timer.release_nonnull());
    }

    // The API contract guarantees we return with interrupts enabled,
    // regardless of how we got called
    sti();
    return result;
}

@@ -45,6 +45,8 @@
#include <LibC/fd_set.h>
#include <LibELF/AuxiliaryVector.h>

//#define LOCK_DEBUG

namespace Kernel {

enum class DispatchSignalResult {

@@ -961,6 +963,44 @@ public:

    RecursiveSpinLock& get_lock() const { return m_lock; }

#ifdef LOCK_DEBUG
    void holding_lock(Lock& lock, bool holding, const char* file = nullptr, int line = 0)
    {
        m_holding_locks.fetch_add(holding ? 1 : -1, AK::MemoryOrder::memory_order_relaxed);
        ScopedSpinLock list_lock(m_holding_locks_lock);
        if (holding) {
            bool have_existing = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    have_existing = true;
                    info.count++;
                    break;
                }
            }
            if (!have_existing)
                m_holding_locks_list.append({ &lock, file ? file : "unknown", line, 1 });
        } else {
            bool found = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    ASSERT(info.count > 0);
                    if (--info.count == 0)
                        m_holding_locks_list.remove(i);
                    found = true;
                    break;
                }
            }
            ASSERT(found);
        }
    }
    u32 lock_count() const
    {
        return m_holding_locks.load(AK::MemoryOrder::memory_order_relaxed);
    }
#endif

private:
    IntrusiveListNode m_runnable_list_node;
    IntrusiveListNode m_wait_queue_node;

@@ -1004,9 +1044,11 @@ private:
        auto& blocker = static_cast<JoinBlocker&>(b);

        // NOTE: m_lock is held already!
        if (m_thread_did_exit)
        if (m_thread_did_exit) {
            blocker.unblock(exit_value(), true);
        return m_thread_did_exit;
            return false;
        }
        return true;
    }

private:

@@ -1050,6 +1092,18 @@ private:
    const char* m_wait_reason { nullptr };
    WaitQueue* m_queue { nullptr };

#ifdef LOCK_DEBUG
    struct HoldingLockInfo {
        Lock* lock;
        const char* file;
        int line;
        unsigned count;
    };
    Atomic<u32> m_holding_locks { 0 };
    SpinLock<u8> m_holding_locks_lock;
    Vector<HoldingLockInfo> m_holding_locks_list;
#endif

    JoinBlockCondition m_join_condition;
    Atomic<bool> m_is_active { false };
    bool m_is_joinable { true };

@@ -474,10 +474,9 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(vmobject().is_inode());

    sti();
    LOCKER(vmobject().m_paging_lock);
    cli();

    ASSERT_INTERRUPTS_DISABLED();
    auto& inode_vmobject = static_cast<InodeVMObject&>(vmobject());
    auto& vmobject_physical_page_entry = inode_vmobject.physical_pages()[first_page_index() + page_index_in_region];

@@ -501,7 +500,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
#ifdef MM_DEBUG
    dbg() << "MM: page_in_from_inode ready to read from inode";
#endif
    sti();

    u8 page_buffer[PAGE_SIZE];
    auto& inode = inode_vmobject.inode();
    auto buffer = UserOrKernelBuffer::for_kernel_buffer(page_buffer);

@@ -514,7 +513,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
        // If we read less than a page, zero out the rest to avoid leaking uninitialized data.
        memset(page_buffer + nread, 0, PAGE_SIZE - nread);
    }
    cli();

    vmobject_physical_page_entry = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
    if (vmobject_physical_page_entry.is_null()) {
        klog() << "MM: handle_inode_fault was unable to allocate a physical page";