Kernel: Fix Lock race causing infinite spinning between two threads

We need to account for how many shared lock instances the current
thread owns, so that we can properly release those references when
yielding execution.

We also need to release the process lock when donating.
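The core of the fix is per-thread bookkeeping: the lock remembers how many shared holds each thread owns, so all of a thread's holds can be stripped off before it blocks, yields, or donates, and put back afterwards. Below is a minimal standalone sketch of that idea in standard C++ — std::mutex, std::unordered_map, and the function names stand in for the kernel's own primitives and are illustrative assumptions, not the patched code itself:

```cpp
#include <cstdint>
#include <mutex>
#include <thread>
#include <unordered_map>

class CountingSharedLock {
public:
    // Strip off every shared hold the calling thread owns and return the
    // count, so the caller can later hand it back to restore_shared().
    uint32_t force_unlock_shared()
    {
        std::lock_guard guard(m_state_lock);
        auto it = m_shared_holders.find(std::this_thread::get_id());
        if (it == m_shared_holders.end())
            return 0; // this thread held nothing; nothing to restore later
        uint32_t count = it->second;
        m_times_locked -= count;
        m_shared_holders.erase(it);
        return count;
    }

    // Re-establish the holds that force_unlock_shared() stripped off.
    void restore_shared(uint32_t count)
    {
        if (count == 0)
            return; // we held nothing before, so there is nothing to restore
        std::lock_guard guard(m_state_lock);
        m_shared_holders[std::this_thread::get_id()] = count;
        m_times_locked += count;
    }

private:
    std::mutex m_state_lock;       // protects the bookkeeping below
    uint32_t m_times_locked { 0 }; // total holds across all threads
    std::unordered_map<std::thread::id, uint32_t> m_shared_holders; // holds per thread
};
```

In the patch itself this pattern appears as Lock::force_unlock_if_locked(u32&), which returns the previous mode plus a hold count, and Lock::restore_lock(Mode, u32), which re-acquires exactly that count, with the per-thread counts kept in a HashMap<Thread*, u32>.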
Tom, 2020-12-14 16:36:22 -07:00 (committed by Andreas Kling)
parent 56701f91f9
commit c4176b0da1
10 changed files with 359 additions and 96 deletions

CMakeLists.txt

@@ -62,7 +62,7 @@ if (ALL_THE_DEBUG_MACROS)
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DICMP_DEBUG -DICO_DEBUG -DImage_DEBUG -DIMAGE_DECODER_CLIENT_DEBUG -DIMAGE_DECODER_DEBUG -DIMAGE_LOADER_DEBUG -DINTERRUPT_DEBUG -DIOAPIC_DEBUG -DIPC_DEBUG -DIPV4_DEBUG -DIPV4_SOCKET_DEBUG -DIRC_DEBUG -DIRQ_DEBUG")
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DJOB_DEBUG -DJPG_DEBUG")
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DKEYBOARD_DEBUG -DKEYBOARD_SHORTCUTS_DEBUG -DKMALLOC_DEBUG_LARGE_ALLOCATIONS")
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DLEXER_DEBUG -DLoader_DEBUG -DLOCK_DEBUG -DLOOKUPSERVER_DEBUG")
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DLEXER_DEBUG -DLoader_DEBUG -DLOCK_DEBUG -DLOCK_TRACE_DEBUG -DLOCK_RESTORE_DEBUG -DLOOKUPSERVER_DEBUG")
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DMALLOC_DEBUG -DMASTERPTY_DEBUG -DMBR_DEBUG -DMEMORY_DEBUG -DMENU_DEBUG -DMINIMIZE_ANIMATION_DEBUG -DMM_DEBUG -DMOVE_DEBUG -DMULTIPROCESSOR_DEBUG")
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DNETWORK_TASK_DEBUG -DNT_DEBUG")
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DOBJECT_DEBUG -DOCCLUSIONS_DEBUG -DOFFD_DEBUG")

Kernel/Lock.cpp

@@ -27,6 +27,10 @@
 #include <AK/TemporaryChange.h>
 #include <Kernel/KSyms.h>
 #include <Kernel/Lock.h>
+#include <Kernel/Thread.h>
+
+//#define LOCK_TRACE_DEBUG
+//#define LOCK_RESTORE_DEBUG
 
 namespace Kernel {
@@ -46,38 +50,78 @@ void Lock::lock(Mode mode)
     ASSERT(!Processor::current().in_irq());
     ASSERT(mode != Mode::Unlocked);
     auto current_thread = Thread::current();
-    ScopedCritical critical;
+    ScopedCritical critical; // in case we're not in a critical section already
     for (;;) {
         if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
             do {
                 // FIXME: Do not add new readers if writers are queued.
-                bool can_lock;
-                switch (m_mode) {
-                case Lock::Mode::Unlocked:
-                    can_lock = true;
-                    break;
-                case Lock::Mode::Shared:
-                    can_lock = (mode == Lock::Mode::Shared);
-                    break;
-                case Lock::Mode::Exclusive:
-                    can_lock = (m_holder == current_thread);
-                    break;
-                }
-                if (can_lock) {
-                    // We got the lock!
-                    if (m_mode == Lock::Mode::Unlocked) {
-                        m_mode = mode;
-                        ASSERT(m_times_locked == 0);
-                        if (mode == Mode::Exclusive)
-                            m_holder = current_thread;
-                    }
-#ifdef LOCK_DEBUG
-                    current_thread->holding_lock(*this, true, file, line);
-#endif
-                    m_times_locked++;
-                    m_lock.store(false, AK::memory_order_release);
-                    return;
-                }
+                auto current_mode = m_mode.load(AK::MemoryOrder::memory_order_relaxed);
+                switch (current_mode) {
+                case Mode::Unlocked: {
+#ifdef LOCK_TRACE_DEBUG
+                    dbg() << "Lock::lock @ " << this << ": acquire " << mode_to_string(mode) << ", currently unlocked";
+#endif
+                    m_mode.store(mode, AK::MemoryOrder::memory_order_relaxed);
+                    ASSERT(!m_holder);
+                    ASSERT(m_shared_holders.is_empty());
+                    if (mode == Mode::Exclusive) {
+                        m_holder = current_thread;
+                    } else {
+                        ASSERT(mode == Mode::Shared);
+                        m_shared_holders.set(current_thread, 1);
+                    }
+                    ASSERT(m_times_locked == 0);
+                    m_times_locked++;
+#ifdef LOCK_DEBUG
+                    current_thread->holding_lock(*this, 1, file, line);
+#endif
+                    m_lock.store(false, AK::memory_order_release);
+                    return;
+                }
+                case Mode::Exclusive: {
+                    ASSERT(m_holder);
+                    if (m_holder != current_thread)
+                        break;
+                    ASSERT(m_shared_holders.is_empty());
+#ifdef LOCK_TRACE_DEBUG
+                    if (mode == Mode::Exclusive)
+                        dbg() << "Lock::lock @ " << this << ": acquire " << mode_to_string(mode) << ", currently exclusive, holding: " << m_times_locked;
+                    else
+                        dbg() << "Lock::lock @ " << this << ": acquire exclusive (requested " << mode_to_string(mode) << "), currently exclusive, holding " << m_times_locked;
+#endif
+                    ASSERT(mode == Mode::Exclusive || mode == Mode::Shared);
+                    ASSERT(m_times_locked > 0);
+                    m_times_locked++;
+#ifdef LOCK_DEBUG
+                    current_thread->holding_lock(*this, 1, file, line);
+#endif
+                    m_lock.store(false, AK::memory_order_release);
+                    return;
+                }
+                case Mode::Shared: {
+                    ASSERT(!m_holder);
+                    if (mode != Mode::Shared)
+                        break;
+#ifdef LOCK_TRACE_DEBUG
+                    dbg() << "Lock::lock @ " << this << ": acquire " << mode_to_string(mode) << ", currently shared, locks held: " << m_times_locked;
+#endif
+                    ASSERT(m_times_locked > 0);
+                    m_times_locked++;
+                    ASSERT(!m_shared_holders.is_empty());
+                    auto it = m_shared_holders.find(current_thread);
+                    if (it != m_shared_holders.end())
+                        it->value++;
+                    else
+                        m_shared_holders.set(current_thread, 1);
+#ifdef LOCK_DEBUG
+                    current_thread->holding_lock(*this, 1, file, line);
+#endif
+                    m_lock.store(false, AK::memory_order_release);
+                    return;
+                }
+                default:
+                    ASSERT_NOT_REACHED();
+                }
                 m_lock.store(false, AK::memory_order_release);
             } while (m_queue.wait_on(nullptr, m_name) == Thread::BlockResult::NotBlocked);
         } else {
@@ -93,28 +137,53 @@ void Lock::unlock()
     // and also from within critical sections!
     ASSERT(!Processor::current().in_irq());
     auto current_thread = Thread::current();
-    ScopedCritical critical;
+    ScopedCritical critical; // in case we're not in a critical section already
     for (;;) {
         if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
-            ASSERT(m_times_locked);
-            --m_times_locked;
-            ASSERT(m_mode != Mode::Unlocked);
+            auto current_mode = m_mode.load(AK::MemoryOrder::memory_order_relaxed);
+#ifdef LOCK_TRACE_DEBUG
+            if (current_mode == Mode::Shared)
+                dbg() << "Lock::unlock @ " << this << ": release " << mode_to_string(current_mode) << ", locks held: " << m_times_locked;
+            else
+                dbg() << "Lock::unlock @ " << this << ": release " << mode_to_string(current_mode) << ", holding: " << m_times_locked;
+#endif
+            ASSERT(current_mode != Mode::Unlocked);
+            ASSERT(m_times_locked > 0);
+            m_times_locked--;
 
-            if (m_mode == Mode::Exclusive) {
+            switch (current_mode) {
+            case Mode::Exclusive:
                 ASSERT(m_holder == current_thread);
+                ASSERT(m_shared_holders.is_empty());
                 if (m_times_locked == 0)
                     m_holder = nullptr;
+                break;
+            case Mode::Shared: {
+                ASSERT(!m_holder);
+                auto it = m_shared_holders.find(current_thread);
+                ASSERT(it != m_shared_holders.end());
+                if (it->value > 1) {
+                    it->value--;
+                } else {
+                    ASSERT(it->value > 0);
+                    m_shared_holders.remove(it);
+                }
+                break;
             }
+            default:
+                ASSERT_NOT_REACHED();
+            }
+
+            if (m_times_locked == 0) {
+                ASSERT(current_mode == Mode::Exclusive ? !m_holder : m_shared_holders.is_empty());
+                m_mode.store(Mode::Unlocked, AK::MemoryOrder::memory_order_relaxed);
+            }
+
 #ifdef LOCK_DEBUG
-            current_thread->holding_lock(*this, false);
+            current_thread->holding_lock(*this, -1);
 #endif
-            if (m_times_locked > 0) {
-                m_lock.store(false, AK::memory_order_release);
-                return;
-            }
-            m_mode = Mode::Unlocked;
+
             m_lock.store(false, AK::memory_order_release);
             m_queue.wake_one();
             return;
@@ -124,39 +193,153 @@ void Lock::unlock()
         }
     }
 }
 
-bool Lock::force_unlock_if_locked()
+auto Lock::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode
 {
     // NOTE: This may be called from an interrupt handler (not an IRQ handler)
     // and also from within critical sections!
     ASSERT(!Processor::current().in_irq());
-    ScopedCritical critical;
+    auto current_thread = Thread::current();
+    ScopedCritical critical; // in case we're not in a critical section already
     for (;;) {
         if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
-            if (m_holder != Thread::current()) {
-                m_lock.store(false, AK::MemoryOrder::memory_order_release);
-                return false;
-            }
-            ASSERT(m_mode != Mode::Shared);
-            ASSERT(m_times_locked == 1);
-#ifdef LOCK_DEBUG
-            m_holder->holding_lock(*this, false);
-#endif
-            m_holder = nullptr;
-            m_mode = Mode::Unlocked;
-            m_times_locked = 0;
-            m_lock.store(false, AK::memory_order_release);
+            Mode previous_mode;
+            auto current_mode = m_mode.load(AK::MemoryOrder::memory_order_relaxed);
+            switch (current_mode) {
+            case Mode::Exclusive: {
+                if (m_holder != current_thread) {
+                    m_lock.store(false, AK::MemoryOrder::memory_order_release);
+                    lock_count_to_restore = 0;
+                    return Mode::Unlocked;
+                }
+#ifdef LOCK_RESTORE_DEBUG
+                dbg() << "Lock::force_unlock_if_locked @ " << this << ": unlocking exclusive with lock count: " << m_times_locked;
+#endif
+                m_holder = nullptr;
+                ASSERT(m_times_locked > 0);
+                lock_count_to_restore = m_times_locked;
+                m_times_locked = 0;
+                m_mode.store(Mode::Unlocked, AK::MemoryOrder::memory_order_relaxed);
+                m_lock.store(false, AK::memory_order_release);
+#ifdef LOCK_DEBUG
+                current_thread->holding_lock(*this, -(int)lock_count_to_restore);
+#endif
+                previous_mode = Mode::Exclusive;
+                break;
+            }
+            case Mode::Shared: {
+                ASSERT(!m_holder);
+                auto it = m_shared_holders.find(current_thread);
+                if (it == m_shared_holders.end()) {
+                    m_lock.store(false, AK::MemoryOrder::memory_order_release);
+                    lock_count_to_restore = 0;
+                    return Mode::Unlocked;
+                }
+#ifdef LOCK_RESTORE_DEBUG
+                dbg() << "Lock::force_unlock_if_locked @ " << this << ": unlocking shared with lock count: " << it->value << ", total locks: " << m_times_locked;
+#endif
+                ASSERT(it->value > 0);
+                lock_count_to_restore = it->value;
+                ASSERT(lock_count_to_restore > 0);
+#ifdef LOCK_DEBUG
+                current_thread->holding_lock(*this, -(int)lock_count_to_restore);
+#endif
+                m_shared_holders.remove(it);
+                ASSERT(m_times_locked >= lock_count_to_restore);
+                m_times_locked -= lock_count_to_restore;
+                if (m_times_locked == 0)
+                    m_mode.store(Mode::Unlocked, AK::MemoryOrder::memory_order_relaxed);
+                m_lock.store(false, AK::memory_order_release);
+                previous_mode = Mode::Shared;
+                break;
+            }
+            case Mode::Unlocked: {
+                m_lock.store(false, AK::memory_order_relaxed);
+                lock_count_to_restore = 0;
+                previous_mode = Mode::Unlocked;
+                break;
+            }
+            default:
+                ASSERT_NOT_REACHED();
+            }
             m_queue.wake_one();
-            break;
+            return previous_mode;
         }
         // I don't know *who* is using "m_lock", so just yield.
         Scheduler::yield_from_critical();
     }
-    return true;
 }
 
+#ifdef LOCK_DEBUG
+void Lock::restore_lock(Mode mode, u32 lock_count)
+{
+    return restore_lock("unknown", 0, mode, lock_count);
+}
+
+void Lock::restore_lock(const char* file, int line, Mode mode, u32 lock_count)
+#else
+void Lock::restore_lock(Mode mode, u32 lock_count)
+#endif
+{
+    ASSERT(mode != Mode::Unlocked);
+    ASSERT(lock_count > 0);
+    ASSERT(!Processor::current().in_irq());
+    auto current_thread = Thread::current();
+    ScopedCritical critical; // in case we're not in a critical section already
+    for (;;) {
+        if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
+            switch (mode) {
+            case Mode::Exclusive: {
+                auto expected_mode = Mode::Unlocked;
+                if (!m_mode.compare_exchange_strong(expected_mode, Mode::Exclusive, AK::MemoryOrder::memory_order_relaxed))
+                    break;
+#ifdef LOCK_RESTORE_DEBUG
+                dbg() << "Lock::restore_lock @ " << this << ": restoring " << mode_to_string(mode) << " with lock count " << lock_count << ", was unlocked";
+#endif
+                ASSERT(m_times_locked == 0);
+                m_times_locked = lock_count;
+                ASSERT(!m_holder);
+                ASSERT(m_shared_holders.is_empty());
+                m_holder = current_thread;
+                m_lock.store(false, AK::memory_order_release);
+#ifdef LOCK_DEBUG
+                m_holder->holding_lock(*this, (int)lock_count, file, line);
+#endif
+                return;
+            }
+            case Mode::Shared: {
+                auto expected_mode = Mode::Unlocked;
+                if (!m_mode.compare_exchange_strong(expected_mode, Mode::Shared, AK::MemoryOrder::memory_order_relaxed) && expected_mode != Mode::Shared)
+                    break;
+#ifdef LOCK_RESTORE_DEBUG
+                dbg() << "Lock::restore_lock @ " << this << ": restoring " << mode_to_string(mode) << " with lock count " << lock_count << ", was " << mode_to_string(expected_mode);
+#endif
+                ASSERT(expected_mode == Mode::Shared || m_times_locked == 0);
+                m_times_locked += lock_count;
+                ASSERT(!m_holder);
+                ASSERT((expected_mode == Mode::Unlocked) == m_shared_holders.is_empty());
+                auto set_result = m_shared_holders.set(current_thread, lock_count);
+                // There may be other shared lock holders already, but we should not have an entry yet
+                ASSERT(set_result == AK::HashSetResult::InsertedNewEntry);
+                m_lock.store(false, AK::memory_order_release);
+#ifdef LOCK_DEBUG
+                current_thread->holding_lock(*this, (int)lock_count, file, line);
+#endif
+                return;
+            }
+            default:
+                ASSERT_NOT_REACHED();
+            }
+
+            m_lock.store(false, AK::memory_order_relaxed);
+        }
+        // I don't know *who* is using "m_lock", so just yield.
+        Scheduler::yield_from_critical();
+    }
+}
+
 void Lock::clear_waiters()
 {
-    ASSERT(m_mode != Mode::Shared);
+    ASSERT(m_mode.load(AK::MemoryOrder::memory_order_relaxed) != Mode::Shared);
     m_queue.wake_all();
 }

Kernel/Lock.h

@@ -28,44 +28,60 @@
 
 #include <AK/Assertions.h>
 #include <AK/Atomic.h>
+#include <AK/HashMap.h>
 #include <AK/Types.h>
 #include <Kernel/Arch/i386/CPU.h>
 #include <Kernel/Forward.h>
-#include <Kernel/Thread.h>
+#include <Kernel/LockMode.h>
 #include <Kernel/WaitQueue.h>
 
 namespace Kernel {
 
 class Lock {
+    AK_MAKE_NONCOPYABLE(Lock);
+    AK_MAKE_NONMOVABLE(Lock);
+
 public:
+    using Mode = LockMode;
+
     Lock(const char* name = nullptr)
         : m_name(name)
     {
     }
     ~Lock() { }
 
-    enum class Mode {
-        Unlocked,
-        Shared,
-        Exclusive
-    };
-
     void lock(Mode = Mode::Exclusive);
 #ifdef LOCK_DEBUG
     void lock(const char* file, int line, Mode mode = Mode::Exclusive);
+    void restore_lock(const char* file, int line, Mode, u32);
 #endif
     void unlock();
-    bool force_unlock_if_locked();
-    bool is_locked() const { return m_holder; }
+    [[nodiscard]] Mode force_unlock_if_locked(u32&);
+    void restore_lock(Mode, u32);
+    bool is_locked() const { return m_mode.load(AK::MemoryOrder::memory_order_relaxed) != Mode::Unlocked; }
     void clear_waiters();
 
     const char* name() const { return m_name; }
 
+    static const char* mode_to_string(Mode mode)
+    {
+        switch (mode) {
+        case Mode::Unlocked:
+            return "unlocked";
+        case Mode::Exclusive:
+            return "exclusive";
+        case Mode::Shared:
+            return "shared";
+        default:
+            return "invalid";
+        }
+    }
+
 private:
     Atomic<bool> m_lock { false };
     const char* m_name { nullptr };
     WaitQueue m_queue;
-    Mode m_mode { Mode::Unlocked };
+    Atomic<Mode> m_mode { Mode::Unlocked };
 
     // When locked exclusively, only the thread already holding the lock can
     // lock it again. When locked in shared mode, any thread can do that.
@@ -77,6 +93,7 @@ private:
     // When locked exclusively, this is always the one thread that holds the
     // lock.
     RefPtr<Thread> m_holder;
+    HashMap<Thread*, u32> m_shared_holders;
 };
 
 class Locker {
@@ -103,8 +120,10 @@ private:
 
 #ifdef LOCK_DEBUG
 #    define LOCKER(...) Locker locker(__FILE__, __LINE__, __VA_ARGS__)
+#    define RESTORE_LOCK(lock, ...) (lock).restore_lock(__FILE__, __LINE__, __VA_ARGS__)
 #else
 #    define LOCKER(...) Locker locker(__VA_ARGS__)
+#    define RESTORE_LOCK(lock, ...) (lock).restore_lock(__VA_ARGS__)
 #endif
 
 template<typename T>

Kernel/LockMode.h (new file, 37 lines)

@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020, The SerenityOS developers.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ *    list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+namespace Kernel {
+
+enum class LockMode {
+    Unlocked,
+    Shared,
+    Exclusive
+};
+
+}

Kernel/Syscalls/execve.cpp

@@ -348,7 +348,8 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
         ScopedSpinLock lock(g_scheduler_lock);
         new_main_thread->set_state(Thread::State::Runnable);
     }
-    big_lock().force_unlock_if_locked();
+    u32 lock_count_to_restore;
+    (void)big_lock().force_unlock_if_locked(lock_count_to_restore);
     ASSERT_INTERRUPTS_DISABLED();
     ASSERT(Processor::current().in_critical());
     return 0;

Kernel/Syscalls/donate.cpp

@@ -41,14 +41,11 @@ int Process::sys$donate(pid_t tid)
     if (tid < 0)
         return -EINVAL;
 
-    // We don't strictly need to grab the scheduler lock here, but it
-    // will close a race where we can find the thread but it disappears
-    // before we call Scheduler::donate_to.
-    ScopedSpinLock lock(g_scheduler_lock);
+    ScopedCritical critical;
     auto thread = Thread::from_tid(tid);
     if (!thread || thread->pid() != pid())
         return -ESRCH;
-    Scheduler::donate_to(thread, "sys$donate");
+    Thread::current()->donate_without_holding_big_lock(thread, "sys$donate");
     return 0;
 }

Kernel/Thread.cpp

@@ -212,7 +212,8 @@ void Thread::die_if_needed()
     if (!m_should_die)
         return;
 
-    unlock_process_if_locked();
+    u32 unlock_count;
+    (void)unlock_process_if_locked(unlock_count);
 
     ScopedCritical critical;
     set_should_die();
@@ -238,7 +239,8 @@ void Thread::exit(void* exit_value)
     ASSERT(Thread::current() == this);
     m_join_condition.thread_did_exit(exit_value);
     set_should_die();
-    unlock_process_if_locked();
+    u32 unlock_count;
+    (void)unlock_process_if_locked(unlock_count);
     die_if_needed();
 }
@@ -255,25 +257,33 @@ void Thread::yield_while_not_holding_big_lock()
 void Thread::yield_without_holding_big_lock()
 {
     ASSERT(!g_scheduler_lock.own_lock());
-    bool did_unlock = unlock_process_if_locked();
+    u32 lock_count_to_restore = 0;
+    auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
     // NOTE: Even though we call Scheduler::yield here, unless we happen
     // to be outside of a critical section, the yield will be postponed
     // until leaving it in relock_process.
     Scheduler::yield();
-    relock_process(did_unlock);
+    relock_process(previous_locked, lock_count_to_restore);
 }
 
-bool Thread::unlock_process_if_locked()
+void Thread::donate_without_holding_big_lock(RefPtr<Thread>& thread, const char* reason)
 {
-    return process().big_lock().force_unlock_if_locked();
+    ASSERT(!g_scheduler_lock.own_lock());
+    u32 lock_count_to_restore = 0;
+    auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
+    // NOTE: Even though we call Scheduler::donate_to here, unless we happen
+    // to be outside of a critical section, the donation will be postponed
+    // until leaving it in relock_process.
+    Scheduler::donate_to(thread, reason);
+    relock_process(previous_locked, lock_count_to_restore);
 }
 
-void Thread::lock_process()
+LockMode Thread::unlock_process_if_locked(u32& lock_count_to_restore)
 {
-    process().big_lock().lock();
+    return process().big_lock().force_unlock_if_locked(lock_count_to_restore);
 }
 
-void Thread::relock_process(bool did_unlock)
+void Thread::relock_process(LockMode previous_locked, u32 lock_count_to_restore)
 {
     // Clearing the critical section may trigger the context switch
     // flagged by calling Scheduler::donate_to or Scheduler::yield
@@ -282,13 +292,15 @@ void Thread::relock_process(bool did_unlock)
     u32 prev_flags;
     u32 prev_crit = Processor::current().clear_critical(prev_flags, true);
 
-    if (did_unlock) {
-        // We've unblocked, relock the process if needed and carry on.
-        process().big_lock().lock();
-    }
+    // CONTEXT SWITCH HAPPENS HERE!
+
     // NOTE: We may be on a different CPU now!
     Processor::current().restore_critical(prev_crit, prev_flags);
+
+    if (previous_locked != LockMode::Unlocked) {
+        // We've unblocked, relock the process if needed and carry on.
+        RESTORE_LOCK(process().big_lock(), previous_locked, lock_count_to_restore);
+    }
 }
 
 auto Thread::sleep(clockid_t clock_id, const timespec& duration, timespec* remaining_time) -> BlockResult

Kernel/Thread.h

@@ -38,6 +38,7 @@
 #include <Kernel/Arch/i386/CPU.h>
 #include <Kernel/Forward.h>
 #include <Kernel/KResult.h>
+#include <Kernel/LockMode.h>
 #include <Kernel/Scheduler.h>
 #include <Kernel/ThreadTracer.h>
 #include <Kernel/TimerQueue.h>
@@ -781,6 +782,7 @@ public:
     [[nodiscard]] BlockResult block(const BlockTimeout& timeout, Args&&... args)
     {
         ASSERT(!Processor::current().in_irq());
+        ASSERT(this == Thread::current());
         ScopedCritical critical;
         ScopedSpinLock scheduler_lock(g_scheduler_lock);
         ScopedSpinLock block_lock(m_block_lock);
@@ -844,12 +846,14 @@ public:
         block_lock.unlock();
 
         bool did_timeout = false;
-        bool did_unlock = false;
+        auto previous_locked = LockMode::Unlocked;
+        u32 lock_count_to_restore = 0;
         for (;;) {
             scheduler_lock.unlock();
 
             // Yield to the scheduler, and wait for us to resume unblocked.
-            did_unlock |= unlock_process_if_locked();
+            if (previous_locked == LockMode::Unlocked)
+                previous_locked = unlock_process_if_locked(lock_count_to_restore);
 
             ASSERT(!g_scheduler_lock.own_lock());
             ASSERT(Processor::current().in_critical());
@@ -896,10 +900,10 @@ public:
             // (e.g. if it's on another processor)
             TimerQueue::the().cancel_timer(timer.release_nonnull());
         }
-        if (did_unlock) {
+        if (previous_locked != LockMode::Unlocked) {
             // NOTE: this may trigger another call to Thread::block(), so
             // we need to do this after we're all done and restored m_in_block!
-            lock_process();
+            relock_process(previous_locked, lock_count_to_restore);
         }
         return result;
     }
@@ -907,6 +911,13 @@ public:
     void unblock_from_blocker(Blocker&);
     void unblock(u8 signal = 0);
 
+    template<class... Args>
+    Thread::BlockResult wait_on(WaitQueue& wait_queue, const Thread::BlockTimeout& timeout, Args&&... args)
+    {
+        ASSERT(this == Thread::current());
+        return block<Thread::QueueBlocker>(timeout, wait_queue, forward<Args>(args)...);
+    }
+
     BlockResult sleep(clockid_t, const timespec&, timespec* = nullptr);
     BlockResult sleep(const timespec& duration, timespec* remaining_time = nullptr)
     {
@@ -1062,29 +1073,32 @@ public:
     RecursiveSpinLock& get_lock() const { return m_lock; }
 
 #ifdef LOCK_DEBUG
-    void holding_lock(Lock& lock, bool holding, const char* file = nullptr, int line = 0)
+    void holding_lock(Lock& lock, int refs_delta, const char* file = nullptr, int line = 0)
     {
-        m_holding_locks.fetch_add(holding ? 1 : -1, AK::MemoryOrder::memory_order_relaxed);
+        ASSERT(refs_delta != 0);
+        m_holding_locks.fetch_add(refs_delta, AK::MemoryOrder::memory_order_relaxed);
         ScopedSpinLock list_lock(m_holding_locks_lock);
-        if (holding) {
+        if (refs_delta > 0) {
             bool have_existing = false;
             for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                 auto& info = m_holding_locks_list[i];
                 if (info.lock == &lock) {
                     have_existing = true;
-                    info.count++;
+                    info.count += refs_delta;
                     break;
                 }
             }
             if (!have_existing)
                 m_holding_locks_list.append({ &lock, file ? file : "unknown", line, 1 });
         } else {
+            ASSERT(refs_delta < 0);
             bool found = false;
             for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                 auto& info = m_holding_locks_list[i];
                 if (info.lock == &lock) {
-                    ASSERT(info.count > 0);
-                    if (--info.count == 0)
+                    ASSERT(info.count >= (unsigned)-refs_delta);
+                    info.count -= (unsigned)-refs_delta;
+                    if (info.count == 0)
                         m_holding_locks_list.remove(i);
                     found = true;
                     break;
@@ -1162,9 +1176,8 @@ private:
         bool m_thread_did_exit { false };
     };
 
-    bool unlock_process_if_locked();
-    void lock_process();
-    void relock_process(bool did_unlock);
+    LockMode unlock_process_if_locked(u32&);
+    void relock_process(LockMode, u32);
     String backtrace_impl();
     void reset_fpu_state();
@@ -1234,6 +1247,7 @@ private:
     Atomic<bool> m_have_any_unmasked_pending_signals { false };
 
     void yield_without_holding_big_lock();
+    void donate_without_holding_big_lock(RefPtr<Thread>&, const char*);
     void yield_while_not_holding_big_lock();
     void update_state_for_thread(Thread::State previous_state);
 };


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2020, The SerenityOS developers.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2020, The SerenityOS developers.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without