Kernel: Fix a few deadlocks with Thread::m_lock and g_scheduler_lock

g_scheduler_lock cannot safely be acquired after Thread::m_lock
because another processor may already hold g_scheduler_lock and wait
for the same Thread::m_lock.
commit 1e2e3eed62
parent 8c764319ad
Author: Tom
Date:   2020-10-25 20:22:59 -06:00
Committed by: Andreas Kling
6 changed files with 55 additions and 37 deletions
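Before the hunks, the deadlock itself is worth spelling out: it is a classic ABBA lock-order inversion across processors. The stand-alone sketch below is illustrative only (plain C++ with std::mutex standing in for the kernel's SpinLock; scheduler_lock and thread_lock are hypothetical stand-ins for g_scheduler_lock and Thread::m_lock), not code from this commit:

#include <mutex>

std::mutex scheduler_lock; // stand-in for g_scheduler_lock
std::mutex thread_lock;    // stand-in for a particular Thread::m_lock

// Deadlock-prone order: if CPU A runs this while CPU B already holds
// scheduler_lock and is waiting for thread_lock, then A holds thread_lock
// and waits for scheduler_lock, B holds scheduler_lock and waits for
// thread_lock, and neither makes progress.
void acquire_in_bad_order()
{
    std::scoped_lock thread_guard(thread_lock);
    std::scoped_lock scheduler_guard(scheduler_lock);
}

// Safe order, the rule this commit enforces: any path that needs both
// locks takes scheduler_lock first and thread_lock second, on every CPU.
void acquire_in_safe_order()
{
    std::scoped_lock scheduler_guard(scheduler_lock);
    std::scoped_lock thread_guard(thread_lock);
}

All six files below are changed to respect that single order: take g_scheduler_lock first, then (if needed) Thread::m_lock.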


@@ -322,6 +322,7 @@ NonnullRefPtr<Process> Process::create_kernel_process(RefPtr<Thread>& first_thre
         process->ref();
     }
+    ScopedSpinLock lock(g_scheduler_lock);
     first_thread->set_affinity(affinity);
     first_thread->set_state(Thread::State::Runnable);
     return process;
@@ -781,6 +782,7 @@ RefPtr<Thread> Process::create_kernel_thread(void (*entry)(), u32 priority, cons
     auto& tss = thread->tss();
     tss.eip = (FlatPtr)entry;
+    ScopedSpinLock lock(g_scheduler_lock);
     thread->set_state(Thread::State::Runnable);
     return thread;
 }


@@ -308,7 +308,10 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
     if (was_profiling)
         Profiling::did_exec(path);
 
-    new_main_thread->set_state(Thread::State::Runnable);
+    {
+        ScopedSpinLock lock(g_scheduler_lock);
+        new_main_thread->set_state(Thread::State::Runnable);
+    }
     big_lock().force_unlock_if_locked();
     ASSERT_INTERRUPTS_DISABLED();
     ASSERT(Processor::current().in_critical());


@@ -79,24 +79,25 @@ pid_t Process::sys$fork(RegisterState& regs)
     dbg() << "fork: child will begin executing at " << String::format("%w", child_tss.cs) << ":" << String::format("%x", child_tss.eip) << " with stack " << String::format("%w", child_tss.ss) << ":" << String::format("%x", child_tss.esp) << ", kstack " << String::format("%w", child_tss.ss0) << ":" << String::format("%x", child_tss.esp0);
 #endif
 
-    ScopedSpinLock lock(m_lock);
-    for (auto& region : m_regions) {
-#ifdef FORK_DEBUG
-        dbg() << "fork: cloning Region{" << &region << "} '" << region.name() << "' @ " << region.vaddr();
-#endif
-        auto& child_region = child->add_region(region.clone());
-        child_region.map(child->page_directory());
-
-        if (&region == m_master_tls_region)
-            child->m_master_tls_region = child_region.make_weak_ptr();
-    }
     {
-        ScopedSpinLock lock(g_processes_lock);
+        ScopedSpinLock lock(m_lock);
+        for (auto& region : m_regions) {
+#ifdef FORK_DEBUG
+            dbg() << "fork: cloning Region{" << &region << "} '" << region.name() << "' @ " << region.vaddr();
+#endif
+            auto& child_region = child->add_region(region.clone());
+            child_region.map(child->page_directory());
+
+            if (&region == m_master_tls_region)
+                child->m_master_tls_region = child_region.make_weak_ptr();
+        }
+
+        ScopedSpinLock processes_lock(g_processes_lock);
         g_processes->prepend(child);
         child->ref(); // This reference will be dropped by Process::reap
     }
+
+    ScopedSpinLock lock(g_scheduler_lock);
     child_first_thread->set_affinity(Thread::current()->affinity());
     child_first_thread->set_state(Thread::State::Runnable);
     return child->pid().value();


@@ -70,7 +70,6 @@ int Process::sys$create_thread(void* (*entry)(void*), Userspace<const Syscall::S
     builder.appendf("[%d]", thread->tid().value());
     thread->set_name(builder.to_string());
-    thread->set_priority(requested_thread_priority);
 
     if (!is_thread_joinable)
         thread->detach();
@@ -83,6 +82,9 @@ int Process::sys$create_thread(void* (*entry)(void*), Userspace<const Syscall::S
     auto tsr_result = thread->make_thread_specific_region({});
     if (tsr_result.is_error())
         return tsr_result.error();
+
+    ScopedSpinLock lock(g_scheduler_lock);
+    thread->set_priority(requested_thread_priority);
     thread->set_state(Thread::State::Runnable);
     return thread->tid().value();
 }


@@ -119,6 +119,7 @@ Thread::~Thread()
 void Thread::unblock()
 {
+    ASSERT(g_scheduler_lock.own_lock());
     ASSERT(m_lock.own_lock());
     m_blocker = nullptr;
     if (Thread::current() == this) {
@@ -288,14 +289,19 @@ void Thread::finalize()
     ASSERT(Thread::current() == g_finalizer);
     ASSERT(Thread::current() != this);
+    ASSERT(!m_lock.own_lock());
 
-#ifdef THREAD_DEBUG
-    dbg() << "Finalizing thread " << *this;
-#endif
-    set_state(Thread::State::Dead);
-
-    if (auto* joiner = m_joiner.exchange(nullptr, AK::memory_order_acq_rel)) {
-        // Notify joiner that we exited
-        static_cast<JoinBlocker*>(joiner->m_blocker)->joinee_exited(m_exit_value);
+    {
+        ScopedSpinLock lock(g_scheduler_lock);
+#ifdef THREAD_DEBUG
+        dbg() << "Finalizing thread " << *this;
+#endif
+        set_state(Thread::State::Dead);
+
+        if (auto* joiner = m_joiner.exchange(nullptr, AK::memory_order_acq_rel)) {
+            // Notify joiner that we exited
+            static_cast<JoinBlocker*>(joiner->m_blocker)->joinee_exited(m_exit_value);
+        }
     }
 
     if (m_dump_backtrace_on_finalization)
@@ -522,6 +528,7 @@ void Thread::resume_from_stopped()
 {
     ASSERT(is_stopped());
     ASSERT(m_stop_state != State::Invalid);
+    ASSERT(g_scheduler_lock.own_lock());
     set_state(m_stop_state);
     m_stop_state = State::Invalid;
     // make sure SemiPermanentBlocker is unblocked
@@ -788,7 +795,7 @@ RefPtr<Thread> Thread::clone(Process& process)
 void Thread::set_state(State new_state)
 {
-    ScopedSpinLock lock(g_scheduler_lock);
+    ASSERT(g_scheduler_lock.own_lock());
     if (new_state == m_state)
         return;
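This hunk flips set_state()'s locking contract from "callee acquires" to "caller must hold": acquiring g_scheduler_lock inside set_state() deadlocks whenever a caller already holds some Thread::m_lock, whereas asserting lets each caller take the scheduler lock first in its own sequence. A self-contained sketch of the caller-holds-lock pattern (hypothetical names such as OwnershipLock; not the SerenityOS API):

#include <atomic>
#include <cassert>
#include <mutex>
#include <thread>

// Hypothetical stand-in for a lock that can report ownership, playing the
// role SpinLock and own_lock() play in the kernel.
struct OwnershipLock {
    std::mutex mutex;
    std::atomic<std::thread::id> owner{};

    void lock() { mutex.lock(); owner.store(std::this_thread::get_id()); }
    void unlock() { owner.store({}); mutex.unlock(); }
    bool own_lock() const { return owner.load() == std::this_thread::get_id(); }
};

OwnershipLock scheduler_lock_sketch;
int g_state = 0;

// The callee no longer takes the lock; it asserts that the caller already
// holds it, so the caller controls where it sits in the lock order.
void set_state(int new_state)
{
    assert(scheduler_lock_sketch.own_lock());
    g_state = new_state;
}

int main()
{
    std::lock_guard guard(scheduler_lock_sketch); // caller locks first
    set_state(1);                                 // call is covered by it
}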


@@ -422,28 +422,31 @@ public:
     {
         T t(forward<Args>(args)...);
 
-        ScopedSpinLock lock(m_lock);
-        // We should never be blocking a blocked (or otherwise non-active) thread.
-        ASSERT(state() == Thread::Running);
-        ASSERT(m_blocker == nullptr);
-
-        if (t.should_unblock(*this)) {
-            // Don't block if the wake condition is already met
-            return BlockResult::NotBlocked;
-        }
-
-        m_blocker = &t;
-        m_blocker_timeout = t.override_timeout(timeout);
-        set_state(Thread::Blocked);
-
-        // Release our lock
-        lock.unlock();
+        {
+            ScopedSpinLock lock(m_lock);
+            // We should never be blocking a blocked (or otherwise non-active) thread.
+            ASSERT(state() == Thread::Running);
+            ASSERT(m_blocker == nullptr);
+
+            if (t.should_unblock(*this)) {
+                // Don't block if the wake condition is already met
+                return BlockResult::NotBlocked;
+            }
+
+            m_blocker = &t;
+            m_blocker_timeout = t.override_timeout(timeout);
+        }
+
+        {
+            ScopedSpinLock scheduler_lock(g_scheduler_lock);
+            set_state(Thread::Blocked);
+        }
 
         // Yield to the scheduler, and wait for us to resume unblocked.
         yield_without_holding_big_lock();
 
-        // Acquire our lock again
-        lock.lock();
+        ScopedSpinLock lock(m_lock);
 
         // We should no longer be blocked once we woke up
         ASSERT(state() != Thread::Blocked);
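The block<T>() rewrite is the same rule applied in the hard direction: set_state() now requires g_scheduler_lock, and block() holds m_lock, so the diff narrows the m_lock scope and only takes g_scheduler_lock after m_lock has been released, reacquiring m_lock after waking. A minimal sketch of that two-phase shape, again with std::mutex and hypothetical names rather than kernel types:

#include <mutex>

std::mutex scheduler_lock; // stand-in for g_scheduler_lock

struct ThreadSketch {
    std::mutex m_lock;
    const void* m_blocker = nullptr;
    bool m_blocked = false;

    void block(const void* blocker)
    {
        {
            // Phase 1: validate and record the blocker under our own lock.
            std::scoped_lock guard(m_lock);
            m_blocker = blocker;
        } // m_lock is released here, before any scheduler_lock acquisition,
          // so the forbidden m_lock -> scheduler_lock order never occurs.
        {
            // Phase 2: change scheduler-visible state under its own lock.
            std::scoped_lock guard(scheduler_lock);
            m_blocked = true;
        }
        // ... yield to the scheduler here; on wakeup, reacquire m_lock ...
        std::scoped_lock guard(m_lock);
        m_blocker = nullptr;
    }
};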