mirror of https://github.com/LadybirdBrowser/ladybird.git, synced 2024-11-26 01:20:25 +00:00
Kernel/SMP: Make entering/leaving critical sections multi-processor safe

By making these functions static we close a window where we could get
preempted after calling Processor::current() and move to another
processor.

Co-authored-by: Tom <tomut@yahoo.com>
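The window in question is easiest to see at a call site. Below is a minimal, self-contained C++ sketch of the before/after pattern; Processor, cli() and main() here are simplified stand-ins for illustration, not the kernel's real declarations.

#include <atomic>

// Stand-in for disabling interrupts; a no-op in this sketch.
static void cli() { }

struct Processor {
    std::atomic<unsigned> m_in_critical { 0 };

    // In the real kernel this resolves the current CPU's Processor
    // through the GS segment register.
    static Processor& current()
    {
        static Processor proc;
        return proc;
    }

    // BEFORE: callers wrote Processor::current().enter_critical(...).
    // current() runs while interrupts are still enabled, so between
    // resolving the per-CPU object and entering this body the thread
    // can be preempted, migrate to another CPU, and then bump the
    // counter of the CPU it used to be on.
    void enter_critical_racy()
    {
        cli();
        m_in_critical.fetch_add(1, std::memory_order_relaxed); // 'this' may be stale
    }

    // AFTER: a static entry point disables interrupts first and only
    // then resolves the current Processor, so it cannot change under us.
    static void enter_critical_safe()
    {
        cli();
        current().m_in_critical.fetch_add(1, std::memory_order_relaxed);
    }
};

int main()
{
    Processor::enter_critical_safe();
    return Processor::current().m_in_critical.load() == 1 ? 0 : 1;
}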
This commit is contained in:
parent 369e3da6a2
commit 9babb92a4b

Notes:
sideshowbarker 2024-07-18 07:10:17 +09:00
Author: https://github.com/awesomekling
Commit: https://github.com/SerenityOS/serenity/commit/9babb92a4b6

11 changed files with 82 additions and 53 deletions
@@ -121,7 +121,7 @@ class Processor {
     u32 m_cpu;
     u32 m_in_irq;
-    Atomic<u32, AK::MemoryOrder::memory_order_relaxed> m_in_critical;
+    volatile u32 m_in_critical {};
     static Atomic<u32> s_idle_cpu_mask;
 
     TSS m_tss;
@@ -334,32 +334,34 @@ public:
         return m_in_irq;
     }
 
-    ALWAYS_INLINE void restore_in_critical(u32 critical)
+    ALWAYS_INLINE static void restore_in_critical(u32 critical)
     {
-        m_in_critical = critical;
+        write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), critical);
     }
 
-    ALWAYS_INLINE void enter_critical(u32& prev_flags)
+    ALWAYS_INLINE static void enter_critical(u32& prev_flags)
     {
         prev_flags = cpu_flags();
         cli();
-        m_in_critical++;
+        // NOTE: Up until this point we *could* have been preempted.
+        // Now interrupts are disabled, so calling current() is safe.
+        AK::atomic_fetch_add(&current().m_in_critical, 1u, AK::MemoryOrder::memory_order_relaxed);
     }
 
-    ALWAYS_INLINE void leave_critical(u32 prev_flags)
+private:
+    ALWAYS_INLINE void do_leave_critical(u32 prev_flags)
     {
-        cli(); // Need to prevent IRQs from interrupting us here!
         VERIFY(m_in_critical > 0);
         if (m_in_critical == 1) {
             if (!m_in_irq) {
                 deferred_call_execute_pending();
                 VERIFY(m_in_critical == 1);
             }
-            m_in_critical--;
+            m_in_critical = 0;
             if (!m_in_irq)
                 check_invoke_scheduler();
         } else {
-            m_in_critical--;
+            m_in_critical = m_in_critical - 1;
         }
         if (prev_flags & 0x200)
             sti();
@@ -367,28 +369,53 @@ public:
         else
             cli();
     }
 
-    ALWAYS_INLINE u32 clear_critical(u32& prev_flags, bool enable_interrupts)
+public:
+    ALWAYS_INLINE static void leave_critical(u32 prev_flags)
     {
-        prev_flags = cpu_flags();
-        u32 prev_crit = m_in_critical.exchange(0, AK::MemoryOrder::memory_order_acquire);
-        if (!m_in_irq)
-            check_invoke_scheduler();
-        if (enable_interrupts)
-            sti();
-        return prev_crit;
+        cli(); // Need to prevent IRQs from interrupting us here!
+        // NOTE: Up until this point we *could* have been preempted!
+        // Now interrupts are disabled, so calling current() is safe
+        current().do_leave_critical(prev_flags);
     }
 
-    ALWAYS_INLINE void restore_critical(u32 prev_crit, u32 prev_flags)
+    ALWAYS_INLINE static u32 clear_critical(u32& prev_flags, bool enable_interrupts)
     {
-        m_in_critical.store(prev_crit, AK::MemoryOrder::memory_order_release);
-        VERIFY(!prev_crit || !(prev_flags & 0x200));
+        prev_flags = cpu_flags();
+        cli();
+        // NOTE: Up until this point we *could* have been preempted!
+        // Now interrupts are disabled, so calling current() is safe
+        // This doesn't have to be atomic, and it's also fine if we
+        // were to be preempted in between these steps (which should
+        // not happen due to the cli call), but if we moved to another
+        // processors m_in_critical would move along with us
+        auto prev_critical = read_gs_ptr(__builtin_offsetof(Processor, m_in_critical));
+        write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), 0);
+        auto& proc = current();
+        if (!proc.m_in_irq)
+            proc.check_invoke_scheduler();
+        if (enable_interrupts || (prev_flags & 0x200))
+            sti();
+        return prev_critical;
+    }
+
+    ALWAYS_INLINE static void restore_critical(u32 prev_critical, u32 prev_flags)
+    {
+        // NOTE: This doesn't have to be atomic, and it's also fine if we
+        // get preempted in between these steps. If we move to another
+        // processors m_in_critical will move along with us. And if we
+        // are preempted, we would resume with the same flags.
+        write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), prev_critical);
+        VERIFY(!prev_critical || !(prev_flags & 0x200));
         if (prev_flags & 0x200)
             sti();
         else
             cli();
     }
 
-    ALWAYS_INLINE u32 in_critical() { return m_in_critical.load(); }
+    ALWAYS_INLINE static u32 in_critical()
+    {
+        // See comment in Processor::current_thread
+        return read_gs_ptr(__builtin_offsetof(Processor, m_in_critical));
+    }
 
     ALWAYS_INLINE const FPUState& clean_fpu_state() const
     {
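For readers unfamiliar with the read_gs_ptr()/write_gs_ptr() calls above: they access a field of the current CPU's Processor through the GS segment base, so each access targets whichever CPU executes the instruction. Here is a rough sketch of such helpers in x86-64 GCC/Clang inline-assembly syntax; the names and signatures are assumptions for illustration, and the kernel's actual wrappers may differ.

#include <cstdint>

// Illustrative analogues of the kernel's read_gs_ptr()/write_gs_ptr().
// Each access is a single instruction relative to %gs, so even if the
// thread migrates between a read and a later write, every access still
// hits the CPU it is running on, and (as the commit's comment says) the
// critical counter moves along with the thread.
inline uint32_t read_gs_u32(uintptr_t offset)
{
    uint32_t value;
    asm volatile("movl %%gs:%a1, %0"
                 : "=r"(value)
                 : "ir"(offset));
    return value;
}

inline void write_gs_u32(uintptr_t offset, uint32_t value)
{
    asm volatile("movl %0, %%gs:%a1"
                 :
                 : "r"(value), "ir"(offset)
                 : "memory");
}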
@@ -46,14 +46,14 @@ public:
     {
         VERIFY(m_valid);
         m_valid = false;
-        Processor::current().leave_critical(m_prev_flags);
+        Processor::leave_critical(m_prev_flags);
     }
 
     void enter()
     {
         VERIFY(!m_valid);
         m_valid = true;
-        Processor::current().enter_critical(m_prev_flags);
+        Processor::enter_critical(m_prev_flags);
     }
 
 private:
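ScopedCritical, patched above, is the RAII wrapper around these calls. A compilable sketch of its shape as implied by the hunk, with the Processor API stubbed out and the VERIFY checks elided:

#include <cstdint>

namespace stub { // stand-ins so the sketch compiles on its own
inline void enter_critical(uint32_t& prev_flags) { prev_flags = 0; }
inline void leave_critical(uint32_t) { }
}

class ScopedCritical {
public:
    ScopedCritical() { stub::enter_critical(m_prev_flags); } // enter on construction
    ~ScopedCritical()
    {
        if (m_valid)
            leave(); // leave on scope exit, unless already left manually
    }

    void leave()
    {
        m_valid = false;
        stub::leave_critical(m_prev_flags);
    }

    void enter()
    {
        m_valid = true;
        stub::enter_critical(m_prev_flags);
    }

private:
    uint32_t m_prev_flags { 0 };
    bool m_valid { true };
};

int main()
{
    ScopedCritical guard; // critical section held for this whole scope
    return 0;
}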
@@ -613,7 +613,7 @@ void Processor::exit_trap(TrapFrame& trap)
     // to trigger a context switch while we're executing this function
     // See the comment at the end of the function why we don't use
     // ScopedCritical here.
-    m_in_critical++;
+    m_in_critical = m_in_critical + 1;
 
     VERIFY(m_in_irq >= trap.prev_irq_level);
     m_in_irq = trap.prev_irq_level;
@@ -646,11 +646,13 @@ void Processor::exit_trap(TrapFrame& trap)
         current_thread->update_time_scheduled(Scheduler::current_time(), true, false);
     }
 
     VERIFY_INTERRUPTS_DISABLED();
 
     // Leave the critical section without actually enabling interrupts.
     // We don't want context switches to happen until we're explicitly
     // triggering a switch in check_invoke_scheduler.
-    auto new_critical = m_in_critical.fetch_sub(1) - 1;
-    if (!m_in_irq && !new_critical)
+    m_in_critical = m_in_critical - 1;
+    if (!m_in_irq && !m_in_critical)
         check_invoke_scheduler();
 }
@@ -730,7 +732,7 @@ ProcessorMessage& Processor::smp_get_from_pool()
 
 u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
 {
-    VERIFY(Processor::current().in_critical());
+    VERIFY(Processor::in_critical());
     VERIFY(wake_count > 0);
     if (!s_smp_enabled)
         return 0;
@@ -1311,7 +1313,7 @@ void Processor::assume_context(Thread& thread, FlatPtr flags)
     Scheduler::prepare_after_exec();
     // in_critical() should be 2 here. The critical section in Process::exec
    // and then the scheduler lock
-    VERIFY(Processor::current().in_critical() == 2);
+    VERIFY(Processor::in_critical() == 2);
 
     do_assume_context(&thread, flags);
@@ -70,8 +70,8 @@ FlatPtr Processor::init_context(Thread& thread, bool leave_crit)
     if (leave_crit) {
         // Leave the critical section we set up in in Process::exec,
         // but because we still have the scheduler lock we should end up with 1
-        m_in_critical--; // leave it without triggering anything or restoring flags
-        VERIFY(in_critical() == 1);
+        VERIFY(in_critical() == 2);
+        m_in_critical = 1; // leave it without triggering anything or restoring flags
     }
 
     u32 kernel_stack_top = thread.kernel_stack_top();
@@ -66,8 +66,8 @@ FlatPtr Processor::init_context(Thread& thread, bool leave_crit)
     if (leave_crit) {
         // Leave the critical section we set up in in Process::exec,
         // but because we still have the scheduler lock we should end up with 1
-        m_in_critical--; // leave it without triggering anything or restoring flags
-        VERIFY(in_critical() == 1);
+        VERIFY(in_critical() == 2);
+        m_in_critical = 1; // leave it without triggering anything or restoring flags
     }
 
     u64 kernel_stack_top = thread.kernel_stack_top();
@@ -212,7 +212,7 @@ static inline void kmalloc_verify_nospinlock_held()
 {
     // Catch bad callers allocating under spinlock.
     if constexpr (KMALLOC_VERIFY_NO_SPINLOCK_HELD) {
-        VERIFY(!Processor::current().in_critical());
+        VERIFY(!Processor::in_critical());
     }
 }
@@ -24,7 +24,7 @@ public:
     ALWAYS_INLINE u32 lock()
     {
         u32 prev_flags;
-        Processor::current().enter_critical(prev_flags);
+        Processor::enter_critical(prev_flags);
         while (m_lock.exchange(1, AK::memory_order_acquire) != 0) {
             Processor::wait_check();
         }
@@ -35,7 +35,7 @@ public:
     {
         VERIFY(is_locked());
         m_lock.store(0, AK::memory_order_release);
-        Processor::current().leave_critical(prev_flags);
+        Processor::leave_critical(prev_flags);
     }
 
     [[nodiscard]] ALWAYS_INLINE bool is_locked() const
@@ -64,7 +64,7 @@ public:
         auto& proc = Processor::current();
         FlatPtr cpu = FlatPtr(&proc);
         u32 prev_flags;
-        proc.enter_critical(prev_flags);
+        Processor::enter_critical(prev_flags);
         FlatPtr expected = 0;
         while (!m_lock.compare_exchange_strong(expected, cpu, AK::memory_order_acq_rel)) {
             if (expected == cpu)
@@ -82,7 +82,7 @@ public:
         VERIFY(m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current()));
         if (--m_recursions == 0)
             m_lock.store(0, AK::memory_order_release);
-        Processor::current().leave_critical(prev_flags);
+        Processor::leave_critical(prev_flags);
     }
 
     [[nodiscard]] ALWAYS_INLINE bool is_locked() const
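The spinlock changes above keep the same shape: enter the critical section first, then spin on the lock word. A standalone sketch of that pattern using standard atomics, with stand-in functions replacing the kernel's Processor API:

#include <atomic>
#include <cstdint>

// Stand-ins for Processor::enter_critical / leave_critical / wait_check.
inline void enter_critical(uint32_t& prev_flags) { prev_flags = 0; }
inline void leave_critical(uint32_t) { }
inline void wait_check() { } // in the kernel, lets a spinning CPU service pending work

class SpinLockSketch {
public:
    uint32_t lock()
    {
        uint32_t prev_flags;
        enter_critical(prev_flags); // no preemption while we hold the lock
        while (m_lock.exchange(1, std::memory_order_acquire) != 0)
            wait_check();
        return prev_flags;
    }

    void unlock(uint32_t prev_flags)
    {
        m_lock.store(0, std::memory_order_release);
        leave_critical(prev_flags); // restores the saved interrupt flag
    }

private:
    std::atomic<uint32_t> m_lock { 0 };
};

int main()
{
    SpinLockSketch lock;
    auto flags = lock.lock();
    lock.unlock(flags);
    return 0;
}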
@@ -257,7 +257,7 @@ bool Scheduler::yield()
     auto current_thread = Thread::current();
     dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: yielding thread {} in_irq={}", proc.get_id(), *current_thread, proc.in_irq());
     VERIFY(current_thread != nullptr);
-    if (proc.in_irq() || proc.in_critical()) {
+    if (proc.in_irq() || Processor::in_critical()) {
         // If we're handling an IRQ we can't switch context, or we're in
         // a critical section where we don't want to switch contexts, then
         // delay until exiting the trap or critical section
@@ -297,7 +297,7 @@ static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::AddressSpace>
     FlatPtr load_base_address = 0;
 
     String elf_name = object_description.absolute_path();
-    VERIFY(!Processor::current().in_critical());
+    VERIFY(!Processor::in_critical());
 
     Memory::MemoryManager::enter_space(*new_space);
@@ -495,7 +495,7 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
     RefPtr<FileDescription> interpreter_description, Thread*& new_main_thread, u32& prev_flags, const ElfW(Ehdr) & main_program_header)
 {
     VERIFY(is_user_process());
-    VERIFY(!Processor::current().in_critical());
+    VERIFY(!Processor::in_critical());
     auto path = main_program_description->absolute_path();
 
     dbgln_if(EXEC_DEBUG, "do_exec: {}", path);
@@ -686,7 +686,7 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
     u32 lock_count_to_restore;
     [[maybe_unused]] auto rc = big_lock().force_unlock_if_locked(lock_count_to_restore);
     VERIFY_INTERRUPTS_DISABLED();
-    VERIFY(Processor::current().in_critical());
+    VERIFY(Processor::in_critical());
     return KSuccess;
 }
@@ -912,7 +912,7 @@ KResult Process::exec(String path, Vector<String> arguments, Vector<String> envi
         return result;
 
     VERIFY_INTERRUPTS_DISABLED();
-    VERIFY(Processor::current().in_critical());
+    VERIFY(Processor::in_critical());
 
     auto current_thread = Thread::current();
     if (current_thread == new_main_thread) {
@@ -920,14 +920,14 @@ KResult Process::exec(String path, Vector<String> arguments, Vector<String> envi
         // and it will be released after the context switch into that
         // thread. We should also still be in our critical section
         VERIFY(!g_scheduler_lock.own_lock());
-        VERIFY(Processor::current().in_critical() == 1);
+        VERIFY(Processor::in_critical() == 1);
         g_scheduler_lock.lock();
         current_thread->set_state(Thread::State::Running);
         Processor::assume_context(*current_thread, prev_flags);
         VERIFY_NOT_REACHED();
     }
 
-    Processor::current().leave_critical(prev_flags);
+    Processor::leave_critical(prev_flags);
     return KSuccess;
 }
@@ -214,7 +214,7 @@ void Thread::block(Kernel::Mutex& lock, ScopedSpinLock<SpinLock<u8>>& lock_lock,
     for (;;) {
         // Yield to the scheduler, and wait for us to resume unblocked.
         VERIFY(!g_scheduler_lock.own_lock());
-        VERIFY(Processor::current().in_critical());
+        VERIFY(Processor::in_critical());
         if (&lock != &big_lock && big_lock.own_lock()) {
             // We're locking another lock and already hold the big lock...
             // We need to release the big lock
@@ -222,7 +222,7 @@ void Thread::block(Kernel::Mutex& lock, ScopedSpinLock<SpinLock<u8>>& lock_lock,
         } else {
             yield_assuming_not_holding_big_lock();
         }
-        VERIFY(Processor::current().in_critical());
+        VERIFY(Processor::in_critical());
 
         ScopedSpinLock block_lock2(m_block_lock);
         if (should_be_stopped() || state() == Stopped) {
@@ -389,7 +389,7 @@ void Thread::die_if_needed()
     // Now leave the critical section so that we can also trigger the
     // actual context switch
     u32 prev_flags;
-    Processor::current().clear_critical(prev_flags, false);
+    Processor::clear_critical(prev_flags, false);
     dbgln("die_if_needed returned from clear_critical!!! in irq: {}", Processor::current().in_irq());
     // We should never get here, but the scoped scheduler lock
     // will be released by Scheduler::context_switch again
@@ -421,9 +421,9 @@ void Thread::yield_assuming_not_holding_big_lock()
     InterruptDisabler disable;
     Scheduler::yield(); // flag a switch
     u32 prev_flags;
-    u32 prev_crit = Processor::current().clear_critical(prev_flags, true);
+    u32 prev_crit = Processor::clear_critical(prev_flags, true);
     // NOTE: We may be on a different CPU now!
-    Processor::current().restore_critical(prev_crit, prev_flags);
+    Processor::restore_critical(prev_crit, prev_flags);
 }
 
 void Thread::yield_and_release_relock_big_lock()
@@ -452,12 +452,12 @@ void Thread::relock_process(LockMode previous_locked, u32 lock_count_to_restore)
     // We have to do it this way because we intentionally
     // leave the critical section here to be able to switch contexts.
     u32 prev_flags;
-    u32 prev_crit = Processor::current().clear_critical(prev_flags, true);
+    u32 prev_crit = Processor::clear_critical(prev_flags, true);
 
     // CONTEXT SWITCH HAPPENS HERE!
 
     // NOTE: We may be on a different CPU now!
-    Processor::current().restore_critical(prev_crit, prev_flags);
+    Processor::restore_critical(prev_crit, prev_flags);
 
     if (previous_locked != LockMode::Unlocked) {
         // We've unblocked, relock the process if needed and carry on.
@@ -921,9 +921,9 @@ public:
         for (;;) {
             // Yield to the scheduler, and wait for us to resume unblocked.
             VERIFY(!g_scheduler_lock.own_lock());
-            VERIFY(Processor::current().in_critical());
+            VERIFY(Processor::in_critical());
             yield_assuming_not_holding_big_lock();
-            VERIFY(Processor::current().in_critical());
+            VERIFY(Processor::in_critical());
 
             ScopedSpinLock block_lock2(m_block_lock);
             if (should_be_stopped() || state() == Stopped) {