diff --git a/Kernel/Arch/Spinlock.h b/Kernel/Arch/Spinlock.h
index 084abb69ef8..05709d5f875 100644
--- a/Kernel/Arch/Spinlock.h
+++ b/Kernel/Arch/Spinlock.h
@@ -21,8 +21,8 @@ public:
     {
     }
 
-    u32 lock();
-    void unlock(u32 prev_flags);
+    InterruptsState lock();
+    void unlock(InterruptsState);
 
     [[nodiscard]] ALWAYS_INLINE bool is_locked() const
     {
@@ -53,8 +53,8 @@ public:
     {
     }
 
-    u32 lock();
-    void unlock(u32 prev_flags);
+    InterruptsState lock();
+    void unlock(InterruptsState);
 
     [[nodiscard]] ALWAYS_INLINE bool is_locked() const
     {
diff --git a/Kernel/Arch/aarch64/Spinlock.cpp b/Kernel/Arch/aarch64/Spinlock.cpp
index e9bdb014eba..bcb81dd2c92 100644
--- a/Kernel/Arch/aarch64/Spinlock.cpp
+++ b/Kernel/Arch/aarch64/Spinlock.cpp
@@ -11,21 +11,21 @@
 
 namespace Kernel {
 
-u32 Spinlock::lock()
+InterruptsState Spinlock::lock()
 {
-    return 0;
+    return InterruptsState::Disabled;
 }
 
-void Spinlock::unlock(u32)
+void Spinlock::unlock(InterruptsState)
 {
 }
 
-u32 RecursiveSpinlock::lock()
+InterruptsState RecursiveSpinlock::lock()
 {
-    return 0;
+    return InterruptsState::Disabled;
 }
 
-void RecursiveSpinlock::unlock(u32)
+void RecursiveSpinlock::unlock(InterruptsState)
 {
 }
 
diff --git a/Kernel/Arch/x86/common/Interrupts.cpp b/Kernel/Arch/x86/common/Interrupts.cpp
index 604a74da334..d9fb4299417 100644
--- a/Kernel/Arch/x86/common/Interrupts.cpp
+++ b/Kernel/Arch/x86/common/Interrupts.cpp
@@ -467,8 +467,7 @@ extern "C" UNMAP_AFTER_INIT void pre_init_finished(void)
     // to this point
 
     // The target flags will get restored upon leaving the trap
-    u32 prev_flags = cpu_flags();
-    Scheduler::leave_on_first_switch(prev_flags);
+    Scheduler::leave_on_first_switch(processor_interrupts_state());
 }
 
 extern "C" UNMAP_AFTER_INIT void post_init_finished(void)
diff --git a/Kernel/Arch/x86/common/Processor.cpp b/Kernel/Arch/x86/common/Processor.cpp
index 46902160f37..21158c54edb 100644
--- a/Kernel/Arch/x86/common/Processor.cpp
+++ b/Kernel/Arch/x86/common/Processor.cpp
@@ -1561,8 +1561,7 @@ extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe
     // the scheduler lock. We don't want to enable interrupts at this point
     // as we're still in the middle of a context switch. Doing so could
     // trigger a context switch within a context switch, leading to a crash.
-    FlatPtr flags = trap->regs->flags();
-    Scheduler::leave_on_first_switch(flags & ~0x200);
+    Scheduler::leave_on_first_switch(InterruptsState::Disabled);
 }
 
 extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
diff --git a/Kernel/Arch/x86/common/Spinlock.cpp b/Kernel/Arch/x86/common/Spinlock.cpp
index 5af0b61495b..6cf3cfbae51 100644
--- a/Kernel/Arch/x86/common/Spinlock.cpp
+++ b/Kernel/Arch/x86/common/Spinlock.cpp
@@ -8,35 +8,31 @@
 
 namespace Kernel {
 
-u32 Spinlock::lock()
+InterruptsState Spinlock::lock()
 {
-    u32 prev_flags = cpu_flags();
+    InterruptsState previous_interrupts_state = processor_interrupts_state();
     Processor::enter_critical();
-    cli();
+    Processor::disable_interrupts();
     while (m_lock.exchange(1, AK::memory_order_acquire) != 0)
         Processor::wait_check();
     track_lock_acquire(m_rank);
-    return prev_flags;
+    return previous_interrupts_state;
 }
 
-void Spinlock::unlock(u32 prev_flags)
+void Spinlock::unlock(InterruptsState previous_interrupts_state)
 {
     VERIFY(is_locked());
     track_lock_release(m_rank);
     m_lock.store(0, AK::memory_order_release);
 
     Processor::leave_critical();
-
-    if ((prev_flags & 0x200) != 0)
-        sti();
-    else
-        cli();
+    restore_processor_interrupts_state(previous_interrupts_state);
 }
 
-u32 RecursiveSpinlock::lock()
+InterruptsState RecursiveSpinlock::lock()
 {
-    u32 prev_flags = cpu_flags();
-    cli();
+    InterruptsState previous_interrupts_state = processor_interrupts_state();
+    Processor::disable_interrupts();
     Processor::enter_critical();
     auto& proc = Processor::current();
     FlatPtr cpu = FlatPtr(&proc);
@@ -50,10 +46,10 @@ u32 RecursiveSpinlock::lock()
     if (m_recursions == 0)
         track_lock_acquire(m_rank);
     m_recursions++;
-    return prev_flags;
+    return previous_interrupts_state;
 }
 
-void RecursiveSpinlock::unlock(u32 prev_flags)
+void RecursiveSpinlock::unlock(InterruptsState previous_interrupts_state)
 {
     VERIFY_INTERRUPTS_DISABLED();
     VERIFY(m_recursions > 0);
@@ -64,11 +60,7 @@ void RecursiveSpinlock::unlock(u32 prev_flags)
     }
 
     Processor::leave_critical();
-
-    if ((prev_flags & 0x200) != 0)
-        sti();
-    else
-        cli();
+    restore_processor_interrupts_state(previous_interrupts_state);
 }
 
 }
diff --git a/Kernel/Locking/Spinlock.h b/Kernel/Locking/Spinlock.h
index 136ae54bf6f..b424aa689d5 100644
--- a/Kernel/Locking/Spinlock.h
+++ b/Kernel/Locking/Spinlock.h
@@ -25,24 +25,24 @@ public:
         : m_lock(&lock)
     {
         VERIFY(m_lock);
-        m_prev_flags = m_lock->lock();
+        m_previous_interrupts_state = m_lock->lock();
         m_have_lock = true;
     }
 
     SpinlockLocker(SpinlockLocker&& from)
         : m_lock(from.m_lock)
-        , m_prev_flags(from.m_prev_flags)
+        , m_previous_interrupts_state(from.m_previous_interrupts_state)
         , m_have_lock(from.m_have_lock)
     {
         from.m_lock = nullptr;
-        from.m_prev_flags = 0;
+        from.m_previous_interrupts_state = InterruptsState::Disabled;
         from.m_have_lock = false;
     }
 
     ~SpinlockLocker()
     {
         if (m_lock && m_have_lock) {
-            m_lock->unlock(m_prev_flags);
+            m_lock->unlock(m_previous_interrupts_state);
         }
     }
 
@@ -50,7 +50,7 @@ public:
     {
         VERIFY(m_lock);
         VERIFY(!m_have_lock);
-        m_prev_flags = m_lock->lock();
+        m_previous_interrupts_state = m_lock->lock();
         m_have_lock = true;
     }
 
@@ -58,8 +58,8 @@ public:
     {
         VERIFY(m_lock);
         VERIFY(m_have_lock);
-        m_lock->unlock(m_prev_flags);
-        m_prev_flags = 0;
+        m_lock->unlock(m_previous_interrupts_state);
+        m_previous_interrupts_state = InterruptsState::Disabled;
         m_have_lock = false;
     }
 
@@ -70,7 +70,7 @@ public:
 
 private:
     LockType* m_lock { nullptr };
-    u32 m_prev_flags { 0 };
+    InterruptsState m_previous_interrupts_state { InterruptsState::Disabled };
     bool m_have_lock { false };
 };
 
diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp
index 1f1a1252900..5d108e26bf3 100644
--- a/Kernel/Memory/MemoryManager.cpp
+++ b/Kernel/Memory/MemoryManager.cpp
@@ -1082,7 +1082,7 @@ u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
 {
     VERIFY_INTERRUPTS_DISABLED();
     auto& mm_data = get_data();
-    mm_data.m_quickmap_prev_flags = mm_data.m_quickmap_in_use.lock();
+    mm_data.m_quickmap_previous_interrupts_state = mm_data.m_quickmap_in_use.lock();
 
     VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE);
     u32 pte_idx = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;
@@ -1108,7 +1108,7 @@ void MemoryManager::unquickmap_page()
     auto& pte = ((PageTableEntry*)boot_pd_kernel_pt1023)[pte_idx];
     pte.clear();
     flush_tlb_local(vaddr);
-    mm_data.m_quickmap_in_use.unlock(mm_data.m_quickmap_prev_flags);
+    mm_data.m_quickmap_in_use.unlock(mm_data.m_quickmap_previous_interrupts_state);
 }
 
 bool MemoryManager::validate_user_stack(AddressSpace& space, VirtualAddress vaddr) const
diff --git a/Kernel/Memory/MemoryManager.h b/Kernel/Memory/MemoryManager.h
index a69717805f1..484a645acc0 100644
--- a/Kernel/Memory/MemoryManager.h
+++ b/Kernel/Memory/MemoryManager.h
@@ -90,7 +90,7 @@ struct MemoryManagerData {
     static ProcessorSpecificDataID processor_specific_data_id() { return ProcessorSpecificDataID::MemoryManager; }
 
     Spinlock m_quickmap_in_use { LockRank::None };
-    u32 m_quickmap_prev_flags;
+    InterruptsState m_quickmap_previous_interrupts_state;
 };
 
 // This class represents a set of committed physical pages.
diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index 3b0e59893d7..18bcea2ee25 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -327,13 +327,13 @@ void Scheduler::enter_current(Thread& prev_thread)
     }
 }
 
-void Scheduler::leave_on_first_switch(u32 flags)
+void Scheduler::leave_on_first_switch(InterruptsState previous_interrupts_state)
 {
     // This is called when a thread is switched into for the first time.
     // At this point, enter_current has already be called, but because
     // Scheduler::context_switch is not in the call stack we need to
     // clean up and release locks manually here
-    g_scheduler_lock.unlock(flags);
+    g_scheduler_lock.unlock(previous_interrupts_state);
 
     VERIFY(Processor::current_in_scheduler());
     Processor::set_current_in_scheduler(false);
diff --git a/Kernel/Scheduler.h b/Kernel/Scheduler.h
index a1d4abde606..01882cab380 100644
--- a/Kernel/Scheduler.h
+++ b/Kernel/Scheduler.h
@@ -40,7 +40,7 @@ public:
     static void yield();
     static void context_switch(Thread*);
     static void enter_current(Thread& prev_thread);
-    static void leave_on_first_switch(u32 flags);
+    static void leave_on_first_switch(InterruptsState);
     static void prepare_after_exec();
     static void prepare_for_idle_loop();
     static Process* colonel();
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index fdfa1633e80..f5332374513 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -557,7 +557,7 @@ public:
         void begin_requeue()
        {
             // We need to hold the lock until we moved it over
-            m_relock_flags = m_lock.lock();
+            m_previous_interrupts_state = m_lock.lock();
         }
 
         void finish_requeue(FutexQueue&);
@@ -567,7 +567,7 @@ public:
     protected:
         FutexQueue& m_futex_queue;
         u32 m_bitset { 0 };
-        u32 m_relock_flags { 0 };
+        InterruptsState m_previous_interrupts_state { InterruptsState::Disabled };
         bool m_did_unblock { false };
     };
 
diff --git a/Kernel/ThreadBlockers.cpp b/Kernel/ThreadBlockers.cpp
index a14e5fef46e..52b89bec741 100644
--- a/Kernel/ThreadBlockers.cpp
+++ b/Kernel/ThreadBlockers.cpp
@@ -164,7 +164,7 @@ void Thread::FutexBlocker::finish_requeue(FutexQueue& futex_queue)
     VERIFY(m_lock.is_locked_by_current_processor());
     set_blocker_set_raw_locked(&futex_queue);
     // We can now release the lock
-    m_lock.unlock(m_relock_flags);
+    m_lock.unlock(m_previous_interrupts_state);
 }
 
 bool Thread::FutexBlocker::unblock_bitset(u32 bitset)
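Note: the hunks above rely on the InterruptsState enum and the processor_interrupts_state() / restore_processor_interrupts_state() helpers, which are introduced outside the files shown in this diff. A minimal sketch of what the x86 side of those helpers could look like, assuming Processor exposes are_interrupts_enabled(), enable_interrupts() and disable_interrupts() (names assumed, not confirmed by this diff):

// Sketch only -- not part of the patch above.
enum class InterruptsState {
    Enabled,
    Disabled,
};

inline InterruptsState processor_interrupts_state()
{
    // Capture whether interrupts are currently enabled on this CPU,
    // without exposing the architecture-specific flags register.
    return Processor::are_interrupts_enabled() ? InterruptsState::Enabled : InterruptsState::Disabled;
}

inline void restore_processor_interrupts_state(InterruptsState previous_interrupts_state)
{
    // Re-enable interrupts only if they were enabled before the lock was taken.
    if (previous_interrupts_state == InterruptsState::Enabled)
        Processor::enable_interrupts();
    else
        Processor::disable_interrupts();
}

Representing the previous state as an enum rather than a raw flags value is what lets the aarch64 stubs simply return InterruptsState::Disabled, instead of pretending to have an x86-style IF bit (0x200) as the old u32-based interface did.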