Kernel: Use InterruptsState in Spinlock code

This commit updates the lock functions of Spinlock and
RecursiveSpinlock to return the InterruptsState of the processor
instead of the raw processor flags. The unlock functions only ever
looked at the interrupt flag within those processor flags, so using the
InterruptsState enum clarifies the intent and lets the same Spinlock
code be used for the aarch64 build.

To keep the build working, all call sites are updated as well.
Timon Kruiper authored on 2022-08-23 21:42:30 +02:00; committed by Andreas Kling
parent 6432f3eee8
commit e8aff0c1c8
12 changed files with 41 additions and 51 deletions
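
For context, here is a minimal sketch of what the InterruptsState abstraction might look like. InterruptsState::Disabled, processor_interrupts_state(), restore_processor_interrupts_state(), and Processor::disable_interrupts() all appear in the diff below; the Enabled enumerator, Processor::are_interrupts_enabled(), Processor::enable_interrupts(), and the function bodies are assumptions made for illustration.

// Sketch only: an architecture-neutral record of whether interrupts were
// enabled, replacing the raw x86 processor flags.
enum class InterruptsState {
    Enabled,
    Disabled,
};

// Assumed helper: capture the current interrupt state of this processor.
inline InterruptsState processor_interrupts_state()
{
    return Processor::are_interrupts_enabled() ? InterruptsState::Enabled
                                               : InterruptsState::Disabled;
}

// Assumed helper: restore a previously captured interrupt state.
inline void restore_processor_interrupts_state(InterruptsState previous_interrupts_state)
{
    if (previous_interrupts_state == InterruptsState::Enabled)
        Processor::enable_interrupts();
    else
        Processor::disable_interrupts();
}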


@@ -21,8 +21,8 @@ public:
     {
     }
 
-    u32 lock();
-    void unlock(u32 prev_flags);
+    InterruptsState lock();
+    void unlock(InterruptsState);
 
     [[nodiscard]] ALWAYS_INLINE bool is_locked() const
     {
@@ -53,8 +53,8 @@ public:
     {
     }
 
-    u32 lock();
-    void unlock(u32 prev_flags);
+    InterruptsState lock();
+    void unlock(InterruptsState);
 
     [[nodiscard]] ALWAYS_INLINE bool is_locked() const
     {


@@ -11,21 +11,21 @@
 namespace Kernel {
 
-u32 Spinlock::lock()
+InterruptsState Spinlock::lock()
 {
-    return 0;
+    return InterruptsState::Disabled;
 }
 
-void Spinlock::unlock(u32)
+void Spinlock::unlock(InterruptsState)
 {
 }
 
-u32 RecursiveSpinlock::lock()
+InterruptsState RecursiveSpinlock::lock()
 {
-    return 0;
+    return InterruptsState::Disabled;
 }
 
-void RecursiveSpinlock::unlock(u32)
+void RecursiveSpinlock::unlock(InterruptsState)
 {
 }


@@ -467,8 +467,7 @@ extern "C" UNMAP_AFTER_INIT void pre_init_finished(void)
     // to this point
 
     // The target flags will get restored upon leaving the trap
-    u32 prev_flags = cpu_flags();
-    Scheduler::leave_on_first_switch(prev_flags);
+    Scheduler::leave_on_first_switch(processor_interrupts_state());
 }
 
 extern "C" UNMAP_AFTER_INIT void post_init_finished(void)


@@ -1561,8 +1561,7 @@ extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe
     // the scheduler lock. We don't want to enable interrupts at this point
     // as we're still in the middle of a context switch. Doing so could
     // trigger a context switch within a context switch, leading to a crash.
-    FlatPtr flags = trap->regs->flags();
-    Scheduler::leave_on_first_switch(flags & ~0x200);
+    Scheduler::leave_on_first_switch(InterruptsState::Disabled);
 }
 
 extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)


@@ -8,35 +8,31 @@
 namespace Kernel {
 
-u32 Spinlock::lock()
+InterruptsState Spinlock::lock()
 {
-    u32 prev_flags = cpu_flags();
+    InterruptsState previous_interrupts_state = processor_interrupts_state();
     Processor::enter_critical();
-    cli();
+    Processor::disable_interrupts();
     while (m_lock.exchange(1, AK::memory_order_acquire) != 0)
         Processor::wait_check();
     track_lock_acquire(m_rank);
-    return prev_flags;
+    return previous_interrupts_state;
 }
 
-void Spinlock::unlock(u32 prev_flags)
+void Spinlock::unlock(InterruptsState previous_interrupts_state)
 {
     VERIFY(is_locked());
     track_lock_release(m_rank);
     m_lock.store(0, AK::memory_order_release);
 
     Processor::leave_critical();
-    if ((prev_flags & 0x200) != 0)
-        sti();
-    else
-        cli();
+    restore_processor_interrupts_state(previous_interrupts_state);
 }
 
-u32 RecursiveSpinlock::lock()
+InterruptsState RecursiveSpinlock::lock()
 {
-    u32 prev_flags = cpu_flags();
-    cli();
+    InterruptsState previous_interrupts_state = processor_interrupts_state();
+    Processor::disable_interrupts();
     Processor::enter_critical();
     auto& proc = Processor::current();
     FlatPtr cpu = FlatPtr(&proc);
@@ -50,10 +46,10 @@ u32 RecursiveSpinlock::lock()
     if (m_recursions == 0)
         track_lock_acquire(m_rank);
     m_recursions++;
-    return prev_flags;
+    return previous_interrupts_state;
 }
 
-void RecursiveSpinlock::unlock(u32 prev_flags)
+void RecursiveSpinlock::unlock(InterruptsState previous_interrupts_state)
 {
     VERIFY_INTERRUPTS_DISABLED();
     VERIFY(m_recursions > 0);
@@ -64,11 +60,7 @@ void RecursiveSpinlock::unlock(u32 prev_flags)
     }
 
     Processor::leave_critical();
-    if ((prev_flags & 0x200) != 0)
-        sti();
-    else
-        cli();
+    restore_processor_interrupts_state(previous_interrupts_state);
 }
 
 }
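
The branch removed above tested bit 9 of the saved flags (0x200, the x86 interrupt flag IF) to choose between sti() and cli(). A rough sketch of how the old check maps onto the new call, assuming an x86-style flags value:

// Old, x86-specific logic: inspect the IF bit of the saved flags.
u32 prev_flags = cpu_flags();
if ((prev_flags & 0x200) != 0) // bit 9: interrupts were enabled before
    sti();                     // re-enable interrupts
else
    cli();                     // keep interrupts disabled

// Architecture-neutral equivalent used by this commit:
restore_processor_interrupts_state((prev_flags & 0x200) != 0
        ? InterruptsState::Enabled
        : InterruptsState::Disabled);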


@@ -25,24 +25,24 @@ public:
         : m_lock(&lock)
     {
         VERIFY(m_lock);
-        m_prev_flags = m_lock->lock();
+        m_previous_interrupts_state = m_lock->lock();
         m_have_lock = true;
     }
 
     SpinlockLocker(SpinlockLocker&& from)
         : m_lock(from.m_lock)
-        , m_prev_flags(from.m_prev_flags)
+        , m_previous_interrupts_state(from.m_previous_interrupts_state)
         , m_have_lock(from.m_have_lock)
     {
         from.m_lock = nullptr;
-        from.m_prev_flags = 0;
+        from.m_previous_interrupts_state = InterruptsState::Disabled;
         from.m_have_lock = false;
     }
 
     ~SpinlockLocker()
     {
         if (m_lock && m_have_lock) {
-            m_lock->unlock(m_prev_flags);
+            m_lock->unlock(m_previous_interrupts_state);
         }
     }
 
@@ -50,7 +50,7 @@ public:
     {
         VERIFY(m_lock);
         VERIFY(!m_have_lock);
-        m_prev_flags = m_lock->lock();
+        m_previous_interrupts_state = m_lock->lock();
         m_have_lock = true;
     }
 
@@ -58,8 +58,8 @@ public:
     {
         VERIFY(m_lock);
         VERIFY(m_have_lock);
-        m_lock->unlock(m_prev_flags);
-        m_prev_flags = 0;
+        m_lock->unlock(m_previous_interrupts_state);
+        m_previous_interrupts_state = InterruptsState::Disabled;
         m_have_lock = false;
     }
 
@@ -70,7 +70,7 @@ public:
 private:
     LockType* m_lock { nullptr };
-    u32 m_prev_flags { 0 };
+    InterruptsState m_previous_interrupts_state { InterruptsState::Disabled };
     bool m_have_lock { false };
 };
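
Since SpinlockLocker stores the captured state internally, call sites are unaffected by the type change. A hypothetical usage example (the lock and function names are made up for illustration):

// Hypothetical caller: the InterruptsState bookkeeping is fully hidden
// inside the RAII locker, so existing call sites compile unchanged.
Spinlock s_state_lock { LockRank::None };

void update_shared_state()
{
    SpinlockLocker locker(s_state_lock); // lock() captures the InterruptsState
    // ... critical section runs with interrupts disabled ...
}                                        // destructor restores it via unlock()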


@@ -1082,7 +1082,7 @@ u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
 {
     VERIFY_INTERRUPTS_DISABLED();
     auto& mm_data = get_data();
-    mm_data.m_quickmap_prev_flags = mm_data.m_quickmap_in_use.lock();
+    mm_data.m_quickmap_previous_interrupts_state = mm_data.m_quickmap_in_use.lock();
 
     VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE);
     u32 pte_idx = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;
@@ -1108,7 +1108,7 @@ void MemoryManager::unquickmap_page()
     auto& pte = ((PageTableEntry*)boot_pd_kernel_pt1023)[pte_idx];
     pte.clear();
     flush_tlb_local(vaddr);
-    mm_data.m_quickmap_in_use.unlock(mm_data.m_quickmap_prev_flags);
+    mm_data.m_quickmap_in_use.unlock(mm_data.m_quickmap_previous_interrupts_state);
 }
 
 bool MemoryManager::validate_user_stack(AddressSpace& space, VirtualAddress vaddr) const


@@ -90,7 +90,7 @@ struct MemoryManagerData {
     static ProcessorSpecificDataID processor_specific_data_id() { return ProcessorSpecificDataID::MemoryManager; }
 
     Spinlock m_quickmap_in_use { LockRank::None };
-    u32 m_quickmap_prev_flags;
+    InterruptsState m_quickmap_previous_interrupts_state;
 };
 
 // This class represents a set of committed physical pages.


@@ -327,13 +327,13 @@ void Scheduler::enter_current(Thread& prev_thread)
     }
 }
 
-void Scheduler::leave_on_first_switch(u32 flags)
+void Scheduler::leave_on_first_switch(InterruptsState previous_interrupts_state)
 {
     // This is called when a thread is switched into for the first time.
     // At this point, enter_current has already be called, but because
     // Scheduler::context_switch is not in the call stack we need to
     // clean up and release locks manually here
-    g_scheduler_lock.unlock(flags);
+    g_scheduler_lock.unlock(previous_interrupts_state);
     VERIFY(Processor::current_in_scheduler());
     Processor::set_current_in_scheduler(false);


@@ -40,7 +40,7 @@ public:
     static void yield();
     static void context_switch(Thread*);
     static void enter_current(Thread& prev_thread);
-    static void leave_on_first_switch(u32 flags);
+    static void leave_on_first_switch(InterruptsState);
     static void prepare_after_exec();
     static void prepare_for_idle_loop();
     static Process* colonel();


@@ -557,7 +557,7 @@ public:
     void begin_requeue()
     {
         // We need to hold the lock until we moved it over
-        m_relock_flags = m_lock.lock();
+        m_previous_interrupts_state = m_lock.lock();
     }
 
     void finish_requeue(FutexQueue&);
@@ -567,7 +567,7 @@ public:
 protected:
     FutexQueue& m_futex_queue;
     u32 m_bitset { 0 };
-    u32 m_relock_flags { 0 };
+    InterruptsState m_previous_interrupts_state { InterruptsState::Disabled };
     bool m_did_unblock { false };
 };


@@ -164,7 +164,7 @@ void Thread::FutexBlocker::finish_requeue(FutexQueue& futex_queue)
     VERIFY(m_lock.is_locked_by_current_processor());
     set_blocker_set_raw_locked(&futex_queue);
 
     // We can now release the lock
-    m_lock.unlock(m_relock_flags);
+    m_lock.unlock(m_previous_interrupts_state);
 }
 
 bool Thread::FutexBlocker::unblock_bitset(u32 bitset)