
Kernel: Rename Spinlock::is_owned_by_current_thread()

...to is_owned_by_current_processor(). As Tom pointed out, this is
much more accurate. :^)
Andreas Kling, 3 years ago
commit 68bf6db673

+ 1 - 1
Kernel/Arch/x86/common/Interrupts.cpp

@@ -463,7 +463,7 @@ extern "C" void handle_interrupt(TrapFrame*) __attribute__((used));
 
 extern "C" UNMAP_AFTER_INIT void pre_init_finished(void)
 {
-    VERIFY(g_scheduler_lock.is_locked_by_current_thread());
+    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
 
     // Because init_finished() will wait on the other APs, we need
     // to release the scheduler lock so that the other APs can also get

+ 1 - 1
Kernel/Interrupts/APIC.cpp

@@ -459,7 +459,7 @@ UNMAP_AFTER_INIT void APIC::init_finished(u32 cpu)
     VERIFY(cpu < m_processor_enabled_cnt);
     // Since we're waiting on other APs here, we shouldn't have the
     // scheduler lock
-    VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
+    VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
 
     // Notify the BSP that we are done initializing. It will unmap the startup data at P8000
     m_apic_ap_count.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);

+ 1 - 1
Kernel/Locking/Spinlock.h

@@ -100,7 +100,7 @@ public:
         return m_lock.load(AK::memory_order_relaxed) != 0;
     }
 
-    [[nodiscard]] ALWAYS_INLINE bool is_locked_by_current_thread() const
+    [[nodiscard]] ALWAYS_INLINE bool is_locked_by_current_processor() const
     {
         return m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current());
     }

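The Spinlock.h hunk above is the reason for the rename: the lock word stores a pointer to the owning Processor (the CPU that acquired the lock), not to a Thread, so the predicate can only answer "does the current processor hold this lock?". Below is a minimal, self-contained sketch of that ownership scheme, modeled on the hunk; std::atomic and a thread_local Processor stand in for the kernel's AK atomics and per-CPU object, and the names are illustrative rather than the kernel's exact API.

// Sketch of a recursive spinlock whose lock word records the owning
// *processor*, modeled on the Spinlock.h hunk above. Processor and FlatPtr
// are simplified stand-ins for the kernel types; this is illustrative only.
#include <atomic>
#include <cstdint>

using FlatPtr = std::uintptr_t;

struct Processor {
    // The kernel returns the per-CPU Processor object here; a thread_local
    // instance stands in for it in this sketch.
    static Processor& current()
    {
        static thread_local Processor instance;
        return instance;
    }
};

class RecursiveSpinlockSketch {
public:
    void lock()
    {
        FlatPtr const self = FlatPtr(&Processor::current());
        FlatPtr expected = 0;
        // Spin until the lock is free, unless this processor already owns it.
        while (!m_lock.compare_exchange_strong(expected, self, std::memory_order_acquire)) {
            if (expected == self)
                break; // re-entry on the owning processor
            expected = 0;
        }
        m_recursions++;
    }

    void unlock()
    {
        if (--m_recursions == 0)
            m_lock.store(0, std::memory_order_release);
    }

    bool is_locked() const
    {
        return m_lock.load(std::memory_order_relaxed) != 0;
    }

    // The renamed predicate: it compares the lock word against the *current
    // processor*, so it can only say which CPU holds the lock, never which thread.
    bool is_locked_by_current_processor() const
    {
        return m_lock.load(std::memory_order_relaxed) == FlatPtr(&Processor::current());
    }

private:
    std::atomic<FlatPtr> m_lock { 0 };
    uint32_t m_recursions { 0 };
};

With that storage scheme, an assertion like VERIFY(g_scheduler_lock.is_locked_by_current_processor()) in the hunks below checks that this CPU took the lock; nothing in the lock word identifies a thread, which is why the old _thread name was misleading.
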
+ 12 - 12
Kernel/Memory/MemoryManager.cpp

@@ -508,8 +508,8 @@ PhysicalAddress MemoryManager::get_physical_address(PhysicalPage const& physical
 PageTableEntry* MemoryManager::pte(PageDirectory& page_directory, VirtualAddress vaddr)
 {
     VERIFY_INTERRUPTS_DISABLED();
-    VERIFY(s_mm_lock.is_locked_by_current_thread());
-    VERIFY(page_directory.get_lock().is_locked_by_current_thread());
+    VERIFY(s_mm_lock.is_locked_by_current_processor());
+    VERIFY(page_directory.get_lock().is_locked_by_current_processor());
     u32 page_directory_table_index = (vaddr.get() >> 30) & 0x1ff;
     u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
     u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
@@ -525,8 +525,8 @@ PageTableEntry* MemoryManager::pte(PageDirectory& page_directory, VirtualAddress
 PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
 {
     VERIFY_INTERRUPTS_DISABLED();
-    VERIFY(s_mm_lock.is_locked_by_current_thread());
-    VERIFY(page_directory.get_lock().is_locked_by_current_thread());
+    VERIFY(s_mm_lock.is_locked_by_current_processor());
+    VERIFY(page_directory.get_lock().is_locked_by_current_processor());
     u32 page_directory_table_index = (vaddr.get() >> 30) & 0x1ff;
     u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
     u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
@@ -567,8 +567,8 @@ PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
 void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress vaddr, bool is_last_release)
 {
     VERIFY_INTERRUPTS_DISABLED();
-    VERIFY(s_mm_lock.is_locked_by_current_thread());
-    VERIFY(page_directory.get_lock().is_locked_by_current_thread());
+    VERIFY(s_mm_lock.is_locked_by_current_processor());
+    VERIFY(page_directory.get_lock().is_locked_by_current_processor());
     u32 page_directory_table_index = (vaddr.get() >> 30) & 0x1ff;
     u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
     u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
@@ -622,7 +622,7 @@ Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
 
 Region* MemoryManager::find_user_region_from_vaddr_no_lock(AddressSpace& space, VirtualAddress vaddr)
 {
-    VERIFY(space.get_lock().is_locked_by_current_thread());
+    VERIFY(space.get_lock().is_locked_by_current_processor());
     return space.find_region_containing({ vaddr, 1 });
 }
 
@@ -953,7 +953,7 @@ void MemoryManager::flush_tlb(PageDirectory const* page_directory, VirtualAddres
 
 PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t pdpt_index)
 {
-    VERIFY(s_mm_lock.is_locked_by_current_thread());
+    VERIFY(s_mm_lock.is_locked_by_current_processor());
     auto& mm_data = get_data();
     auto& pte = boot_pd_kernel_pt1023[(KERNEL_QUICKMAP_PD - KERNEL_PT1024_BASE) / PAGE_SIZE];
     auto pd_paddr = directory.m_directory_pages[pdpt_index]->paddr();
@@ -979,7 +979,7 @@ PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t
 
 PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
 {
-    VERIFY(s_mm_lock.is_locked_by_current_thread());
+    VERIFY(s_mm_lock.is_locked_by_current_processor());
     auto& mm_data = get_data();
     auto& pte = ((PageTableEntry*)boot_pd_kernel_pt1023)[(KERNEL_QUICKMAP_PT - KERNEL_PT1024_BASE) / PAGE_SIZE];
     if (pte.physical_page_base() != pt_paddr.get()) {
@@ -1005,7 +1005,7 @@ PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
 u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
 {
     VERIFY_INTERRUPTS_DISABLED();
-    VERIFY(s_mm_lock.is_locked_by_current_thread());
+    VERIFY(s_mm_lock.is_locked_by_current_processor());
     auto& mm_data = get_data();
     mm_data.m_quickmap_prev_flags = mm_data.m_quickmap_in_use.lock();
 
@@ -1026,7 +1026,7 @@ u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
 void MemoryManager::unquickmap_page()
 {
     VERIFY_INTERRUPTS_DISABLED();
-    VERIFY(s_mm_lock.is_locked_by_current_thread());
+    VERIFY(s_mm_lock.is_locked_by_current_processor());
     auto& mm_data = get_data();
     VERIFY(mm_data.m_quickmap_in_use.is_locked());
     VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE);
@@ -1039,7 +1039,7 @@ void MemoryManager::unquickmap_page()
 
 bool MemoryManager::validate_user_stack_no_lock(AddressSpace& space, VirtualAddress vaddr) const
 {
-    VERIFY(space.get_lock().is_locked_by_current_thread());
+    VERIFY(space.get_lock().is_locked_by_current_processor());
 
     if (!is_user_address(vaddr))
         return false;

+ 4 - 4
Kernel/Memory/Region.cpp

@@ -174,7 +174,7 @@ void Region::set_should_cow(size_t page_index, bool cow)
 
 bool Region::map_individual_page_impl(size_t page_index)
 {
-    VERIFY(m_page_directory->get_lock().is_locked_by_current_thread());
+    VERIFY(m_page_directory->get_lock().is_locked_by_current_processor());
     auto page_vaddr = vaddr_from_page_index(page_index);
 
     bool user_allowed = page_vaddr.get() >= 0x00800000 && is_user_address(page_vaddr);
@@ -253,7 +253,7 @@ void Region::unmap(ShouldDeallocateVirtualRange deallocate_range)
 void Region::set_page_directory(PageDirectory& page_directory)
 {
     VERIFY(!m_page_directory || m_page_directory == &page_directory);
-    VERIFY(s_mm_lock.is_locked_by_current_thread());
+    VERIFY(s_mm_lock.is_locked_by_current_processor());
     m_page_directory = page_directory;
 }
 
@@ -394,8 +394,8 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
 {
     VERIFY_INTERRUPTS_DISABLED();
     VERIFY(vmobject().is_inode());
-    VERIFY(!s_mm_lock.is_locked_by_current_thread());
-    VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
+    VERIFY(!s_mm_lock.is_locked_by_current_processor());
+    VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
 
     auto& inode_vmobject = static_cast<InodeVMObject&>(vmobject());
 

+ 5 - 5
Kernel/Scheduler.cpp

@@ -163,7 +163,7 @@ bool Scheduler::dequeue_runnable_thread(Thread& thread, bool check_affinity)
 
 void Scheduler::enqueue_runnable_thread(Thread& thread)
 {
-    VERIFY(g_scheduler_lock.is_locked_by_current_thread());
+    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
     if (thread.is_idle_thread())
         return;
     auto priority = thread_priority_to_priority_index(thread.priority());
@@ -266,7 +266,7 @@ bool Scheduler::yield()
 
 bool Scheduler::context_switch(Thread* thread)
 {
-    if (Memory::s_mm_lock.is_locked_by_current_thread()) {
+    if (Memory::s_mm_lock.is_locked_by_current_processor()) {
         PANIC("In context switch while holding Memory::s_mm_lock");
     }
 
@@ -320,7 +320,7 @@ bool Scheduler::context_switch(Thread* thread)
 
 void Scheduler::enter_current(Thread& prev_thread, bool is_first)
 {
-    VERIFY(g_scheduler_lock.is_locked_by_current_thread());
+    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
 
     // We already recorded the scheduled time when entering the trap, so this merely accounts for the kernel time since then
     auto scheduler_time = Scheduler::current_time();
@@ -362,7 +362,7 @@ void Scheduler::prepare_after_exec()
 {
     // This is called after exec() when doing a context "switch" into
     // the new process. This is called from Processor::assume_context
-    VERIFY(g_scheduler_lock.is_locked_by_current_thread());
+    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
 
     VERIFY(!Processor::current_in_scheduler());
     Processor::set_current_in_scheduler(true);
@@ -372,7 +372,7 @@ void Scheduler::prepare_for_idle_loop()
 {
     // This is called when the CPU finished setting up the idle loop
     // and is about to run it. We need to acquire the scheduler lock
-    VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
+    VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
     g_scheduler_lock.lock();
 
     VERIFY(!Processor::current_in_scheduler());

+ 1 - 1
Kernel/Syscall.cpp

@@ -236,7 +236,7 @@ NEVER_INLINE void syscall_handler(TrapFrame* trap)
     // Check if we're supposed to return to userspace or just die.
     current_thread->die_if_needed();
 
-    VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
+    VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
 }
 
 }

+ 1 - 1
Kernel/Syscalls/execve.cpp

@@ -922,7 +922,7 @@ KResult Process::exec(String path, Vector<String> arguments, Vector<String> envi
         // We need to enter the scheduler lock before changing the state
         // and it will be released after the context switch into that
         // thread. We should also still be in our critical section
-        VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
+        VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
         VERIFY(Processor::in_critical() == 1);
         g_scheduler_lock.lock();
         current_thread->set_state(Thread::State::Running);

+ 14 - 14
Kernel/Thread.cpp

@@ -160,7 +160,7 @@ void Thread::block(Kernel::Mutex& lock, SpinlockLocker<Spinlock<u8>>& lock_lock,
     VERIFY(!Processor::current_in_irq());
     VERIFY(this == Thread::current());
     ScopedCritical critical;
-    VERIFY(!Memory::s_mm_lock.is_locked_by_current_thread());
+    VERIFY(!Memory::s_mm_lock.is_locked_by_current_processor());
 
     SpinlockLocker block_lock(m_block_lock);
 
@@ -198,7 +198,7 @@ void Thread::block(Kernel::Mutex& lock, SpinlockLocker<Spinlock<u8>>& lock_lock,
 
     for (;;) {
         // Yield to the scheduler, and wait for us to resume unblocked.
-        VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
+        VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
         VERIFY(Processor::in_critical());
         if (&lock != &big_lock && big_lock.is_locked_by_current_thread()) {
             // We're locking another lock and already hold the big lock...
@@ -239,8 +239,8 @@ u32 Thread::unblock_from_lock(Kernel::Mutex& lock)
         SpinlockLocker block_lock(m_block_lock);
         VERIFY(m_blocking_lock == &lock);
         VERIFY(!Processor::current_in_irq());
-        VERIFY(g_scheduler_lock.is_locked_by_current_thread());
-        VERIFY(m_block_lock.is_locked_by_current_thread());
+        VERIFY(g_scheduler_lock.is_locked_by_current_processor());
+        VERIFY(m_block_lock.is_locked_by_current_processor());
         VERIFY(m_blocking_lock == &lock);
         dbgln_if(THREAD_DEBUG, "Thread {} unblocked from Mutex {}", *this, &lock);
         m_blocking_lock = nullptr;
@@ -285,8 +285,8 @@ void Thread::unblock_from_blocker(Blocker& blocker)
 void Thread::unblock(u8 signal)
 {
     VERIFY(!Processor::current_in_irq());
-    VERIFY(g_scheduler_lock.is_locked_by_current_thread());
-    VERIFY(m_block_lock.is_locked_by_current_thread());
+    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
+    VERIFY(m_block_lock.is_locked_by_current_processor());
     if (m_state != Thread::Blocked)
         return;
     if (m_blocking_lock)
@@ -402,7 +402,7 @@ void Thread::exit(void* exit_value)
 
 void Thread::yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_held)
 {
-    VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
+    VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
     VERIFY(verify_lock_not_held == VerifyLockNotHeld::No || !process().big_lock().is_locked_by_current_thread());
     // Disable interrupts here. This ensures we don't accidentally switch contexts twice
     InterruptDisabler disable;
@@ -414,7 +414,7 @@ void Thread::yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_
 
 void Thread::yield_and_release_relock_big_lock()
 {
-    VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
+    VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
     // Disable interrupts here. This ensures we don't accidentally switch contexts twice
     InterruptDisabler disable;
     Scheduler::yield(); // flag a switch
@@ -612,7 +612,7 @@ u32 Thread::pending_signals() const
 
 u32 Thread::pending_signals_for_state() const
 {
-    VERIFY(g_scheduler_lock.is_locked_by_current_thread());
+    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
     constexpr u32 stopped_signal_mask = (1 << (SIGCONT - 1)) | (1 << (SIGKILL - 1)) | (1 << (SIGTRAP - 1));
     if (is_handling_page_fault())
         return 0;
@@ -709,7 +709,7 @@ void Thread::send_urgent_signal_to_self(u8 signal)
 
 DispatchSignalResult Thread::dispatch_one_pending_signal()
 {
-    VERIFY(m_lock.is_locked_by_current_thread());
+    VERIFY(m_lock.is_locked_by_current_processor());
     u32 signal_candidates = pending_signals_for_state() & ~m_signal_mask;
     if (signal_candidates == 0)
         return DispatchSignalResult::Continue;
@@ -816,7 +816,7 @@ void Thread::resume_from_stopped()
 {
     VERIFY(is_stopped());
     VERIFY(m_stop_state != State::Invalid);
-    VERIFY(g_scheduler_lock.is_locked_by_current_thread());
+    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
     if (m_stop_state == Blocked) {
         SpinlockLocker block_lock(m_block_lock);
         if (m_blocker || m_blocking_lock) {
@@ -834,7 +834,7 @@ void Thread::resume_from_stopped()
 DispatchSignalResult Thread::dispatch_signal(u8 signal)
 {
     VERIFY_INTERRUPTS_DISABLED();
-    VERIFY(g_scheduler_lock.is_locked_by_current_thread());
+    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
     VERIFY(signal > 0 && signal <= 32);
     VERIFY(process().is_user_process());
     VERIFY(this == Thread::current());
@@ -1047,7 +1047,7 @@ RefPtr<Thread> Thread::clone(Process& process)
 void Thread::set_state(State new_state, u8 stop_signal)
 {
     State previous_state;
-    VERIFY(g_scheduler_lock.is_locked_by_current_thread());
+    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
     if (new_state == m_state)
         return;
 
@@ -1162,7 +1162,7 @@ String Thread::backtrace()
 
     auto& process = const_cast<Process&>(this->process());
     auto stack_trace = Processor::capture_stack_trace(*this);
-    VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
+    VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
     ProcessPagingScope paging_scope(process);
     for (auto& frame : stack_trace) {
         if (Memory::is_user_range(VirtualAddress(frame), sizeof(FlatPtr) * 2)) {

+ 5 - 5
Kernel/Thread.h

@@ -193,7 +193,7 @@ public:
     StringView name() const
     {
         // NOTE: Whoever is calling this needs to be holding our lock while reading the name.
-        VERIFY(m_lock.is_locked_by_current_thread());
+        VERIFY(m_lock.is_locked_by_current_processor());
         return m_name ? m_name->view() : StringView {};
     }
 
@@ -839,7 +839,7 @@ public:
         VERIFY(!Processor::current_in_irq());
         VERIFY(this == Thread::current());
         ScopedCritical critical;
-        VERIFY(!Memory::s_mm_lock.is_locked_by_current_thread());
+        VERIFY(!Memory::s_mm_lock.is_locked_by_current_processor());
 
         SpinlockLocker block_lock(m_block_lock);
         // We need to hold m_block_lock so that nobody can unblock a blocker as soon
@@ -878,8 +878,8 @@ public:
             // threads to die. In that case
             timer_was_added = TimerQueue::the().add_timer_without_id(*m_block_timer, block_timeout.clock_id(), block_timeout.absolute_time(), [&]() {
                 VERIFY(!Processor::current_in_irq());
-                VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
-                VERIFY(!m_block_lock.is_locked_by_current_thread());
+                VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
+                VERIFY(!m_block_lock.is_locked_by_current_processor());
                 // NOTE: this may execute on the same or any other processor!
                 SpinlockLocker scheduler_lock(g_scheduler_lock);
                 SpinlockLocker block_lock(m_block_lock);
@@ -907,7 +907,7 @@ public:
         auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
         for (;;) {
             // Yield to the scheduler, and wait for us to resume unblocked.
-            VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
+            VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
             VERIFY(Processor::in_critical());
             yield_without_releasing_big_lock();
             VERIFY(Processor::in_critical());

+ 1 - 1
Kernel/ThreadBlockers.cpp

@@ -162,7 +162,7 @@ Thread::FutexBlocker::~FutexBlocker()
 
 void Thread::FutexBlocker::finish_requeue(FutexQueue& futex_queue)
 {
-    VERIFY(m_lock.is_locked_by_current_thread());
+    VERIFY(m_lock.is_locked_by_current_processor());
     set_blocker_set_raw_locked(&futex_queue);
     // We can now release the lock
     m_lock.unlock(m_relock_flags);