Kernel: Don't release/relock spinlocks repeatedly during space teardown

Grab the page directory and MM locks once at the start of address space
teardown, then hold onto them across all the region unmapping work.
Andreas Kling 2022-01-12 14:32:21 +01:00
parent 2323cdd914
commit d8206c1059
3 changed files with 15 additions and 5 deletions
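
The shape of the change, as a minimal standalone sketch before the diffs: previously each per-region unmap took and dropped the shared page directory and MM locks, so tearing down N regions cycled each lock N times. Because these are recursive locks, that repeated re-acquisition was legal even under an outer holder, just wasteful. In this sketch std::recursive_mutex stands in for the kernel's RecursiveSpinlock, and all names are invented for illustration:

    #include <cstdio>
    #include <mutex>
    #include <vector>

    std::recursive_mutex pd_lock; // stand-in for m_page_directory->get_lock()
    std::recursive_mutex mm_lock; // stand-in for s_mm_lock

    // Old shape: every call re-acquires and releases both shared locks.
    void unmap_region(int region_id)
    {
        std::scoped_lock locks(pd_lock, mm_lock);
        std::printf("unmapped region %d\n", region_id);
    }

    // New shape: the teardown path pins both locks once across the loop.
    void remove_all_regions(std::vector<int> const& region_ids)
    {
        std::scoped_lock locks(pd_lock, mm_lock);
        for (int id : region_ids)
            std::printf("unmapped region %d (locks already held)\n", id);
    }

    int main()
    {
        remove_all_regions({ 1, 2, 3 });
        return 0;
    }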

Kernel/Memory/AddressSpace.cpp

@@ -321,9 +321,11 @@ void AddressSpace::dump_regions()
 void AddressSpace::remove_all_regions(Badge<Process>)
 {
     VERIFY(Thread::current() == g_finalizer);
-    SpinlockLocker lock(m_lock);
+    SpinlockLocker locker(m_lock);
+    SpinlockLocker pd_locker(m_page_directory->get_lock());
+    SpinlockLocker mm_locker(s_mm_lock);
     for (auto& region : m_regions)
-        (*region).unmap(Region::ShouldDeallocateVirtualRange::No, ShouldFlushTLB::No);
+        (*region).unmap_with_locks_held(Region::ShouldDeallocateVirtualRange::No, ShouldFlushTLB::No, pd_locker, mm_locker);
     m_regions.clear();
 }
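
Note the call-site shape: the two SpinlockLocker objects are passed to unmap_with_locks_held() by reference but, as the unnamed parameters in the Region.cpp diff below show, never used by the callee. They act purely as evidence that the caller constructed the lockers, i.e. that both locks are held for the duration of the call.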

Kernel/Memory/Region.cpp

@@ -234,12 +234,19 @@ bool Region::remap_vmobject_page(size_t page_index, bool with_flush)
     return success;
 }
 
-void Region::unmap(ShouldDeallocateVirtualRange deallocate_range, ShouldFlushTLB should_flush_tlb)
+void Region::unmap(ShouldDeallocateVirtualRange should_deallocate_range, ShouldFlushTLB should_flush_tlb)
+{
+    if (!m_page_directory)
+        return;
+    SpinlockLocker pd_locker(m_page_directory->get_lock());
+    SpinlockLocker mm_locker(s_mm_lock);
+    unmap_with_locks_held(should_deallocate_range, should_flush_tlb, pd_locker, mm_locker);
+}
+
+void Region::unmap_with_locks_held(ShouldDeallocateVirtualRange deallocate_range, ShouldFlushTLB should_flush_tlb, SpinlockLocker<RecursiveSpinlock>&, SpinlockLocker<RecursiveSpinlock>&)
 {
     if (!m_page_directory)
         return;
-    SpinlockLocker page_lock(m_page_directory->get_lock());
-    SpinlockLocker lock(s_mm_lock);
     size_t count = page_count();
     for (size_t i = 0; i < count; ++i) {
         auto vaddr = vaddr_from_page_index(i);
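
The split follows a lock-token idiom: the public unmap() remains the convenient entry point that acquires both locks itself, while batch callers construct the lockers once and call unmap_with_locks_held() directly. A hedged sketch of the same structure, again with standard-library types and invented names:

    #include <mutex>

    using Guard = std::unique_lock<std::recursive_mutex>;

    std::recursive_mutex pd_lock; // hypothetical shared locks
    std::recursive_mutex mm_lock;

    struct Region {
        // Convenience wrapper: takes both locks, then delegates.
        void unmap()
        {
            Guard pd_locker(pd_lock);
            Guard mm_locker(mm_lock);
            unmap_with_locks_held(pd_locker, mm_locker);
        }

        // The unnamed guard references document at the call site that
        // the caller already holds both locks.
        void unmap_with_locks_held(Guard&, Guard&)
        {
            // ... per-page unmap work, with the locks known to be held ...
        }
    };

    int main()
    {
        Region region;
        region.unmap(); // single-region callers still get locking for free
        return 0;
    }

The enforcement is advisory (nothing stops a caller from passing a deferred, unlocked guard), but it makes the lock-held precondition explicit in the signature instead of leaving it to a comment.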

Kernel/Memory/Region.h

@@ -179,6 +179,7 @@ public:
         Yes,
     };
 
     void unmap(ShouldDeallocateVirtualRange, ShouldFlushTLB = ShouldFlushTLB::Yes);
+    void unmap_with_locks_held(ShouldDeallocateVirtualRange, ShouldFlushTLB, SpinlockLocker<RecursiveSpinlock>& pd_locker, SpinlockLocker<RecursiveSpinlock>& mm_locker);
     void remap();