
Kernel: Don't hog the MM lock while unmapping regions

We were holding the MM lock across all of the region unmapping code.
This was previously necessary since the quickmaps used during unmapping
required holding the MM lock.

Now that it's no longer necessary, we can leave the MM lock alone here.
Andreas Kling, 2 years ago
commit d6ef18f587
3 changed files with 5 insertions and 8 deletions
  1. Kernel/Memory/AddressSpace.cpp (+ 1 - 2)
  2. Kernel/Memory/Region.cpp (+ 3 - 5)
  3. Kernel/Memory/Region.h (+ 1 - 1)

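The commit message above describes a locking-protocol change: region unmapping used to require both the page directory lock and the global MM lock, and after this change only the page directory lock matters. Below is a minimal, compilable sketch of that new shape. The names Region, unmap_with_locks_held, ShouldFlushTLB, SpinlockLocker, RecursiveSpinlock and PageDirectory mirror identifiers in the diff, but the bodies are simplified stand-ins written for illustration, not the actual SerenityOS kernel classes.

// Minimal sketch with stand-in types (simplified stubs, not the real kernel code).
// It illustrates the locking shape after this change: the caller takes only the
// page directory lock, and unmap_with_locks_held() no longer expects a second
// token proving that the MM lock is held.
#include <mutex>

struct RecursiveSpinlock {
    void lock() { m_mutex.lock(); }
    void unlock() { m_mutex.unlock(); }
    std::recursive_mutex m_mutex;
};

template<typename LockType>
class SpinlockLocker {
public:
    explicit SpinlockLocker(LockType& lock) : m_lock(lock) { m_lock.lock(); }
    ~SpinlockLocker() { m_lock.unlock(); }
private:
    LockType& m_lock;
};

enum class ShouldFlushTLB { No, Yes };

struct PageDirectory {
    RecursiveSpinlock& get_lock() { return m_lock; }
    RecursiveSpinlock m_lock;
};

struct Region {
    PageDirectory* m_page_directory { nullptr };

    void unmap(ShouldFlushTLB should_flush_tlb = ShouldFlushTLB::Yes)
    {
        if (!m_page_directory)
            return;
        // Only the page directory lock is taken here; the quickmaps used while
        // tearing down page table entries now synchronize themselves.
        SpinlockLocker<RecursiveSpinlock> pd_locker(m_page_directory->get_lock());
        unmap_with_locks_held(should_flush_tlb, pd_locker);
    }

    void unmap_with_locks_held(ShouldFlushTLB, SpinlockLocker<RecursiveSpinlock>&)
    {
        // ... walk the region's pages and clear their PTEs ...
        m_page_directory = nullptr;
    }
};

int main()
{
    PageDirectory pd;
    Region region;
    region.m_page_directory = &pd;
    region.unmap();
    return 0;
}

The design point the sketch tries to capture is that the lock-token parameter documents the caller's obligation; dropping the second parameter removes the MM lock from the unmap contract entirely, which is why the header and both call sites change together in the hunks below.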
+ 1 - 2
Kernel/Memory/AddressSpace.cpp

@@ -341,10 +341,9 @@ void AddressSpace::remove_all_regions(Badge<Process>)
     VERIFY(Thread::current() == g_finalizer);
     {
         SpinlockLocker pd_locker(m_page_directory->get_lock());
-        SpinlockLocker mm_locker(s_mm_lock);
         m_region_tree.with([&](auto& region_tree) {
             for (auto& region : region_tree.regions())
-                region.unmap_with_locks_held(ShouldFlushTLB::No, pd_locker, mm_locker);
+                region.unmap_with_locks_held(ShouldFlushTLB::No, pd_locker);
         });
     }
 

+ 3 - 5
Kernel/Memory/Region.cpp

@@ -70,8 +70,7 @@ Region::~Region()
         if (!is_readable() && !is_writable() && !is_executable()) {
             // If the region is "PROT_NONE", we didn't map it in the first place.
         } else {
-            SpinlockLocker mm_locker(s_mm_lock);
-            unmap_with_locks_held(ShouldFlushTLB::Yes, pd_locker, mm_locker);
+            unmap_with_locks_held(ShouldFlushTLB::Yes, pd_locker);
             VERIFY(!m_page_directory);
         }
     }
@@ -268,11 +267,10 @@ void Region::unmap(ShouldFlushTLB should_flush_tlb)
     if (!m_page_directory)
         return;
     SpinlockLocker pd_locker(m_page_directory->get_lock());
-    SpinlockLocker mm_locker(s_mm_lock);
-    unmap_with_locks_held(should_flush_tlb, pd_locker, mm_locker);
+    unmap_with_locks_held(should_flush_tlb, pd_locker);
 }
 
-void Region::unmap_with_locks_held(ShouldFlushTLB should_flush_tlb, SpinlockLocker<RecursiveSpinlock>&, SpinlockLocker<RecursiveSpinlock>&)
+void Region::unmap_with_locks_held(ShouldFlushTLB should_flush_tlb, SpinlockLocker<RecursiveSpinlock>&)
 {
     if (!m_page_directory)
         return;

+ 1 - 1
Kernel/Memory/Region.h

@@ -183,7 +183,7 @@ public:
     void set_page_directory(PageDirectory&);
     ErrorOr<void> map(PageDirectory&, ShouldFlushTLB = ShouldFlushTLB::Yes);
     void unmap(ShouldFlushTLB = ShouldFlushTLB::Yes);
-    void unmap_with_locks_held(ShouldFlushTLB, SpinlockLocker<RecursiveSpinlock>& pd_locker, SpinlockLocker<RecursiveSpinlock>& mm_locker);
+    void unmap_with_locks_held(ShouldFlushTLB, SpinlockLocker<RecursiveSpinlock>& pd_locker);
 
     void remap();