Browse Source

Kernel: Don't relock MM lock for every page when remapping region

Make sure that callers already hold the MM lock, and we don't have to
worry about reacquiring it every time.
Andreas Kling 3 years ago
parent
commit
bdbff9df24
1 changed file with 3 additions and 3 deletions
  1. +3 −3
      Kernel/Memory/Region.cpp

+ 3 - 3
Kernel/Memory/Region.cpp

@@ -177,6 +177,8 @@ void Region::set_should_cow(size_t page_index, bool cow)
 bool Region::map_individual_page_impl(size_t page_index)
 {
     VERIFY(m_page_directory->get_lock().is_locked_by_current_processor());
+    VERIFY(s_mm_lock.is_locked_by_current_processor());
+
     auto page_vaddr = vaddr_from_page_index(page_index);

     bool user_allowed = page_vaddr.get() >= USER_RANGE_BASE && is_user_address(page_vaddr);
@@ -184,9 +186,6 @@ bool Region::map_individual_page_impl(size_t page_index)
         PANIC("About to map mmap'ed page at a kernel address");
     }

-    // NOTE: We have to take the MM lock for PTE's to stay valid while we use them.
-    SpinlockLocker mm_locker(s_mm_lock);
-
     auto* pte = MM.ensure_pte(*m_page_directory, page_vaddr);
     if (!pte)
         return false;
@@ -215,6 +214,7 @@ bool Region::do_remap_vmobject_page(size_t page_index, bool with_flush)
     if (!translate_vmobject_page(page_index))
         return true; // not an error, region doesn't map this page
     SpinlockLocker page_lock(m_page_directory->get_lock());
+    SpinlockLocker lock(s_mm_lock);
     VERIFY(physical_page(page_index));
     bool success = map_individual_page_impl(page_index);
     if (with_flush)