
Kernel: Don't bother with page tables for PROT_NONE mappings

When mapping or unmapping completely inaccessible memory regions,
we don't need to update the page tables at all. This saves a bunch of
time in some situations, most notably during dynamic linking, where we
make a large VM reservation and immediately throw it away. :^)
Andreas Kling · 3 years ago · commit c6adefcfc0
2 files changed, 18 additions and 5 deletions
  1. Kernel/Memory/AddressSpace.cpp (+8 -1)
  2. Kernel/Memory/Region.cpp (+10 -4)
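
For context, here is a minimal userland sketch of the pattern the commit message describes (assuming the usual POSIX mmap()/munmap() interface; the 64 MiB size is purely illustrative). The dynamic linker reserves a large inaccessible range just to obtain a contiguous span of addresses and then discards it; with this change, neither step has to touch the page tables.

    #include <cstddef>
    #include <cstdio>
    #include <sys/mman.h>

    int main()
    {
        // Reserve a large, completely inaccessible address range just to
        // discover a contiguous span of virtual addresses.
        size_t const reservation_size = 64ul * 1024 * 1024;
        void* reservation = mmap(nullptr, reservation_size, PROT_NONE,
                                 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        if (reservation == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        // ... a dynamic linker would decide where to place objects here ...

        // Throw the reservation away again. With this commit, neither the
        // mmap() nor the munmap() above walks or updates any page tables.
        if (munmap(reservation, reservation_size) < 0) {
            perror("munmap");
            return 1;
        }
        return 0;
    }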

+ 8 - 1
Kernel/Memory/AddressSpace.cpp

@@ -191,7 +191,14 @@ ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(VirtualRange const&
         region_name = TRY(KString::try_create(name));
     auto region = TRY(Region::try_create_user_accessible(range, move(vmobject), offset_in_vmobject, move(region_name), prot_to_region_access_flags(prot), Region::Cacheable::Yes, shared));
     auto* added_region = TRY(add_region(move(region)));
-    TRY(added_region->map(page_directory(), ShouldFlushTLB::No));
+    if (prot == PROT_NONE) {
+        // For PROT_NONE mappings, we don't have to set up any page table mappings.
+        // We do still need to attach the region to the page_directory though.
+        SpinlockLocker mm_locker(s_mm_lock);
+        added_region->set_page_directory(page_directory());
+    } else {
+        TRY(added_region->map(page_directory(), ShouldFlushTLB::No));
+    }
     return added_region;
 }
 

+ 10 - 4
Kernel/Memory/Region.cpp

@@ -52,10 +52,16 @@ Region::~Region()
         MM.unregister_kernel_region(*this);
 
     if (m_page_directory) {
-        SpinlockLocker page_lock(m_page_directory->get_lock());
-        SpinlockLocker lock(s_mm_lock);
-        unmap(ShouldDeallocateVirtualRange::Yes);
-        VERIFY(!m_page_directory);
+        SpinlockLocker pd_locker(m_page_directory->get_lock());
+        if (!is_readable() && !is_writable() && !is_executable()) {
+            // If the region is "PROT_NONE", we didn't map it in the first place,
+            // so all we need to do here is deallocate the VM.
+            m_page_directory->range_allocator().deallocate(range());
+        } else {
+            SpinlockLocker mm_locker(s_mm_lock);
+            unmap_with_locks_held(ShouldDeallocateVirtualRange::Yes, ShouldFlushTLB::Yes, pd_locker, mm_locker);
+            VERIFY(!m_page_directory);
+        }
     }
 }
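
Note that the destructor has no access to the original prot value, so it treats "not readable, not writable, not executable" as PROT_NONE. A hypothetical convenience predicate, not part of this commit or the Region API, that names this check could look like the following sketch:

    // Hypothetical helper (not in the actual Region class): a region created
    // with PROT_NONE has none of the three access flags set, so it never
    // received any page table mappings and can skip the unmap path entirely.
    static bool region_was_never_mapped(Memory::Region const& region)
    {
        return !region.is_readable() && !region.is_writable() && !region.is_executable();
    }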