Kernel: Move sys$munmap functionality into a helper method

Gunnar Beutner, 4 years ago
commit 95c2166ca9
5 changed files with 123 additions and 94 deletions
  1. Kernel/Syscalls/mmap.cpp (+3 -94)
  2. Kernel/VM/Range.cpp (+18 -0)
  3. Kernel/VM/Range.h (+2 -0)
  4. Kernel/VM/Space.cpp (+98 -0)
  5. Kernel/VM/Space.h (+2 -0)

+ 3 - 94
Kernel/Syscalls/mmap.cpp

@@ -507,100 +507,9 @@ KResultOr<int> Process::sys$munmap(Userspace<void*> addr, size_t size)
 {
     REQUIRE_PROMISE(stdio);
 
-    if (!size)
-        return EINVAL;
-
-    auto range_or_error = expand_range_to_page_boundaries(addr, size);
-    if (range_or_error.is_error())
-        return range_or_error.error();
-
-    auto range_to_unmap = range_or_error.value();
-
-    if (!is_user_range(range_to_unmap))
-        return EFAULT;
-
-    if (auto* whole_region = space().find_region_from_range(range_to_unmap)) {
-        if (!whole_region->is_mmap())
-            return EPERM;
-
-        PerformanceManager::add_unmap_perf_event(*this, whole_region->range());
-
-        bool success = space().deallocate_region(*whole_region);
-        VERIFY(success);
-        return 0;
-    }
-
-    if (auto* old_region = space().find_region_containing(range_to_unmap)) {
-        if (!old_region->is_mmap())
-            return EPERM;
-
-        // Remove the old region from our regions tree, since we're going to add another region
-        // with the exact same start address, but don't deallocate it yet
-        auto region = space().take_region(*old_region);
-        VERIFY(region);
-
-        // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
-        region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
-
-        auto new_regions = space().split_region_around_range(*region, range_to_unmap);
-
-        // Instead we give back the unwanted VM manually.
-        space().page_directory().range_allocator().deallocate(range_to_unmap);
-
-        // And finally we map the new region(s) using our page directory (they were just allocated and don't have one).
-        for (auto* new_region : new_regions) {
-            new_region->map(space().page_directory());
-        }
-
-        if (auto* event_buffer = current_perf_events_buffer()) {
-            [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_MUNMAP, range_to_unmap.base().get(), range_to_unmap.size(), nullptr);
-        }
-
-        return 0;
-    }
-
-    // Try again while checking multiple regions at a time
-    // (slow path: no caching)
-    const auto& regions = space().find_regions_intersecting(range_to_unmap);
-
-    // Check if any of the regions is not mmapped, so we don't accidentally
-    // error out after having unmapped only part of the range
-    for (auto* region : regions) {
-        if (!region->is_mmap())
-            return EPERM;
-    }
-
-    Vector<Region*, 2> new_regions;
-
-    for (auto* old_region : regions) {
-        // If it's a full match, we can delete the entire old region
-        if (old_region->range().intersect(range_to_unmap).size() == old_region->size()) {
-            bool res = space().deallocate_region(*old_region);
-            VERIFY(res);
-            continue;
-        }
-
-        // Remove the old region from our regions tree, since we're going to add another region
-        // with the exact same start address, but don't deallocate it yet
-        auto region = space().take_region(*old_region);
-        VERIFY(region);
-
-        // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
-        region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
-
-        // Otherwise just split the regions and collect them for future mapping
-        if (!new_regions.try_append(space().split_region_around_range(*region, range_to_unmap)))
-            return ENOMEM;
-    }
-    // Instead we give back the unwanted VM manually at the end.
-    space().page_directory().range_allocator().deallocate(range_to_unmap);
-    // And finally we map the new region(s) using our page directory (they were just allocated and don't have one).
-    for (auto* new_region : new_regions) {
-        new_region->map(space().page_directory());
-    }
-
-    PerformanceManager::add_unmap_perf_event(*this, range_to_unmap);
-
+    auto result = space().unmap_mmap_range(VirtualAddress { addr }, size);
+    if (result.is_error())
+        return result;
     return 0;
 }
 

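For context, the logic being moved handles three shapes of munmap(2) request: a range that exactly matches one region, a hole inside a single region, and a range spanning several regions. A minimal userspace sketch of the first two (standard POSIX calls, not part of this commit; sizes are illustrative):

#include <assert.h>
#include <sys/mman.h>
#include <unistd.h>

int main()
{
    long page = sysconf(_SC_PAGESIZE);

    // Whole-region match: handled by find_region_from_range() +
    // deallocate_region().
    void* a = mmap(nullptr, 4 * page, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    assert(a != MAP_FAILED);
    assert(munmap(a, 4 * page) == 0);

    // Hole inside one region: find_region_containing() + split_region_around_range();
    // the pages on either side of the hole stay mapped.
    void* b = mmap(nullptr, 4 * page, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    assert(b != MAP_FAILED);
    assert(munmap(static_cast<char*>(b) + page, page) == 0);
    return 0;
}

The third shape, a span across several mappings, falls through to the find_regions_intersecting() loop visible in the Kernel/VM/Space.cpp hunk below.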
+ 18 - 0
Kernel/VM/Range.cpp

@@ -7,6 +7,7 @@
 
 #include <AK/Vector.h>
 #include <Kernel/Arch/x86/CPU.h>
+#include <Kernel/VM/MemoryManager.h>
 #include <Kernel/VM/Range.h>
 
 namespace Kernel {
@@ -35,4 +36,21 @@ Range Range::intersect(const Range& other) const
     return Range(new_base, (new_end - new_base).get());
 }
 
+KResultOr<Range> Range::expand_to_page_boundaries(FlatPtr address, size_t size)
+{
+    if (page_round_up_would_wrap(size))
+        return EINVAL;
+
+    if ((address + size) < address)
+        return EINVAL;
+
+    if (page_round_up_would_wrap(address + size))
+        return EINVAL;
+
+    auto base = VirtualAddress { address }.page_base();
+    auto end = page_round_up(address + size);
+
+    return Range { base, end - base.get() };
+}
+
 }

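A standalone sketch of the rounding this helper performs, assuming 4 KiB pages (page_base_of and round_up_to_page are hypothetical stand-ins for VirtualAddress::page_base() and page_round_up()):

#include <cassert>
#include <cstdint>

constexpr uintptr_t page_size = 4096;

constexpr uintptr_t page_base_of(uintptr_t addr) { return addr & ~(page_size - 1); }
constexpr uintptr_t round_up_to_page(uintptr_t addr) { return (addr + page_size - 1) & ~(page_size - 1); }

int main()
{
    // expand_to_page_boundaries(0x1234, 0x100) widens [0x1234, 0x1334)
    // to the full page containing it: base 0x1000, end 0x2000.
    assert(page_base_of(0x1234) == 0x1000);
    assert(round_up_to_page(0x1234 + 0x100) == 0x2000);

    // The kernel helper rejects inputs whose end would wrap with EINVAL;
    // unsigned arithmetic makes the wraparound easy to detect.
    uintptr_t addr = UINTPTR_MAX - 0x10;
    assert(addr + 0x100 < addr);
    return 0;
}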
+ 2 - 0
Kernel/VM/Range.h

@@ -50,6 +50,8 @@ public:
     Vector<Range, 2> carve(const Range&) const;
     Range intersect(const Range&) const;
 
+    static KResultOr<Range> expand_to_page_boundaries(FlatPtr address, size_t size);
+
 private:
     VirtualAddress m_base;
     size_t m_size { 0 };

+ 98 - 0
Kernel/VM/Space.cpp

@@ -6,6 +6,7 @@
  */
 
 #include <AK/QuickSort.h>
+#include <Kernel/PerformanceManager.h>
 #include <Kernel/Process.h>
 #include <Kernel/SpinLock.h>
 #include <Kernel/VM/AnonymousVMObject.h>
@@ -37,6 +38,103 @@ Space::~Space()
 {
 }
 
+KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
+{
+    if (!size)
+        return EINVAL;
+
+    auto range_or_error = Range::expand_to_page_boundaries(addr.get(), size);
+    if (range_or_error.is_error())
+        return range_or_error.error();
+
+    auto range_to_unmap = range_or_error.value();
+
+    if (!is_user_range(range_to_unmap))
+        return EFAULT;
+
+    if (auto* whole_region = find_region_from_range(range_to_unmap)) {
+        if (!whole_region->is_mmap())
+            return EPERM;
+
+        PerformanceManager::add_unmap_perf_event(*Process::current(), whole_region->range());
+
+        bool success = deallocate_region(*whole_region);
+        VERIFY(success);
+        return KSuccess;
+    }
+
+    if (auto* old_region = find_region_containing(range_to_unmap)) {
+        if (!old_region->is_mmap())
+            return EPERM;
+
+        // Remove the old region from our regions tree, since we're going to add another region
+        // with the exact same start address, but don't deallocate it yet
+        auto region = take_region(*old_region);
+        VERIFY(region);
+
+        // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
+        region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
+
+        auto new_regions = split_region_around_range(*region, range_to_unmap);
+
+        // Instead we give back the unwanted VM manually.
+        page_directory().range_allocator().deallocate(range_to_unmap);
+
+        // And finally we map the new region(s) using our page directory (they were just allocated and don't have one).
+        for (auto* new_region : new_regions) {
+            new_region->map(page_directory());
+        }
+
+        PerformanceManager::add_unmap_perf_event(*Process::current(), range_to_unmap);
+
+        return KSuccess;
+    }
+
+    // Try again while checking multiple regions at a time
+    // (slow path: no caching)
+    const auto& regions = find_regions_intersecting(range_to_unmap);
+
+    // Check if any of the regions is not mmapped, so we don't accidentally
+    // error out after having unmapped only part of the range
+    for (auto* region : regions) {
+        if (!region->is_mmap())
+            return EPERM;
+    }
+
+    Vector<Region*, 2> new_regions;
+
+    for (auto* old_region : regions) {
+        // If it's a full match, we can delete the entire old region
+        if (old_region->range().intersect(range_to_unmap).size() == old_region->size()) {
+            bool res = deallocate_region(*old_region);
+            VERIFY(res);
+            continue;
+        }
+
+        // Remove the old region from our regions tree, since we're going to add another region
+        // with the exact same start address, but don't deallocate it yet
+        auto region = take_region(*old_region);
+        VERIFY(region);
+
+        // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
+        region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
+
+        // Otherwise just split the regions and collect them for future mapping
+        if (!new_regions.try_append(split_region_around_range(*region, range_to_unmap)))
+            return ENOMEM;
+    }
+    // Instead we give back the unwanted VM manually at the end.
+    page_directory().range_allocator().deallocate(range_to_unmap);
+    // And finally we map the new region(s) using our page directory (they were just allocated and don't have one).
+    for (auto* new_region : new_regions) {
+        new_region->map(page_directory());
+    }
+
+    PerformanceManager::add_unmap_perf_event(*Process::current(), range_to_unmap);
+
+    return KSuccess;
+}
+
 Optional<Range> Space::allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
 {
     vaddr.mask(PAGE_MASK);

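The split_region_around_range() step above boils down to interval arithmetic: keep whatever lies to the left and right of the unmapped hole. A simplified model (Interval and carve are hypothetical stand-ins for Kernel::Range and Range::carve, and the hole is assumed to be clipped to the region already):

#include <cassert>
#include <cstdint>
#include <vector>

struct Interval {
    uintptr_t base;
    uintptr_t size;
    uintptr_t end() const { return base + size; }
};

// Pieces of `region` that survive after removing `hole`: 0, 1, or 2 of them.
std::vector<Interval> carve(Interval const& region, Interval const& hole)
{
    std::vector<Interval> pieces;
    if (hole.base > region.base)
        pieces.push_back({ region.base, hole.base - region.base });
    if (hole.end() < region.end())
        pieces.push_back({ hole.end(), region.end() - hole.end() });
    return pieces;
}

int main()
{
    // Whole-region match: nothing survives (the deallocate_region() path).
    assert(carve({ 0x1000, 0x3000 }, { 0x1000, 0x3000 }).empty());

    // Interior hole: two pieces survive and are re-mapped by the kernel.
    auto pieces = carve({ 0x1000, 0x3000 }, { 0x2000, 0x1000 });
    assert(pieces.size() == 2);
    assert(pieces[0].base == 0x1000 && pieces[0].size == 0x1000);
    assert(pieces[1].base == 0x3000 && pieces[1].size == 0x1000);
    return 0;
}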
+ 2 - 0
Kernel/VM/Space.h

@@ -33,6 +33,8 @@ public:
 
     void dump_regions();
 
+    KResult unmap_mmap_range(VirtualAddress, size_t);
+
     Optional<Range> allocate_range(VirtualAddress, size_t, size_t alignment = PAGE_SIZE);
 
     KResultOr<Region*> allocate_region_with_vmobject(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);