Kernel: Fix partial munmap() deallocating still-in-use VM
We were always returning the full VM range of the partially-unmapped Region to the range allocator. This caused us to re-use those addresses for subsequent VM allocations.

This patch also skips creating a new VMObject in partial munmap(). Instead we just make split regions that point into the same VMObject.

This fixes the mysterious GCC ICE on large C++ programs.
parent d5f3972012
commit 2584636d19
Notes: sideshowbarker 2024-07-19 11:58:40 +09:00
Author: https://github.com/awesomekling
Commit: https://github.com/SerenityOS/serenity/commit/2584636d196
4 changed files with 17 additions and 12 deletions
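
For context on the bug: the failure mode is reproducible from plain userspace code. The sketch below is a minimal illustration using POSIX mmap()/munmap(), not SerenityOS-specific code; it maps three pages, unmaps only the middle one, and depends on the outer pages staying valid, which is exactly what the old kernel path broke.

    #include <cassert>
    #include <cstdio>
    #include <cstring>
    #include <sys/mman.h>
    #include <unistd.h>

    int main()
    {
        size_t page_size = sysconf(_SC_PAGESIZE);

        // Map three pages as a single anonymous region and fill them.
        void* mem = mmap(nullptr, 3 * page_size, PROT_READ | PROT_WRITE,
                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        assert(mem != MAP_FAILED);
        auto* base = static_cast<char*>(mem);
        memset(base, 'A', 3 * page_size);

        // Partially unmap: take out only the middle page. The kernel must
        // split the region in two, and must NOT return the outer pages'
        // addresses to the range allocator, since they are still mapped.
        int rc = munmap(base + page_size, page_size);
        assert(rc == 0);

        // Before this fix, the whole original range was handed back, so a
        // later VM allocation could overlap the still-live outer pages.
        assert(base[0] == 'A');
        assert(base[2 * page_size] == 'A');
        printf("outer pages intact after partial munmap\n");
        return 0;
    }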
Kernel/Process.cpp

@@ -108,9 +108,9 @@ static unsigned prot_to_region_access_flags(int prot)
     return access;
 }
 
-Region& Process::allocate_split_region(const Region& source_region, const Range& range)
+Region& Process::allocate_split_region(const Region& source_region, const Range& range, size_t offset_in_vmo)
 {
-    m_regions.append(Region::create_user_accessible(range, source_region.name(), source_region.access()));
+    m_regions.append(Region::create_user_accessible(range, source_region.vmobject(), offset_in_vmo, source_region.name(), source_region.access()));
     return m_regions.last();
 }
 
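What the hunk above changes: a split region now points into the source region's VMObject at a given offset, instead of receiving a brand-new VMObject whose physical page pointers must be copied. A toy model of that ownership shape, with hypothetical stand-in types (and a page index where the real offset_in_vmo is a byte offset):

    #include <cstddef>
    #include <memory>
    #include <vector>

    // Illustrative stand-ins only; these are not the kernel's real classes.
    // One VMObject owns the physical pages; regions are windows into it.
    struct VMObject {
        std::vector<int> physical_pages; // ints stand in for PhysicalPage refs
    };

    struct Region {
        std::shared_ptr<VMObject> vmobject; // shared with the source region
        size_t first_page { 0 };            // this region's window into the VMObject
        size_t page_count { 0 };

        int physical_page(size_t i) const
        {
            return vmobject->physical_pages[first_page + i];
        }
    };

    // After this patch, splitting is just "same VMObject, different window":
    // no new VMObject, no copying of physical page pointers.
    Region allocate_split_region(const Region& source, size_t first_page, size_t page_count)
    {
        return Region { source.vmobject, source.first_page + first_page, page_count };
    }

Sharing the VMObject is what makes the rest of the fix straightforward: since no pages are copied, the only thing the kernel still owes the allocator is the carved-out address range itself.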
@@ -244,20 +244,24 @@ int Process::sys$munmap(void* addr, size_t size)
     auto remaining_ranges_after_unmap = old_region_range.carve(range_to_unmap);
+    ASSERT(!remaining_ranges_after_unmap.is_empty());
     auto make_replacement_region = [&](const Range& new_range) -> Region& {
-        auto& new_region = allocate_split_region(*old_region, new_range);
         ASSERT(new_range.base() >= old_region_range.base());
         ASSERT(new_range.end() <= old_region_range.end());
         size_t new_range_offset_in_old_region = new_range.base().get() - old_region_range.base().get();
-        size_t first_physical_page_of_new_region_in_old_region = new_range_offset_in_old_region / PAGE_SIZE;
-        for (size_t i = 0; i < new_region.page_count(); ++i) {
-            new_region.vmobject().physical_pages()[i] = old_region->vmobject().physical_pages()[first_physical_page_of_new_region_in_old_region + i];
-        }
-        return new_region;
+        return allocate_split_region(*old_region, new_range, new_range_offset_in_old_region);
     };
     Vector<Region*, 2> new_regions;
     for (auto& new_range : remaining_ranges_after_unmap) {
         new_regions.unchecked_append(&make_replacement_region(new_range));
     }
+
+    // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
+    MM.unmap_region(*old_region, false);
     deallocate_region(*old_region);
+
+    // Instead we give back the unwanted VM manually.
+    page_directory().range_allocator().deallocate(range_to_unmap);
+
+    // And finally we map the new region(s).
     for (auto* new_region : new_regions) {
         MM.map_region(*this, *new_region);
     }
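The carve step above is the heart of the split: removing range_to_unmap from the old region's range leaves at most two pieces, which is why a Vector<Region*, 2> suffices. A self-contained sketch of that arithmetic, assuming a simplified byte-addressed Range rather than the kernel's actual class:

    #include <cstddef>
    #include <vector>

    // Simplified stand-in for the kernel's Range: the half-open span
    // [base, base + size) of a virtual address range, in bytes.
    struct Range {
        size_t base { 0 };
        size_t size { 0 };
        size_t end() const { return base + size; }
    };

    // Carving `inner` out of `outer` leaves at most two pieces: the part
    // below `inner` and the part above it.
    std::vector<Range> carve(const Range& outer, const Range& inner)
    {
        std::vector<Range> remaining;
        if (inner.base > outer.base)
            remaining.push_back({ outer.base, inner.base - outer.base });
        if (inner.end() < outer.end())
            remaining.push_back({ inner.end(), outer.end() - inner.end() });
        return remaining;
    }

For each remaining piece, piece.base - outer.base is precisely the new_range_offset_in_old_region that the patched code hands to allocate_split_region() as offset_in_vmo.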
Kernel/Process.h

@@ -276,7 +276,7 @@ public:
     Region* allocate_region(VirtualAddress, size_t, const String& name, int prot = PROT_READ | PROT_WRITE, bool commit = true);
     bool deallocate_region(Region& region);
 
-    Region& allocate_split_region(const Region& source_region, const Range&);
+    Region& allocate_split_region(const Region& source_region, const Range&, size_t offset_in_vmo);
 
     void set_being_inspected(bool b) { m_being_inspected = b; }
     bool is_being_inspected() const { return m_being_inspected; }
Kernel/VM/MemoryManager.cpp

@@ -705,7 +705,7 @@ void MemoryManager::map_region_at_address(PageDirectory& page_directory, Region&
     }
 }
 
-bool MemoryManager::unmap_region(Region& region)
+bool MemoryManager::unmap_region(Region& region, bool deallocate_range)
 {
     ASSERT(region.page_directory());
     InterruptDisabler disabler;

@@ -722,7 +722,8 @@ bool MemoryManager::unmap_region(Region& region)
         dbgprintf("MM: >> Unmapped V%p => P%p <<\n", vaddr, physical_page ? physical_page->paddr().get() : 0);
 #endif
     }
-    region.page_directory()->range_allocator().deallocate({ region.vaddr(), region.size() });
+    if (deallocate_range)
+        region.page_directory()->range_allocator().deallocate(region.range());
     region.release_page_directory();
     return true;
 }
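The effect of the two MemoryManager hunks, as a self-contained sketch with hypothetical stand-in types: page-table teardown always happens, but returning the address range to the allocator is now opt-out, so sys$munmap()'s partial path can pass false and later give back only the carved-out sub-range.

    #include <cstdio>

    // Illustrative stand-ins; not the kernel's real types.
    struct Range { unsigned base { 0 }; unsigned size { 0 }; };
    struct Region { Range range; };

    static void unmap_page_table_entries(Region&) { puts("PTEs torn down"); }
    static void return_range_to_allocator(const Range& r) { printf("range %#x+%#x freed\n", r.base, r.size); }

    // The new flag separates "stop translating these addresses" (always)
    // from "these addresses may be handed out again" (only on request).
    bool unmap_region(Region& region, bool deallocate_range = true)
    {
        unmap_page_table_entries(region);
        if (deallocate_range)
            return_range_to_allocator(region.range);
        return true;
    }

    int main()
    {
        Region r { { 0x10000000u, 0x3000u } };
        unmap_region(r, /* deallocate_range= */ false); // partial munmap path
        unmap_region(r);                                // full unmap path
        return 0;
    }

As a side effect, the patched call site also switches from spelling out { region.vaddr(), region.size() } to the equivalent region.range().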
Kernel/VM/MemoryManager.h

@@ -48,7 +48,7 @@ public:
     PageFaultResponse handle_page_fault(const PageFault&);
 
     bool map_region(Process&, Region&);
-    bool unmap_region(Region&);
+    bool unmap_region(Region&, bool deallocate_range = true);
 
     void populate_page_directory(PageDirectory&);