Kernel: Remove redundant hash map of page tables in PageDirectory

The purpose of the PageDirectory::m_page_tables map was really just
to act as ref-counting storage for PhysicalPage objects that were
being used for the directory's page tables.

However, this was basically redundant, since we can find the physical
address of each page table from the page directory, and we can find the
PhysicalPage object from MemoryManager::get_physical_page_entry().
So if we just manually ref() and unref() the pages when they go in and
out of the directory, we no longer need PageDirectory::m_page_tables!
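
To illustrate the pattern (a condensed sketch built from the hunks below,
not a complete function): installing a page table leaks a ref on purpose,
and releasing it recovers the PhysicalPage from the PDE's physical address
and drops that ref again.

    // Installing a page table into a directory entry (sketch; error and
    // purge handling elided):
    bool did_purge = false;
    auto page_table = allocate_user_physical_page(ShouldZeroFill::Yes, &did_purge);
    pde.set_page_table_base(page_table->paddr().get());
    pde.set_present(true);
    // NOTE: This leaked ref is matched by the unref below.
    (void)page_table.leak_ref();

    // Releasing it again once every PTE in the table is clear (sketch):
    get_physical_page_entry(PhysicalAddress { pde.page_table_base() }).allocated.physical_page.unref();
    pde.clear();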

Not only does this remove a bunch of kmalloc() traffic, it also solves
a race condition that would occur when lazily adding a new page table
to a directory:

Previously, when MemoryManager::ensure_pte() would call HashMap::set()
to insert the new page table into m_page_tables, if the HashMap had to
grow its internal storage, it would call kmalloc(). If that kmalloc()
would need to perform heap expansion, it would end up calling
ensure_pte() again, which would clobber the page directory mapping used
by the outer invocation of ensure_pte().

The net result of the above bug would be that any invocation of
MemoryManager::ensure_pte() could erroneously return a pointer into
a kernel page table instead of the correct one!
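
Roughly, the problematic chain of calls was (an illustration in comments,
not actual code):

    // ensure_pte(page_directory, vaddr)
    //   -> pd = quickmap_pd(page_directory, ...)       // quickmap the page directory
    //   -> page_directory.m_page_tables.set(key, pt)   // HashMap may need to grow...
    //        -> kmalloc()                               // ...which may expand the heap...
    //             -> ensure_pte(kernel_page_directory(), some_heap_vaddr)
    //                  -> quickmap_pd(kernel_page_directory(), ...)  // clobbers the outer mapping
    //
    // When the nested call returns, the outer ensure_pte() still reads through
    // the quickmapped pd, which now maps the kernel page directory instead of
    // the intended one.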

This whole problem goes away when we remove the HashMap, as ensure_pte()
no longer does anything that allocates from the heap.
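
For reference, the reworked ensure_pte() fast path (condensed from the diff
below) simply returns the existing mapping when the PDE is already present,
and the slow path only allocates a physical page, never heap memory:

    auto* pd = quickmap_pd(page_directory, page_directory_table_index);
    auto& pde = pd[page_directory_index];
    if (pde.is_present())
        return &quickmap_pt(PhysicalAddress(pde.page_table_base()))[page_table_index];
    // Otherwise: allocate a zero-filled page table, install it in the PDE,
    // and leak a ref that release_pte() will drop later.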
Author: Andreas Kling
Date:   2022-01-10 16:00:46 +01:00
Commit: 24ecf1d021 (parent bdbff9df24)
3 changed files with 34 additions and 43 deletions

Kernel/Memory/MemoryManager.cpp

@@ -123,7 +123,7 @@ UNMAP_AFTER_INIT void MemoryManager::unmap_prekernel()
     auto end = end_of_prekernel_image.page_base().get();
     for (auto i = start; i <= end; i += PAGE_SIZE)
-        release_pte(kernel_page_directory(), VirtualAddress(i), i == end ? IsLastPTERelease::Yes : IsLastPTERelease::No, UnsafeIgnoreMissingPageTable::Yes);
+        release_pte(kernel_page_directory(), VirtualAddress(i), i == end ? IsLastPTERelease::Yes : IsLastPTERelease::No);
     flush_tlb(&kernel_page_directory(), VirtualAddress(start), (end - start) / PAGE_SIZE);
 }
@@ -505,9 +505,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
     // so finish setting up the kernel page directory
     m_kernel_page_directory->allocate_kernel_directory();
-    // Now create legit PhysicalPage objects for the page tables we created, so that
-    // we can put them into kernel_page_directory().m_page_tables
-    auto& kernel_page_tables = kernel_page_directory().m_page_tables;
+    // Now create legit PhysicalPage objects for the page tables we created.
     virtual_page_array_current_page = virtual_page_array_base;
     for (size_t pt_index = 0; pt_index < needed_page_table_count; pt_index++) {
         VERIFY(virtual_page_array_current_page <= range.end().get());
@@ -515,8 +513,9 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
         auto physical_page_index = PhysicalAddress::physical_page_index(pt_paddr.get());
         auto& physical_page_entry = m_physical_page_entries[physical_page_index];
         auto physical_page = adopt_ref(*new (&physical_page_entry.allocated.physical_page) PhysicalPage(MayReturnToFreeList::No));
-        auto result = kernel_page_tables.set(virtual_page_array_current_page & ~0x1fffff, move(physical_page));
-        VERIFY(result == AK::HashSetResult::InsertedNewEntry);
+        // NOTE: This leaked ref is matched by the unref in MemoryManager::release_pte()
+        (void)physical_page.leak_ref();
         virtual_page_array_current_page += (PAGE_SIZE / sizeof(PageTableEntry)) * PAGE_SIZE;
     }
@@ -568,39 +567,38 @@ PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
     u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
     auto* pd = quickmap_pd(page_directory, page_directory_table_index);
-    PageDirectoryEntry& pde = pd[page_directory_index];
-    if (!pde.is_present()) {
-        bool did_purge = false;
-        auto page_table = allocate_user_physical_page(ShouldZeroFill::Yes, &did_purge);
-        if (!page_table) {
-            dbgln("MM: Unable to allocate page table to map {}", vaddr);
-            return nullptr;
-        }
-        if (did_purge) {
-            // If any memory had to be purged, ensure_pte may have been called as part
-            // of the purging process. So we need to re-map the pd in this case to ensure
-            // we're writing to the correct underlying physical page
-            pd = quickmap_pd(page_directory, page_directory_table_index);
-            VERIFY(&pde == &pd[page_directory_index]); // Sanity check
+    auto& pde = pd[page_directory_index];
+    if (pde.is_present())
+        return &quickmap_pt(PhysicalAddress(pde.page_table_base()))[page_table_index];
 
-            VERIFY(!pde.is_present()); // Should have not changed
-        }
-        pde.set_page_table_base(page_table->paddr().get());
-        pde.set_user_allowed(true);
-        pde.set_present(true);
-        pde.set_writable(true);
-        pde.set_global(&page_directory == m_kernel_page_directory.ptr());
-        // Use page_directory_table_index and page_directory_index as key
-        // This allows us to release the page table entry when no longer needed
-        auto result = page_directory.m_page_tables.set(vaddr.get() & ~(FlatPtr)0x1fffff, page_table.release_nonnull());
-        // If you're hitting this VERIFY on x86_64 chances are a 64-bit pointer was truncated somewhere
-        VERIFY(result == AK::HashSetResult::InsertedNewEntry);
+    bool did_purge = false;
+    auto page_table = allocate_user_physical_page(ShouldZeroFill::Yes, &did_purge);
+    if (!page_table) {
+        dbgln("MM: Unable to allocate page table to map {}", vaddr);
+        return nullptr;
     }
+    if (did_purge) {
+        // If any memory had to be purged, ensure_pte may have been called as part
+        // of the purging process. So we need to re-map the pd in this case to ensure
+        // we're writing to the correct underlying physical page
+        pd = quickmap_pd(page_directory, page_directory_table_index);
+        VERIFY(&pde == &pd[page_directory_index]); // Sanity check
 
-    return &quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base()))[page_table_index];
+        VERIFY(!pde.is_present()); // Should have not changed
+    }
+    pde.set_page_table_base(page_table->paddr().get());
+    pde.set_user_allowed(true);
+    pde.set_present(true);
+    pde.set_writable(true);
+    pde.set_global(&page_directory == m_kernel_page_directory.ptr());
+    // NOTE: This leaked ref is matched by the unref in MemoryManager::release_pte()
+    (void)page_table.leak_ref();
+    return &quickmap_pt(PhysicalAddress(pde.page_table_base()))[page_table_index];
 }
-void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress vaddr, IsLastPTERelease is_last_pte_release, UnsafeIgnoreMissingPageTable unsafe_ignore_missing_page_table)
+void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress vaddr, IsLastPTERelease is_last_pte_release)
 {
     VERIFY_INTERRUPTS_DISABLED();
     VERIFY(s_mm_lock.is_locked_by_current_processor());
@@ -627,10 +625,8 @@ void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress va
                 }
             }
             if (all_clear) {
+                get_physical_page_entry(PhysicalAddress { pde.page_table_base() }).allocated.physical_page.unref();
                 pde.clear();
-                auto result = page_directory.m_page_tables.remove(vaddr.get() & ~0x1fffff);
-                VERIFY(unsafe_ignore_missing_page_table == UnsafeIgnoreMissingPageTable::Yes || result);
             }
         }
     }

Kernel/Memory/MemoryManager.h

@@ -278,11 +278,7 @@ private:
         Yes,
         No
     };
-    enum class UnsafeIgnoreMissingPageTable {
-        Yes,
-        No
-    };
-    void release_pte(PageDirectory&, VirtualAddress, IsLastPTERelease, UnsafeIgnoreMissingPageTable = UnsafeIgnoreMissingPageTable::No);
+    void release_pte(PageDirectory&, VirtualAddress, IsLastPTERelease);
     RefPtr<PageDirectory> m_kernel_page_directory;

Kernel/Memory/PageDirectory.h

@@ -64,7 +64,6 @@ private:
 #else
     RefPtr<PhysicalPage> m_directory_pages[4];
 #endif
-    HashMap<FlatPtr, NonnullRefPtr<PhysicalPage>> m_page_tables;
     RecursiveSpinlock m_lock;
 };