From f49649645ca274a3ca7b6cfbacca0b9758d24161 Mon Sep 17 00:00:00 2001
From: Andreas Kling
Date: Sun, 26 Dec 2021 02:42:49 +0100
Subject: [PATCH] Kernel: Allocate page tables for the entire kmalloc VM range
 up front

This avoids getting caught with our pants down when heap expansion fails
due to missing page tables. It also avoids a circular dependency on
kmalloc() by way of HashMap::set() in MemoryManager::ensure_pte().
---
 Kernel/Heap/kmalloc.cpp | 37 ++++++++++++++++++++++---------------
 1 file changed, 22 insertions(+), 15 deletions(-)

diff --git a/Kernel/Heap/kmalloc.cpp b/Kernel/Heap/kmalloc.cpp
index 17757578a06..73d0c9e96c3 100644
--- a/Kernel/Heap/kmalloc.cpp
+++ b/Kernel/Heap/kmalloc.cpp
@@ -136,15 +136,8 @@ struct KmallocGlobalData {
         for (auto vaddr = new_subheap_base; !physical_pages.is_empty(); vaddr = vaddr.offset(PAGE_SIZE)) {
             // FIXME: We currently leak physical memory when mapping it into the kmalloc heap.
             auto& page = physical_pages.take_one().leak_ref();
-            auto* pte = MM.ensure_pte(MM.kernel_page_directory(), vaddr);
-            if (!pte) {
-                // FIXME: If ensure_pte() fails due to lazy page directory construction, it returns nullptr
-                //        and we're in trouble. Find a way to avoid getting into that situation.
-                //        Perhaps we could do a dry run through the address range and ensure_pte() for each
-                //        virtual address to ensure that each PTE is available. Not maximally efficient,
-                //        but could work.. Needs more thought.
-                PANIC("Unable to acquire PTE during heap expansion");
-            }
+            auto* pte = MM.pte(MM.kernel_page_directory(), vaddr);
+            VERIFY(pte);
             pte->set_physical_page_base(page.paddr().get());
             pte->set_global(true);
             pte->set_user_allowed(false);
@@ -160,6 +153,25 @@ struct KmallocGlobalData {
         return true;
     }
 
+    void enable_expansion()
+    {
+        // FIXME: This range can be much bigger on 64-bit, but we need to figure something out for 32-bit.
+        auto virtual_range = MM.kernel_page_directory().range_allocator().try_allocate_anywhere(64 * MiB, 1 * MiB);
+
+        expansion_data = KmallocGlobalData::ExpansionData {
+            .virtual_range = virtual_range.value(),
+            .next_virtual_address = virtual_range.value().base(),
+        };
+
+        // Make sure the entire kmalloc VM range is backed by page tables.
+        // This avoids having to deal with lazy page table allocation during heap expansion.
+        SpinlockLocker mm_locker(Memory::s_mm_lock);
+        SpinlockLocker pd_locker(MM.kernel_page_directory().get_lock());
+        for (auto vaddr = virtual_range.value().base(); vaddr < virtual_range.value().end(); vaddr = vaddr.offset(PAGE_SIZE)) {
+            MM.ensure_pte(MM.kernel_page_directory(), vaddr);
+        }
+    }
+
     struct ExpansionData {
         Memory::VirtualRange virtual_range;
         VirtualAddress next_virtual_address;
@@ -189,12 +201,7 @@ READONLY_AFTER_INIT static u8* s_end_of_eternal_range;
 
 void kmalloc_enable_expand()
 {
-    // FIXME: This range can be much bigger on 64-bit, but we need to figure something out for 32-bit.
-    auto virtual_range = MM.kernel_page_directory().range_allocator().try_allocate_anywhere(64 * MiB, 1 * MiB);
-    g_kmalloc_global->expansion_data = KmallocGlobalData::ExpansionData {
-        .virtual_range = virtual_range.value(),
-        .next_virtual_address = virtual_range.value().base(),
-    };
+    g_kmalloc_global->enable_expansion();
 }
 
 static inline void kmalloc_verify_nospinlock_held()
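
A minimal standalone sketch of the pattern this patch adopts, assuming a toy page-table model rather than the kernel's real MemoryManager: reserve the heap's virtual range once, eagerly create a table entry for every page in it, and let later expansion map pages through a lookup that can neither allocate nor fail. ToyPageTables, HeapRange, ensure_entry, existing_entry and map_new_subheap are illustrative stand-ins, not SerenityOS APIs; only enable_expansion mirrors the helper introduced by the patch.

// Sketch only: illustrates eager page-table pre-allocation, not kernel code.
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <map>

using std::uintptr_t;

static constexpr uintptr_t PAGE_SIZE = 4096;

struct ToyPageTables {
    // Models "one PTE per virtual page". A present entry with value 0 stands
    // for a page-table slot that exists but is not yet mapped to a frame.
    std::map<uintptr_t, uintptr_t> entries;

    // Analogous to ensure_pte(): may allocate, so it is only safe while
    // allocation is still allowed (i.e. during setup).
    uintptr_t* ensure_entry(uintptr_t vaddr) { return &entries[vaddr / PAGE_SIZE]; }

    // Analogous to pte(): never allocates; returns nullptr if the slot was
    // not created up front.
    uintptr_t* existing_entry(uintptr_t vaddr)
    {
        auto it = entries.find(vaddr / PAGE_SIZE);
        return it == entries.end() ? nullptr : &it->second;
    }
};

struct HeapRange {
    uintptr_t base { 0 };
    uintptr_t size { 0 };
    uintptr_t end() const { return base + size; }
};

// Setup phase: walk the whole reserved range and create every table slot now.
static void enable_expansion(ToyPageTables& tables, HeapRange range)
{
    for (uintptr_t vaddr = range.base; vaddr < range.end(); vaddr += PAGE_SIZE)
        tables.ensure_entry(vaddr);
}

// Expansion phase: mapping must not allocate, so it only looks slots up.
static bool map_new_subheap(ToyPageTables& tables, uintptr_t vaddr, uintptr_t paddr)
{
    auto* entry = tables.existing_entry(vaddr);
    assert(entry && "slot must have been pre-allocated by enable_expansion()");
    *entry = paddr;
    return true;
}

int main()
{
    ToyPageTables tables;
    HeapRange range { .base = 0x2000'0000, .size = 64 * PAGE_SIZE };

    enable_expansion(tables, range);

    // Later, "expanding the heap" only fills in pre-existing slots.
    bool ok = map_new_subheap(tables, range.base, 0x1000);
    std::printf("mapped: %s\n", ok ? "yes" : "no");
}

The design point is the same as in the patch: the step that can allocate (and therefore fail, or recurse into the allocator) is moved entirely to setup time, so the expansion path is reduced to an infallible lookup plus an assertion.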