Kernel: Allocate page tables for the entire kmalloc VM range up front
This avoids getting caught with our pants down when heap expansion fails due to missing page tables. It also avoids a circular dependency on kmalloc() by way of HashMap::set() in MemoryManager::ensure_pte().
parent 8a51f64503
commit f49649645c

Notes (sideshowbarker, 2024-07-17 22:10:50 +09:00):
    Author: https://github.com/awesomekling
    Commit: https://github.com/SerenityOS/serenity/commit/f49649645ca
1 changed file with 22 additions and 15 deletions
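For readers skimming the diff below, the cycle being broken is roughly ensure_pte() -> HashMap::set() -> kmalloc() -> heap expansion -> ensure_pte(). The following is a minimal standalone sketch of the same pattern; it is not SerenityOS code, and PageTables, Heap, and every other name in it are hypothetical stand-ins chosen for illustration. The point is only that reserving the virtual range and creating every page-table entry once, up front, lets later expansion use a lookup that can never allocate.

// Hypothetical, self-contained illustration of the "allocate page tables up
// front" pattern. None of these names are SerenityOS APIs; they only mirror
// the shape of the fix in this commit.
#include <cassert>
#include <cstddef>
#include <vector>

constexpr std::size_t PAGE_SIZE = 4096;

struct PageTables {
    // One slot per page in the reserved kmalloc virtual range.
    std::vector<bool> pte_present;

    explicit PageTables(std::size_t page_count)
        : pte_present(page_count, false)
    {
    }

    // "Lazy" flavor: in the real kernel, creating a missing page table can
    // itself allocate (HashMap::set() inside MemoryManager::ensure_pte()),
    // which is exactly the re-entrancy hazard during heap expansion.
    void ensure_pte(std::size_t page_index) { pte_present.at(page_index) = true; }

    // Non-allocating flavor: only reports whether the entry already exists,
    // analogous to MM.pte() in the diff.
    bool has_pte(std::size_t page_index) const { return pte_present.at(page_index); }
};

struct Heap {
    PageTables tables;
    std::size_t pages_mapped { 0 };

    explicit Heap(std::size_t reserved_pages)
        : tables(reserved_pages)
    {
        // The fix: walk the whole reserved range once, up front, so that
        // expansion never has to create page tables on demand.
        for (std::size_t i = 0; i < reserved_pages; ++i)
            tables.ensure_pte(i);
    }

    void expand(std::size_t extra_pages)
    {
        for (std::size_t i = 0; i < extra_pages; ++i) {
            // Mirrors the MM.pte() + VERIFY(pte) pair in the diff: a pure
            // lookup that must succeed and cannot recurse into the allocator.
            assert(tables.has_pte(pages_mapped + i));
        }
        pages_mapped += extra_pages;
    }
};

int main()
{
    Heap heap(64 * 1024 * 1024 / PAGE_SIZE); // 64 MiB reserved, as in the commit
    heap.expand(16);                         // safe: every PTE already exists
    return 0;
}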
@@ -136,15 +136,8 @@ struct KmallocGlobalData {
         for (auto vaddr = new_subheap_base; !physical_pages.is_empty(); vaddr = vaddr.offset(PAGE_SIZE)) {
             // FIXME: We currently leak physical memory when mapping it into the kmalloc heap.
             auto& page = physical_pages.take_one().leak_ref();
-            auto* pte = MM.ensure_pte(MM.kernel_page_directory(), vaddr);
-            if (!pte) {
-                // FIXME: If ensure_pte() fails due to lazy page directory construction, it returns nullptr
-                //        and we're in trouble. Find a way to avoid getting into that situation.
-                //        Perhaps we could do a dry run through the address range and ensure_pte() for each
-                //        virtual address to ensure that each PTE is available. Not maximally efficient,
-                //        but could work.. Needs more thought.
-                PANIC("Unable to acquire PTE during heap expansion");
-            }
+            auto* pte = MM.pte(MM.kernel_page_directory(), vaddr);
+            VERIFY(pte);
             pte->set_physical_page_base(page.paddr().get());
             pte->set_global(true);
             pte->set_user_allowed(false);
@@ -160,6 +153,25 @@ struct KmallocGlobalData {
         return true;
     }
 
+    void enable_expansion()
+    {
+        // FIXME: This range can be much bigger on 64-bit, but we need to figure something out for 32-bit.
+        auto virtual_range = MM.kernel_page_directory().range_allocator().try_allocate_anywhere(64 * MiB, 1 * MiB);
+
+        expansion_data = KmallocGlobalData::ExpansionData {
+            .virtual_range = virtual_range.value(),
+            .next_virtual_address = virtual_range.value().base(),
+        };
+
+        // Make sure the entire kmalloc VM range is backed by page tables.
+        // This avoids having to deal with lazy page table allocation during heap expansion.
+        SpinlockLocker mm_locker(Memory::s_mm_lock);
+        SpinlockLocker pd_locker(MM.kernel_page_directory().get_lock());
+        for (auto vaddr = virtual_range.value().base(); vaddr < virtual_range.value().end(); vaddr = vaddr.offset(PAGE_SIZE)) {
+            MM.ensure_pte(MM.kernel_page_directory(), vaddr);
+        }
+    }
+
     struct ExpansionData {
         Memory::VirtualRange virtual_range;
         VirtualAddress next_virtual_address;
@@ -189,12 +201,7 @@ READONLY_AFTER_INIT static u8* s_end_of_eternal_range;
 
 void kmalloc_enable_expand()
 {
-    // FIXME: This range can be much bigger on 64-bit, but we need to figure something out for 32-bit.
-    auto virtual_range = MM.kernel_page_directory().range_allocator().try_allocate_anywhere(64 * MiB, 1 * MiB);
-    g_kmalloc_global->expansion_data = KmallocGlobalData::ExpansionData {
-        .virtual_range = virtual_range.value(),
-        .next_virtual_address = virtual_range.value().base(),
-    };
+    g_kmalloc_global->enable_expansion();
 }
 
 static inline void kmalloc_verify_nospinlock_held()
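One observation on the shape of the change (a reviewer-style note, not part of the commit): moving the range reservation and the up-front ensure_pte() walk into KmallocGlobalData::enable_expansion() lets kmalloc_enable_expand() shrink to a single call, and the walk happens under both Memory::s_mm_lock and the kernel page directory's lock, at a point where calling back into the memory manager is still safe. After that, expand() can replace MM.ensure_pte() with MM.pte() plus VERIFY(pte): a pure lookup that cannot fail quietly and, unlike ensure_pte() with its HashMap::set(), cannot recurse into kmalloc() in the middle of heap expansion.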