From 3e909c0c49d687f1e1aa53cf9a8ed7ffecffbf2a Mon Sep 17 00:00:00 2001
From: Idan Horowitz <idan.horowitz@gmail.com>
Date: Thu, 5 Aug 2021 20:58:09 +0300
Subject: [PATCH] Kernel: Remove double-counting of allocated pages in
 AnonymousVMObject

When constructing an AnonymousVMObject with the AllocateNow allocation
strategy we accidentally allocated the committed pages directly through
MemoryManager instead of taking them from our m_unused_committed_pages
CommittedPhysicalPageSet, which meant they were counted as allocated in
MemoryManager, but were still counted as unallocated in the PageSet, who
would then try to uncommit them on destruction, resulting in a failed
assertion.

To help prevent similar issues in the future a Badge was added to
MM::allocate_committed_user_physical_page to prevent allocation of
committed pages not via a CommittedPhysicalPageSet.
---
 Kernel/VM/AnonymousVMObject.cpp | 2 +-
 Kernel/VM/MemoryManager.cpp     | 4 ++--
 Kernel/VM/MemoryManager.h       | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/Kernel/VM/AnonymousVMObject.cpp b/Kernel/VM/AnonymousVMObject.cpp
index 5bd9297d665..84f8e17b266 100644
--- a/Kernel/VM/AnonymousVMObject.cpp
+++ b/Kernel/VM/AnonymousVMObject.cpp
@@ -131,7 +131,7 @@ AnonymousVMObject::AnonymousVMObject(size_t size, AllocationStrategy strategy, O
     if (strategy == AllocationStrategy::AllocateNow) {
         // Allocate all pages right now. We know we can get all because we committed the amount needed
         for (size_t i = 0; i < page_count(); ++i)
-            physical_pages()[i] = MM.allocate_committed_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
+            physical_pages()[i] = m_unused_committed_pages->take_one();
     } else {
         auto& initial_page = (strategy == AllocationStrategy::Reserve) ? MM.lazy_committed_page() : MM.shared_zero_page();
         for (size_t i = 0; i < page_count(); ++i)
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 2df04696696..5e769fa6eb7 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -840,7 +840,7 @@ RefPtr<PhysicalPage> MemoryManager::find_free_user_physical_page(bool committed)
     return page;
 }
 
-NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_user_physical_page(ShouldZeroFill should_zero_fill)
+NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_user_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill should_zero_fill)
 {
     ScopedSpinLock lock(s_mm_lock);
     auto page = find_free_user_physical_page(true);
@@ -1134,7 +1134,7 @@ NonnullRefPtr<PhysicalPage> CommittedPhysicalPageSet::take_one()
 {
     VERIFY(m_page_count > 0);
     --m_page_count;
-    return MM.allocate_committed_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
+    return MM.allocate_committed_user_physical_page({}, MemoryManager::ShouldZeroFill::Yes);
 }
 
 void CommittedPhysicalPageSet::uncommit_one()
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index f548e182cab..20aafea3d48 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -174,7 +174,7 @@ public:
     Optional<CommittedPhysicalPageSet> commit_user_physical_pages(size_t page_count);
     void uncommit_user_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count);
-    NonnullRefPtr<PhysicalPage> allocate_committed_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes);
+    NonnullRefPtr<PhysicalPage> allocate_committed_user_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
     RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
     RefPtr<PhysicalPage> allocate_supervisor_physical_page();
     NonnullRefPtrVector<PhysicalPage> allocate_contiguous_supervisor_physical_pages(size_t size);