
Kernel: Make committed physical page allocation return NonnullRefPtr

Since we're taking from the committed set of pages, there should never
be a reason for this call to fail.

Also add a Badge to disallow taking committed pages from anywhere but
the Region class.
Author: Andreas Kling (4 years ago)
Commit: 0642f8f2c6
3 changed files with 6 additions and 4 deletions:
  Kernel/VM/AnonymousVMObject.cpp  +1 -1
  Kernel/VM/AnonymousVMObject.h    +1 -1
  Kernel/VM/Region.cpp             +4 -2

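The Badge mentioned in the message is SerenityOS's compile-time caller restriction: only the befriended class can construct a Badge<T>, so a parameter of type Badge<Region> limits callers to Region's own code. A minimal standalone sketch of the idiom (simplified stand-ins, not the actual AK::Badge):

    #include <cstdio>

    class Region;

    // Only the befriended type T can construct a Badge<T>, so a function
    // that takes Badge<Region> is callable only from Region member functions.
    template<typename T>
    class Badge {
        friend T;
        Badge() { } // private and user-provided: nobody else can make one
    };

    class AnonymousVMObjectSketch {
    public:
        void allocate_committed_page(Badge<Region>) { puts("page handed out"); }
    };

    class Region {
    public:
        void handle_fault(AnonymousVMObjectSketch& vmobject)
        {
            // OK: Region is a friend of Badge<Region>, so {} may call its constructor.
            vmobject.allocate_committed_page({});
        }
    };

    int main()
    {
        AnonymousVMObjectSketch vmobject;
        Region region;
        region.handle_fault(vmobject);
        // vmobject.allocate_committed_page({}); // error: Badge<Region>'s constructor is private
    }

This is why the call sites in Region.cpp below pass {} as the first argument: the braces construct the badge in the one context where access is allowed.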
Kernel/VM/AnonymousVMObject.cpp  +1 -1

@@ -362,7 +362,7 @@ size_t AnonymousVMObject::mark_committed_pages_for_nonvolatile_range(VolatilePag
     return pages_updated;
 }
 
-RefPtr<PhysicalPage> AnonymousVMObject::allocate_committed_page(size_t page_index)
+NonnullRefPtr<PhysicalPage> AnonymousVMObject::allocate_committed_page(Badge<Region>, size_t page_index)
 {
     {
         ScopedSpinLock lock(m_lock);

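The hunk only shows the start of the function body, but the message states the invariant that justifies the NonnullRefPtr return type: the pages were committed (set aside) up front, so handing one out later can never come up empty. A standalone sketch of that invariant, with std::shared_ptr and assert as hypothetical stand-ins for NonnullRefPtr and VERIFY:

    #include <cassert>
    #include <cstddef>
    #include <memory>
    #include <vector>

    struct PhysicalPage { };

    class CommittedPool {
    public:
        explicit CommittedPool(std::size_t count)
        {
            // All pages are reserved here; this is the step that could fail.
            for (std::size_t i = 0; i < count; ++i)
                m_pages.push_back(std::make_shared<PhysicalPage>());
        }

        // Taking from the committed set cannot fail, so the result is never null.
        std::shared_ptr<PhysicalPage> allocate_committed_page()
        {
            assert(!m_pages.empty()); // running dry here would be a bug, not an error
            auto page = m_pages.back();
            m_pages.pop_back();
            return page;
        }

    private:
        std::vector<std::shared_ptr<PhysicalPage>> m_pages;
    };

    int main()
    {
        CommittedPool pool(4);                       // commitment happens up front
        auto page = pool.allocate_committed_page();  // guaranteed non-null afterwards
        (void)page;
    }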
Kernel/VM/AnonymousVMObject.h  +1 -1

@@ -26,7 +26,7 @@ public:
     static RefPtr<AnonymousVMObject> try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>>);
     virtual RefPtr<VMObject> try_clone() override;
 
-    RefPtr<PhysicalPage> allocate_committed_page(size_t);
+    [[nodiscard]] NonnullRefPtr<PhysicalPage> allocate_committed_page(Badge<Region>, size_t);
     PageFaultResponse handle_cow_fault(size_t, VirtualAddress);
     size_t cow_pages() const;
     bool should_cow(size_t page_index, bool) const;

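Note that the header change also adds [[nodiscard]]. Together with NonnullRefPtr this tightens the contract in both directions: the caller cannot silently drop the page, and never needs a null check on it. A small illustration, with shared_ptr standing in for a ref-counted page handle:

    #include <memory>

    struct Page { };

    // [[nodiscard]] makes the compiler warn when the result is ignored.
    [[nodiscard]] std::shared_ptr<Page> allocate_page()
    {
        return std::make_shared<Page>();
    }

    int main()
    {
        allocate_page();             // warning: discarding a [[nodiscard]] value
        auto page = allocate_page(); // fine: the page is kept
        (void)page;
    }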
Kernel/VM/Region.cpp  +4 -2

@@ -426,7 +426,8 @@ PageFaultResponse Region::handle_fault(const PageFault& fault, ScopedSpinLock<Re
         auto& page_slot = physical_page_slot(page_index_in_region);
         if (page_slot->is_lazy_committed_page()) {
             auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
-            page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page(page_index_in_vmobject);
+            VERIFY(m_vmobject->is_anonymous());
+            page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page({}, page_index_in_vmobject);
             remap_vmobject_page(page_index_in_vmobject);
             return PageFaultResponse::Continue;
         }
@@ -495,7 +496,8 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region, ScopedS
         current_thread->did_zero_fault();
 
     if (page_slot->is_lazy_committed_page()) {
-        page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page(page_index_in_vmobject);
+        VERIFY(m_vmobject->is_anonymous());
+        page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page({}, page_index_in_vmobject);
         dbgln_if(PAGE_FAULT_DEBUG, "      >> ALLOCATED COMMITTED {}", page_slot->paddr());
     } else {
         page_slot = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
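Both hunks guard the static_cast with a new VERIFY(m_vmobject->is_anonymous()). The kernel builds without RTTI, so instead of dynamic_cast it asserts a cheap type predicate before downcasting. The shape of the idiom, with assert standing in for VERIFY:

    #include <cassert>

    struct VMObject {
        virtual ~VMObject() = default;
        virtual bool is_anonymous() const { return false; }
    };

    struct AnonymousVMObject : VMObject {
        bool is_anonymous() const override { return true; }
        void allocate_committed_page() { }
    };

    void handle_fault(VMObject& vmobject)
    {
        // Mirrors VERIFY(): die loudly rather than downcast the wrong type.
        assert(vmobject.is_anonymous());
        static_cast<AnonymousVMObject&>(vmobject).allocate_committed_page();
    }

    int main()
    {
        AnonymousVMObject vmobject;
        handle_fault(vmobject);
    }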