Browse source code

Revert "Kernel: Make PhysicalPage not movable and use atomic ref counting"

This reverts commit a89ccd842becdfbc951436da5384d8819374e0f4.
Andreas Kling 4 years ago
parent
commit
0db7e04c2e

+ 4 - 4
Kernel/VM/MemoryManager.cpp

@@ -382,7 +382,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmo
     return allocate_kernel_region_with_vmobject(range, vmobject, name, access, user_accessible, cacheable);
 }
 
-void MemoryManager::deallocate_user_physical_page(const PhysicalPage& page)
+void MemoryManager::deallocate_user_physical_page(PhysicalPage&& page)
 {
     ScopedSpinLock lock(s_mm_lock);
     for (auto& region : m_user_physical_regions) {
@@ -391,7 +391,7 @@ void MemoryManager::deallocate_user_physical_page(const PhysicalPage& page)
             continue;
         }
 
-        region.return_page(page);
+        region.return_page(move(page));
         --m_user_physical_pages_used;
 
         return;
@@ -452,7 +452,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s
     return page;
 }
 
-void MemoryManager::deallocate_supervisor_physical_page(const PhysicalPage& page)
+void MemoryManager::deallocate_supervisor_physical_page(PhysicalPage&& page)
 {
     ScopedSpinLock lock(s_mm_lock);
     for (auto& region : m_super_physical_regions) {
@@ -461,7 +461,7 @@ void MemoryManager::deallocate_supervisor_physical_page(const PhysicalPage& page
             continue;
         }
 
-        region.return_page(page);
+        region.return_page(move(page));
         --m_super_physical_pages_used;
         return;
     }

+ 2 - 2
Kernel/VM/MemoryManager.h

@@ -114,8 +114,8 @@ public:
     RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes);
     RefPtr<PhysicalPage> allocate_supervisor_physical_page();
     NonnullRefPtrVector<PhysicalPage> allocate_contiguous_supervisor_physical_pages(size_t size);
-    void deallocate_user_physical_page(const PhysicalPage&);
-    void deallocate_supervisor_physical_page(const PhysicalPage&);
+    void deallocate_user_physical_page(PhysicalPage&&);
+    void deallocate_supervisor_physical_page(PhysicalPage&&);
 
     OwnPtr<Region> allocate_contiguous_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
     OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool should_commit = true, bool cacheable = true);

+ 7 - 3
Kernel/VM/PhysicalPage.cpp

@@ -42,14 +42,18 @@ PhysicalPage::PhysicalPage(PhysicalAddress paddr, bool supervisor, bool may_retu
 {
 }
 
-void PhysicalPage::return_to_freelist() const
+void PhysicalPage::return_to_freelist() &&
 {
     ASSERT((paddr().get() & ~PAGE_MASK) == 0);
 
+    InterruptDisabler disabler;
+
+    m_ref_count = 1;
+
     if (m_supervisor)
-        MM.deallocate_supervisor_physical_page(*this);
+        MM.deallocate_supervisor_physical_page(move(*this));
     else
-        MM.deallocate_user_physical_page(*this);
+        MM.deallocate_user_physical_page(move(*this));
 
 #ifdef MM_DEBUG
     dbg() << "MM: P" << String::format("%x", m_paddr.get()) << " released to freelist";

+ 9 - 9
Kernel/VM/PhysicalPage.h

@@ -39,29 +39,29 @@ class PhysicalPage {
     friend class PageDirectory;
     friend class VMObject;
 
-    MAKE_SLAB_ALLOCATED(PhysicalPage);
-    AK_MAKE_NONMOVABLE(PhysicalPage);
-
+    MAKE_SLAB_ALLOCATED(PhysicalPage)
 public:
     PhysicalAddress paddr() const { return m_paddr; }
 
     void ref()
     {
-        m_ref_count.fetch_add(1, AK::memory_order_acq_rel);
+        ASSERT(m_ref_count);
+        ++m_ref_count;
     }
 
     void unref()
     {
-        if (m_ref_count.fetch_sub(1, AK::memory_order_acq_rel) == 1) {
+        ASSERT(m_ref_count);
+        if (!--m_ref_count) {
             if (m_may_return_to_freelist)
-                return_to_freelist();
+                move(*this).return_to_freelist();
             delete this;
         }
     }
 
     static NonnullRefPtr<PhysicalPage> create(PhysicalAddress, bool supervisor, bool may_return_to_freelist = true);
 
-    u32 ref_count() const { return m_ref_count.load(AK::memory_order_consume); }
+    u32 ref_count() const { return m_ref_count; }
 
     bool is_shared_zero_page() const;
 
@@ -69,9 +69,9 @@ private:
     PhysicalPage(PhysicalAddress paddr, bool supervisor, bool may_return_to_freelist = true);
     ~PhysicalPage() {}
 
-    void return_to_freelist() const;
+    void return_to_freelist() &&;
 
-    Atomic<u32> m_ref_count { 1 };
+    u32 m_ref_count { 1 };
     bool m_may_return_to_freelist { true };
     bool m_supervisor { false };
     PhysicalAddress m_paddr;

+ 2 - 2
Kernel/VM/PhysicalRegion.h

@@ -49,12 +49,12 @@ public:
     unsigned size() const { return m_pages; }
     unsigned used() const { return m_used; }
     unsigned free() const { return m_pages - m_used; }
-    bool contains(const PhysicalPage& page) const { return page.paddr() >= m_lower && page.paddr() <= m_upper; }
+    bool contains(PhysicalPage& page) const { return page.paddr() >= m_lower && page.paddr() <= m_upper; }
 
     RefPtr<PhysicalPage> take_free_page(bool supervisor);
     NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count, bool supervisor);
     void return_page_at(PhysicalAddress addr);
-    void return_page(const PhysicalPage& page) { return_page_at(page.paddr()); }
+    void return_page(PhysicalPage&& page) { return_page_at(page.paddr()); }
 
 private:
     unsigned find_contiguous_free_pages(size_t count);