Browse Source

Kernel: Remove unnecessary counting of VMObject-attached Regions

VMObject already has an IntrusiveList of all the Regions that map it.
We were keeping a counter in addition to this, and only using it in
a single place to avoid iterating over the list in case it only had
1 entry.

Simplify VMObject by removing this counter and always iterating the
list even if there's only 1 entry. :^)
Andreas Kling 4 years ago
parent
commit
0d963fd641
3 changed files with 5 additions and 16 deletions
  1. +4 −10
      Kernel/VM/Region.cpp
  2. +1 −1
      Kernel/VM/VMObject.cpp
  3. +0 −5
      Kernel/VM/VMObject.h

+ 4 - 10
Kernel/VM/Region.cpp

@@ -225,18 +225,12 @@ bool Region::do_remap_vmobject_page(size_t page_index, bool with_flush)
 
 bool Region::remap_vmobject_page(size_t page_index, bool with_flush)
 {
-    bool success = true;
     auto& vmobject = this->vmobject();
-    ScopedSpinLock lock(vmobject.m_lock);
-    if (vmobject.is_shared_by_multiple_regions()) {
-        vmobject.for_each_region([&](auto& region) {
-            if (!region.do_remap_vmobject_page(page_index, with_flush))
-                success = false;
-        });
-    } else {
-        if (!do_remap_vmobject_page(page_index, with_flush))
+    bool success = true;
+    vmobject.for_each_region([&](auto& region) {
+        if (!region.do_remap_vmobject_page(page_index, with_flush))
             success = false;
-    }
+    });
     return success;
 }
 

+ 1 - 1
Kernel/VM/VMObject.cpp

@@ -31,7 +31,7 @@ VMObject::~VMObject()
     }
 
     MM.unregister_vmobject(*this);
-    VERIFY(m_regions_count.load(AK::MemoryOrder::memory_order_relaxed) == 0);
+    VERIFY(m_regions.is_empty());
 }
 
 }

+ 0 - 5
Kernel/VM/VMObject.h

@@ -52,19 +52,15 @@ public:
     ALWAYS_INLINE void add_region(Region& region)
     {
         ScopedSpinLock locker(m_lock);
-        m_regions_count++;
         m_regions.append(region);
     }
 
     ALWAYS_INLINE void remove_region(Region& region)
    {
         ScopedSpinLock locker(m_lock);
-        m_regions_count--;
         m_regions.remove(region);
     }
 
-    ALWAYS_INLINE bool is_shared_by_multiple_regions() const { return m_regions_count > 1; }
-
     void register_on_deleted_handler(VMObjectDeletedHandler& handler)
     {
         ScopedSpinLock locker(m_on_deleted_lock);
@@ -93,7 +89,6 @@ private:
     VMObject& operator=(VMObject&&) = delete;
     VMObject(VMObject&&) = delete;
 
-    Atomic<u32, AK::MemoryOrder::memory_order_relaxed> m_regions_count { 0 };
     HashTable<VMObjectDeletedHandler*> m_on_deleted;
     SpinLock<u8> m_on_deleted_lock;