From a96d76fd907027994d382d99309b7b3af911b8a4 Mon Sep 17 00:00:00 2001
From: Andreas Kling <awesomekling@gmail.com>
Date: Thu, 8 Aug 2019 10:43:44 +0200
Subject: [PATCH] Kernel: Put all VMObjects in an InlineLinkedList instead of a
 HashTable

Using a HashTable to track "all instances of Foo" is only useful if we
actually need to look up entries by some kind of index. And since they
are HashTable (not HashMap), the pointer *is* the index.

Since we have the pointer, we can just use it directly. Duh.

This increases sizeof(VMObject) by two pointers, but removes a global
table that had an entry for every VMObject, where the cost was higher.
It also avoids all the general hash tabling business when creating or
destroying VMObjects.

Generally we should do more of this. :^)
---
 Kernel/FileSystem/ProcFS.cpp | 20 +++++++++++---------
 Kernel/VM/MemoryManager.cpp  |  4 ++--
 Kernel/VM/MemoryManager.h    | 12 +++++++++++-
 Kernel/VM/VMObject.h         | 10 ++++++++--
 4 files changed, 32 insertions(+), 14 deletions(-)

diff --git a/Kernel/FileSystem/ProcFS.cpp b/Kernel/FileSystem/ProcFS.cpp
index 7302c6b37ac..8f9713dde71 100644
--- a/Kernel/FileSystem/ProcFS.cpp
+++ b/Kernel/FileSystem/ProcFS.cpp
@@ -399,17 +399,19 @@ Optional<KBuffer> procfs$self(InodeIdentifier)
 
 Optional<KBuffer> procfs$mm(InodeIdentifier)
 {
-    // FIXME: Implement
     InterruptDisabler disabler;
     KBufferBuilder builder;
-    for (auto* vmo : MM.m_vmos) {
-        builder.appendf("VMO: %p %s(%u): p:%4u\n",
-            vmo,
-            vmo->is_anonymous() ? "anon" : "file",
-            vmo->ref_count(),
-            vmo->page_count());
-    }
-    builder.appendf("VMO count: %u\n", MM.m_vmos.size());
+    u32 vmobject_count = 0;
+    MemoryManager::for_each_vmobject([&](auto& vmobject) {
+        ++vmobject_count;
+        builder.appendf("VMObject: %p %s(%u): p:%4u\n",
+            &vmobject,
+            vmobject.is_anonymous() ? "anon" : "file",
+            vmobject.ref_count(),
+            vmobject.page_count());
+        return IterationDecision::Continue;
+    });
+    builder.appendf("VMO count: %u\n", vmobject_count);
     builder.appendf("Free physical pages: %u\n", MM.user_physical_pages() - MM.user_physical_pages_used());
     builder.appendf("Free supervisor physical pages: %u\n", MM.super_physical_pages() - MM.super_physical_pages_used());
     return builder.build();
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index f010faddb9e..143dd6c495c 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -753,13 +753,13 @@ bool MemoryManager::validate_user_write(const Process& process, VirtualAddress vaddr)
 void MemoryManager::register_vmo(VMObject& vmo)
 {
     InterruptDisabler disabler;
-    m_vmos.set(&vmo);
+    m_vmobjects.append(&vmo);
 }
 
 void MemoryManager::unregister_vmo(VMObject& vmo)
 {
     InterruptDisabler disabler;
-    m_vmos.remove(&vmo);
+    m_vmobjects.remove(&vmo);
 }
 
 void MemoryManager::register_region(Region& region)
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index 836ba02c204..e36043bcc8b 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -80,6 +80,15 @@ public:
     unsigned super_physical_pages() const { return m_super_physical_pages; }
     unsigned super_physical_pages_used() const { return m_super_physical_pages_used; }
 
+    template<typename Callback>
+    static void for_each_vmobject(Callback callback)
+    {
+        for (auto* vmobject = MM.m_vmobjects.head(); vmobject; vmobject = vmobject->next()) {
+            if (callback(*vmobject) == IterationDecision::Break)
+                break;
+        }
+    }
+
 private:
     MemoryManager();
     ~MemoryManager();
@@ -134,10 +143,11 @@ private:
     NonnullRefPtrVector<PhysicalRegion> m_user_physical_regions;
     NonnullRefPtrVector<PhysicalRegion> m_super_physical_regions;
 
-    HashTable<VMObject*> m_vmos;
     HashTable<Region*> m_user_regions;
     HashTable<Region*> m_kernel_regions;
 
+    InlineLinkedList<VMObject> m_vmobjects;
+
     bool m_quickmap_in_use { false };
 };
 
diff --git a/Kernel/VM/VMObject.h b/Kernel/VM/VMObject.h
index 0013453ab30..2d2cb14d87a 100644
--- a/Kernel/VM/VMObject.h
+++ b/Kernel/VM/VMObject.h
@@ -1,8 +1,9 @@
 #pragma once
 
+#include <AK/InlineLinkedList.h>
+#include <AK/RefCounted.h>
 #include <AK/RefPtr.h>
 #include <AK/Vector.h>
-#include <AK/RefCounted.h>
 #include <AK/Weakable.h>
 #include <Kernel/VM/PhysicalPage.h>
 
@@ -10,7 +11,8 @@ class Inode;
 class PhysicalPage;
 
 class VMObject : public RefCounted<VMObject>
-    , public Weakable<VMObject> {
+    , public Weakable<VMObject>
+    , public InlineLinkedListNode<VMObject> {
     friend class MemoryManager;
 
 public:
@@ -27,6 +29,10 @@ public:
 
     size_t size() const { return m_physical_pages.size() * PAGE_SIZE; }
 
+    // For InlineLinkedListNode
+    VMObject* m_next { nullptr };
+    VMObject* m_prev { nullptr };
+
 protected:
     explicit VMObject(size_t);
     explicit VMObject(const VMObject&);