Kernel: Put all VMObjects in an InlineLinkedList instead of a HashTable

Using a HashTable to track "all instances of Foo" is only useful if we
actually need to look up entries by some kind of index. And since they
are HashTable (not HashMap), the pointer *is* the index.

Since we have the pointer, we can just use it directly. Duh.
This increases sizeof(VMObject) by two pointers, but removes a global
table that had an entry for every VMObject, where the cost was higher.
It also avoids all the general hash tabling business when creating or
destroying VMObjects. Generally we should do more of this. :^)
This commit is contained in:
Andreas Kling 2019-08-08 10:43:44 +02:00
parent fffd3a67ad
commit a96d76fd90
Notes: sideshowbarker 2024-07-19 12:49:21 +09:00
4 changed files with 32 additions and 14 deletions

View file

@@ -399,17 +399,19 @@ Optional<KBuffer> procfs$self(InodeIdentifier)
Optional<KBuffer> procfs$mm(InodeIdentifier)
{
// FIXME: Implement
InterruptDisabler disabler;
KBufferBuilder builder;
for (auto* vmo : MM.m_vmos) {
builder.appendf("VMO: %p %s(%u): p:%4u\n",
vmo,
vmo->is_anonymous() ? "anon" : "file",
vmo->ref_count(),
vmo->page_count());
}
builder.appendf("VMO count: %u\n", MM.m_vmos.size());
u32 vmobject_count = 0;
MemoryManager::for_each_vmobject([&](auto& vmobject) {
++vmobject_count;
builder.appendf("VMObject: %p %s(%u): p:%4u\n",
&vmobject,
vmobject.is_anonymous() ? "anon" : "file",
vmobject.ref_count(),
vmobject.page_count());
return IterationDecision::Continue;
});
builder.appendf("VMO count: %u\n", vmobject_count);
builder.appendf("Free physical pages: %u\n", MM.user_physical_pages() - MM.user_physical_pages_used());
builder.appendf("Free supervisor physical pages: %u\n", MM.super_physical_pages() - MM.super_physical_pages_used());
return builder.build();

View file

@@ -753,13 +753,13 @@ bool MemoryManager::validate_user_write(const Process& process, VirtualAddress v
void MemoryManager::register_vmo(VMObject& vmo)
{
InterruptDisabler disabler;
m_vmos.set(&vmo);
m_vmobjects.append(&vmo);
}
void MemoryManager::unregister_vmo(VMObject& vmo)
{
InterruptDisabler disabler;
m_vmos.remove(&vmo);
m_vmobjects.remove(&vmo);
}
void MemoryManager::register_region(Region& region)

View file

@@ -80,6 +80,15 @@ public:
unsigned super_physical_pages() const { return m_super_physical_pages; }
unsigned super_physical_pages_used() const { return m_super_physical_pages_used; }
// Iterates over every live VMObject in the system, invoking `callback`
// with a VMObject& for each one. Iteration stops early if the callback
// returns IterationDecision::Break.
// NOTE(review): walks MM.m_vmobjects (an InlineLinkedList) directly;
// presumably callers must hold interrupts disabled for a consistent
// traversal, as the register/unregister paths do — confirm at call sites.
template<typename Callback>
static void for_each_vmobject(Callback callback)
{
for (auto* vmobject = MM.m_vmobjects.head(); vmobject; vmobject = vmobject->next()) {
if (callback(*vmobject) == IterationDecision::Break)
break;
}
}
private:
MemoryManager();
~MemoryManager();
@@ -134,10 +143,11 @@ private:
NonnullRefPtrVector<PhysicalRegion> m_user_physical_regions;
NonnullRefPtrVector<PhysicalRegion> m_super_physical_regions;
HashTable<VMObject*> m_vmos;
HashTable<Region*> m_user_regions;
HashTable<Region*> m_kernel_regions;
InlineLinkedList<VMObject> m_vmobjects;
bool m_quickmap_in_use { false };
};

View file

@@ -1,8 +1,9 @@
#pragma once
#include <AK/FixedArray.h>
#include <AK/InlineLinkedList.h>
#include <AK/RefCounted.h>
#include <AK/RefPtr.h>
#include <AK/FixedArray.h>
#include <AK/Weakable.h>
#include <Kernel/Lock.h>
@@ -10,7 +11,8 @@ class Inode;
class PhysicalPage;
class VMObject : public RefCounted<VMObject>
, public Weakable<VMObject> {
, public Weakable<VMObject>
, public InlineLinkedListNode<VMObject> {
friend class MemoryManager;
public:
@@ -27,6 +29,10 @@ public:
size_t size() const { return m_physical_pages.size() * PAGE_SIZE; }
// For InlineLinkedListNode
VMObject* m_next { nullptr };
VMObject* m_prev { nullptr };
protected:
explicit VMObject(size_t);
explicit VMObject(const VMObject&);