Mirror of https://github.com/LadybirdBrowser/ladybird.git (synced 2024-12-11 08:50:37 +00:00)
Kernel: Use a FixedArray for VMObject::m_physical_pages
This makes VMObject 8 bytes smaller since we can use the array size as the page count. The size() is now also computed from the page count instead of being a separate value. This makes sizes always be a multiple of PAGE_SIZE, which is sane.
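To make the layout change concrete, here is a minimal before/after sketch using only the members visible in the diffs below; the Old/New class names are illustrative, and the 8-byte figure is the commit's own claim for the target architecture, not re-derived here.

// Before: the byte size was tracked separately from the page storage.
class VMObject_Old {
    size_t m_size { 0 };                           // kept in sync by hand
    Vector<RefPtr<PhysicalPage>> m_physical_pages; // growable, could disagree with m_size
};

// After: a fixed-size array is the single source of truth; the size and
// page count are both derived from it, so they can never disagree.
class VMObject_New {
public:
    size_t page_count() const { return m_physical_pages.size(); }
    size_t size() const { return m_physical_pages.size() * PAGE_SIZE; } // always page-aligned

private:
    FixedArray<RefPtr<PhysicalPage>> m_physical_pages;
};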
This commit is contained in:
Parent: 5096eaa845
Commit: b67200dfea

Notes (sideshowbarker, 2024-07-19 12:50:23 +09:00):
Author: https://github.com/awesomekling
Commit: https://github.com/SerenityOS/serenity/commit/b67200dfead
5 changed files with 19 additions and 39 deletions
Kernel/FileSystem/ProcFS.cpp
@@ -315,7 +315,7 @@ Optional<KBuffer> procfs$pid_vmo(InodeIdentifier identifier)
             region.vmo().is_anonymous() ? "anonymous" : "file-backed",
             &region.vmo(),
             region.vmo().ref_count());
-        for (int i = 0; i < region.vmo().page_count(); ++i) {
+        for (size_t i = 0; i < region.vmo().page_count(); ++i) {
             auto& physical_page = region.vmo().physical_pages()[i];
             builder.appendf("P%x%s(%u) ",
                 physical_page ? physical_page->paddr().get() : 0,
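A side effect picked up here: page_count() changes its return type from int to size_t in VMObject.h (last diff below), so this loop index follows suit; an int index compared against a size_t bound would draw a sign-compare warning. A small sketch of the pattern:

// for (int i = 0; ...)                                  // old: signed index vs. unsigned bound
for (size_t i = 0; i < region.vmo().page_count(); ++i) { // new: index type matches the bound
    // ... per-page output as in the hunk above ...
}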
Kernel/VM/AnonymousVMObject.cpp
@@ -3,27 +3,25 @@
 
 NonnullRefPtr<AnonymousVMObject> AnonymousVMObject::create_with_size(size_t size)
 {
-    size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
     return adopt(*new AnonymousVMObject(size));
 }
 
 NonnullRefPtr<AnonymousVMObject> AnonymousVMObject::create_for_physical_range(PhysicalAddress paddr, size_t size)
 {
-    size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
     return adopt(*new AnonymousVMObject(paddr, size));
 }
 
 AnonymousVMObject::AnonymousVMObject(size_t size)
-    : VMObject(size, ShouldFillPhysicalPages::Yes)
+    : VMObject(size)
 {
 }
 
 AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, size_t size)
-    : VMObject(size, ShouldFillPhysicalPages::No)
+    : VMObject(size)
 {
-    for (size_t i = 0; i < size; i += PAGE_SIZE)
-        m_physical_pages.append(PhysicalPage::create(paddr.offset(i), false, false));
-    ASSERT(m_physical_pages.size() == page_count());
+    ASSERT(paddr.page_base() == paddr.get());
+    for (size_t i = 0; i < page_count(); ++i)
+        physical_pages()[i] = PhysicalPage::create(paddr.offset(i * PAGE_SIZE), false, false);
 }
 
 AnonymousVMObject::AnonymousVMObject(const AnonymousVMObject& other)
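Two things happen in this hunk: the explicit ceil_div() rounding in both create helpers is deleted, because the new VMObject(size_t) base constructor rounds for every subclass (see the VMObject.cpp diff below), and the physical-range constructor now demands a page-aligned base address and fills preallocated slots rather than appending. A hedged usage sketch; the caller and the address are made up, chosen only because 0xb8000 (the VGA text buffer) is page-aligned:

// Hypothetical caller wrapping four pages of physical memory:
auto vmo = AnonymousVMObject::create_for_physical_range(PhysicalAddress(0xb8000), 4 * PAGE_SIZE);
// The base constructor has already allocated ceil_div(4 * PAGE_SIZE, PAGE_SIZE) == 4 null slots,
// and the constructor body filled each one with a PhysicalPage covering the range.
ASSERT(vmo->page_count() == 4);
ASSERT(vmo->size() == 4 * PAGE_SIZE);
// An unaligned base would now trip ASSERT(paddr.page_base() == paddr.get()).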
Kernel/VM/InodeVMObject.cpp
@@ -19,7 +19,7 @@ NonnullRefPtr<VMObject> InodeVMObject::clone()
 }
 
 InodeVMObject::InodeVMObject(Inode& inode)
-    : VMObject(ceil_div(inode.size(), PAGE_SIZE) * PAGE_SIZE, ShouldFillPhysicalPages::Yes)
+    : VMObject(inode.size())
     , m_inode(inode)
 {
 }
@@ -43,18 +43,8 @@ void InodeVMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
 
     InterruptDisabler disabler;
 
-    auto old_page_count = page_count();
-    m_size = new_size;
-
-    if (page_count() > old_page_count) {
-        // Add null pages and let the fault handler page these in when that day comes.
-        for (auto i = old_page_count; i < page_count(); ++i)
-            m_physical_pages.append(nullptr);
-    } else {
-        // Prune the no-longer valid pages. I'm not sure this is actually correct behavior.
-        for (auto i = page_count(); i < old_page_count; ++i)
-            m_physical_pages.take_last();
-    }
+    auto new_page_count = PAGE_ROUND_UP(new_size);
+    m_physical_pages.resize(new_page_count);
 
     // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
     for_each_region([](Region& region) {
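In inode_size_changed(), the hand-written grow branch (append nullptr) and shrink branch (take_last()) collapse into one resize() call. One caveat: PAGE_ROUND_UP(new_size), as such macros are conventionally defined, yields a rounded byte count rather than a page count, so the name new_page_count looks suspect; nothing in this diff settles that. The intended equivalence, as a sketch:

// Conceptually, both the old branches and the new resize() mean:
static void set_page_count(FixedArray<RefPtr<PhysicalPage>>& pages, size_t new_count)
{
    // grow:   fresh tail slots are null RefPtrs, paged in on fault later
    //         (per the old "let the fault handler page these in" comment)
    // shrink: dropped tail slots release their PhysicalPage references
    pages.resize(new_count);
}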
Kernel/VM/VMObject.cpp
@@ -4,18 +4,15 @@
 #include <Kernel/VM/VMObject.h>
 
 VMObject::VMObject(const VMObject& other)
-    : m_size(other.m_size)
-    , m_physical_pages(other.m_physical_pages)
+    : m_physical_pages(other.m_physical_pages)
 {
     MM.register_vmo(*this);
 }
 
-VMObject::VMObject(size_t size, ShouldFillPhysicalPages should_fill_physical_pages)
-    : m_size(size)
+VMObject::VMObject(size_t size)
+    : m_physical_pages(ceil_div(size, PAGE_SIZE))
 {
     MM.register_vmo(*this);
-    if (should_fill_physical_pages == ShouldFillPhysicalPages::Yes)
-        m_physical_pages.resize(page_count());
 }
 
 VMObject::~VMObject()
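The base constructor now derives the slot count with ceil_div(), which rounds up on any remainder; this is the source of the "always a multiple of PAGE_SIZE" guarantee in the commit message. A worked example, assuming PAGE_SIZE == 4096:

// new: VMObject(10000)
//   ceil_div(10000, 4096) == 3        -> 3 page slots allocated
//   size() == 3 * 4096 == 12288       -> reported size rounds up to whole pages
// old: VMObject(10000, ...) stored m_size = 10000, and
//   page_count() == 10000 / 4096 == 2 -> truncated unless the caller pre-rounded,
//   which is exactly what the deleted ceil_div lines in AnonymousVMObject did.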
Kernel/VM/VMObject.h
@@ -2,7 +2,7 @@
 
 #include <AK/RefCounted.h>
 #include <AK/RefPtr.h>
-#include <AK/Vector.h>
+#include <AK/FixedArray.h>
 #include <AK/Weakable.h>
 #include <Kernel/Lock.h>
 
@@ -21,25 +21,20 @@ public:
     virtual bool is_anonymous() const { return false; }
     virtual bool is_inode() const { return false; }
 
-    int page_count() const { return m_size / PAGE_SIZE; }
-    const Vector<RefPtr<PhysicalPage>>& physical_pages() const { return m_physical_pages; }
-    Vector<RefPtr<PhysicalPage>>& physical_pages() { return m_physical_pages; }
+    size_t page_count() const { return m_physical_pages.size(); }
+    const FixedArray<RefPtr<PhysicalPage>>& physical_pages() const { return m_physical_pages; }
+    FixedArray<RefPtr<PhysicalPage>>& physical_pages() { return m_physical_pages; }
 
-    size_t size() const { return m_size; }
+    size_t size() const { return m_physical_pages.size() * PAGE_SIZE; }
 
 protected:
-    enum ShouldFillPhysicalPages {
-        No = 0,
-        Yes
-    };
-    VMObject(size_t, ShouldFillPhysicalPages);
+    explicit VMObject(size_t);
     explicit VMObject(const VMObject&);
 
     template<typename Callback>
     void for_each_region(Callback);
 
-    size_t m_size { 0 };
-    Vector<RefPtr<PhysicalPage>> m_physical_pages;
+    FixedArray<RefPtr<PhysicalPage>> m_physical_pages;
 
 private:
     VMObject& operator=(const VMObject&) = delete;
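The header swaps <AK/Vector.h> for <AK/FixedArray.h>. The commit relies on only a small surface of FixedArray: sized construction that value-initializes the slots, size(), operator[], copy construction, and resize(). A minimal stand-in with that surface, as an illustrative sketch rather than AK's actual implementation:

#include <cstddef>
#include <utility>

template<typename T>
class FixedArray {
public:
    FixedArray() = default;
    explicit FixedArray(size_t size)
        : m_elements(new T[size]()) // value-initialized: RefPtr slots start out null
        , m_size(size)
    {
    }
    FixedArray(const FixedArray& other)
        : FixedArray(other.m_size)
    {
        for (size_t i = 0; i < m_size; ++i)
            m_elements[i] = other.m_elements[i];
    }
    FixedArray& operator=(const FixedArray&) = delete; // mirrors VMObject's deleted operator=
    ~FixedArray() { delete[] m_elements; }

    size_t size() const { return m_size; }
    T& operator[](size_t i) { return m_elements[i]; }
    const T& operator[](size_t i) const { return m_elements[i]; }

    // Exact reallocation, unlike Vector: no spare capacity is kept around.
    void resize(size_t new_size)
    {
        FixedArray resized(new_size);
        for (size_t i = 0; i < m_size && i < new_size; ++i)
            resized.m_elements[i] = std::move(m_elements[i]);
        std::swap(m_elements, resized.m_elements);
        std::swap(m_size, resized.m_size);
    }

private:
    T* m_elements { nullptr };
    size_t m_size { 0 };
};

Because there is no capacity separate from the size, the object is just a pointer plus an element count; that, together with dropping m_size, is presumably where the commit's 8-byte saving comes from.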