Jelajahi Sumber

Kernel: Move VM-related files into Kernel/VM/.

Also break MemoryManager.{cpp,h} into one file per class.
Andreas Kling 6 tahun lalu
induk
melakukan
b9738fa8ac

+ 1 - 1
Kernel/Devices/BXVGADevice.cpp

@@ -1,7 +1,7 @@
 #include <Kernel/Devices/BXVGADevice.h>
 #include <Kernel/IO.h>
 #include <Kernel/PCI.h>
-#include <Kernel/MemoryManager.h>
+#include <Kernel/VM/MemoryManager.h>
 #include <Kernel/Process.h>
 #include <LibC/errno_numbers.h>
 

+ 1 - 1
Kernel/FileDescriptor.cpp

@@ -10,7 +10,7 @@
 #include <Kernel/Socket.h>
 #include <Kernel/Process.h>
 #include <Kernel/Devices/BlockDevice.h>
-#include <Kernel/MemoryManager.h>
+#include <Kernel/VM/MemoryManager.h>
 
 Retained<FileDescriptor> FileDescriptor::create(RetainPtr<Inode>&& inode)
 {

+ 1 - 1
Kernel/FileSystem/FileSystem.cpp

@@ -3,7 +3,7 @@
 #include <AK/StringBuilder.h>
 #include <LibC/errno_numbers.h>
 #include "FileSystem.h"
-#include "MemoryManager.h"
+#include <Kernel/VM/MemoryManager.h>
 #include <Kernel/LocalSocket.h>
 
 static dword s_lastFileSystemID;

+ 1 - 1
Kernel/FileSystem/ProcFS.cpp

@@ -2,7 +2,7 @@
 #include "Process.h"
 #include <Kernel/FileSystem/VirtualFileSystem.h>
 #include "system.h"
-#include "MemoryManager.h"
+#include <Kernel/VM/MemoryManager.h>
 #include "StdLib.h"
 #include "i386.h"
 #include "KSyms.h"

+ 5 - 1
Kernel/Makefile

@@ -11,7 +11,11 @@ KERNEL_OBJS = \
        PIC.o \
        Syscall.o \
        Devices/IDEDiskDevice.o \
-       MemoryManager.o \
+       VM/MemoryManager.o \
+       VM/Region.o \
+       VM/VMObject.o \
+       VM/PageDirectory.o \
+       VM/PhysicalPage.o \
        Console.o \
        IRQHandler.o \
        kprintf.o \

+ 1 - 1
Kernel/Net/E1000NetworkAdapter.h

@@ -2,7 +2,7 @@
 
 #include <Kernel/Net/NetworkAdapter.h>
 #include <Kernel/PCI.h>
-#include <Kernel/MemoryManager.h>
+#include <Kernel/VM/MemoryManager.h>
 #include <Kernel/IRQHandler.h>
 #include <AK/OwnPtr.h>
 

+ 1 - 1
Kernel/Process.cpp

@@ -8,7 +8,7 @@
 #include <Kernel/FileSystem/VirtualFileSystem.h>
 #include <Kernel/Devices/NullDevice.h>
 #include <Kernel/ELF/ELFLoader.h>
-#include "MemoryManager.h"
+#include <Kernel/VM/MemoryManager.h>
 #include "i8253.h"
 #include "RTC.h"
 #include <AK/StdLibExtras.h>

+ 1 - 1
Kernel/Thread.cpp

@@ -2,7 +2,7 @@
 #include <Kernel/Scheduler.h>
 #include <Kernel/system.h>
 #include <Kernel/Process.h>
-#include <Kernel/MemoryManager.h>
+#include <Kernel/VM/MemoryManager.h>
 #include <LibC/signal_numbers.h>
 
 InlineLinkedList<Thread>* g_threads;

+ 2 - 0
Kernel/VM/.gitignore

@@ -0,0 +1,2 @@
+*.o
+*.d

+ 1 - 369
Kernel/MemoryManager.cpp → Kernel/VM/MemoryManager.cpp

@@ -1,10 +1,9 @@
-#include "MemoryManager.h"
+#include <Kernel/VM/MemoryManager.h>
 #include <AK/Assertions.h>
 #include <AK/kstdio.h>
 #include "i386.h"
 #include "StdLib.h"
 #include "Process.h"
-#include <LibC/errno_numbers.h>
 #include "CMOS.h"
 
 //#define MM_DEBUG
@@ -43,16 +42,6 @@ MemoryManager::~MemoryManager()
 {
 }
 
-PageDirectory::PageDirectory(PhysicalAddress paddr)
-{
-    m_directory_page = PhysicalPage::create_eternal(paddr, true);
-}
-
-PageDirectory::PageDirectory()
-{
-    MM.populate_page_directory(*this);
-}
-
 void MemoryManager::populate_page_directory(PageDirectory& page_directory)
 {
     page_directory.m_directory_page = allocate_supervisor_physical_page();
@@ -296,25 +285,6 @@ bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
     return true;
 }
 
-bool Region::page_in()
-{
-    ASSERT(m_page_directory);
-    ASSERT(!vmo().is_anonymous());
-    ASSERT(vmo().inode());
-#ifdef MM_DEBUG
-    dbgprintf("MM: page_in %u pages\n", page_count());
-#endif
-    for (size_t i = 0; i < page_count(); ++i) {
-        auto& vmo_page = vmo().physical_pages()[first_page_index() + i];
-        if (vmo_page.is_null()) {
-            bool success = MM.page_in_from_inode(*this, i);
-            if (!success)
-                return false;
-        }
-        MM.remap_region_page(*this, i, true);
-    }
-    return true;
-}
 
 bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_region)
 {
@@ -625,305 +595,6 @@ bool MemoryManager::validate_user_write(const Process& process, LinearAddress la
     return region && region->is_writable();
 }
 
-Retained<Region> Region::clone()
-{
-    ASSERT(current);
-    if (m_shared || (m_readable && !m_writable)) {
-#ifdef MM_DEBUG
-        dbgprintf("%s<%u> Region::clone(): sharing %s (L%x)\n",
-                  current->process().name().characters(),
-                  current->pid(),
-                  m_name.characters(),
-                  laddr().get());
-#endif
-        // Create a new region backed by the same VMObject.
-        return adopt(*new Region(laddr(), size(), m_vmo.copy_ref(), m_offset_in_vmo, String(m_name), m_readable, m_writable));
-    }
-
-#ifdef MM_DEBUG
-    dbgprintf("%s<%u> Region::clone(): cowing %s (L%x)\n",
-              current->process().name().characters(),
-              current->pid(),
-              m_name.characters(),
-              laddr().get());
-#endif
-    // Set up a COW region. The parent (this) region becomes COW as well!
-    for (size_t i = 0; i < page_count(); ++i)
-        m_cow_map.set(i, true);
-    MM.remap_region(current->process().page_directory(), *this);
-    return adopt(*new Region(laddr(), size(), m_vmo->clone(), m_offset_in_vmo, String(m_name), m_readable, m_writable, true));
-}
-
-Region::Region(LinearAddress a, size_t s, String&& n, bool r, bool w, bool cow)
-    : m_laddr(a)
-    , m_size(s)
-    , m_vmo(VMObject::create_anonymous(s))
-    , m_name(move(n))
-    , m_readable(r)
-    , m_writable(w)
-    , m_cow_map(Bitmap::create(m_vmo->page_count(), cow))
-{
-    m_vmo->set_name(m_name);
-    MM.register_region(*this);
-}
-
-Region::Region(LinearAddress a, size_t s, RetainPtr<Inode>&& inode, String&& n, bool r, bool w)
-    : m_laddr(a)
-    , m_size(s)
-    , m_vmo(VMObject::create_file_backed(move(inode)))
-    , m_name(move(n))
-    , m_readable(r)
-    , m_writable(w)
-    , m_cow_map(Bitmap::create(m_vmo->page_count()))
-{
-    MM.register_region(*this);
-}
-
-Region::Region(LinearAddress a, size_t s, Retained<VMObject>&& vmo, size_t offset_in_vmo, String&& n, bool r, bool w, bool cow)
-    : m_laddr(a)
-    , m_size(s)
-    , m_offset_in_vmo(offset_in_vmo)
-    , m_vmo(move(vmo))
-    , m_name(move(n))
-    , m_readable(r)
-    , m_writable(w)
-    , m_cow_map(Bitmap::create(m_vmo->page_count(), cow))
-{
-    MM.register_region(*this);
-}
-
-Region::~Region()
-{
-    if (m_page_directory) {
-        MM.unmap_region(*this);
-        ASSERT(!m_page_directory);
-    }
-    MM.unregister_region(*this);
-}
-
-Retained<PhysicalPage> PhysicalPage::create_eternal(PhysicalAddress paddr, bool supervisor)
-{
-    void* slot = kmalloc_eternal(sizeof(PhysicalPage));
-    new (slot) PhysicalPage(paddr, supervisor);
-    return adopt(*(PhysicalPage*)slot);
-}
-
-Retained<PhysicalPage> PhysicalPage::create(PhysicalAddress paddr, bool supervisor)
-{
-    void* slot = kmalloc(sizeof(PhysicalPage));
-    new (slot) PhysicalPage(paddr, supervisor, false);
-    return adopt(*(PhysicalPage*)slot);
-}
-
-PhysicalPage::PhysicalPage(PhysicalAddress paddr, bool supervisor, bool may_return_to_freelist)
-    : m_may_return_to_freelist(may_return_to_freelist)
-    , m_supervisor(supervisor)
-    , m_paddr(paddr)
-{
-    if (supervisor)
-        ++MemoryManager::s_super_physical_pages_in_existence;
-    else
-        ++MemoryManager::s_user_physical_pages_in_existence;
-}
-
-void PhysicalPage::return_to_freelist()
-{
-    ASSERT((paddr().get() & ~PAGE_MASK) == 0);
-    InterruptDisabler disabler;
-    m_retain_count = 1;
-    if (m_supervisor)
-        MM.m_free_supervisor_physical_pages.append(adopt(*this));
-    else
-        MM.m_free_physical_pages.append(adopt(*this));
-#ifdef MM_DEBUG
-    dbgprintf("MM: P%x released to freelist\n", m_paddr.get());
-#endif
-}
-
-Retained<VMObject> VMObject::create_file_backed(RetainPtr<Inode>&& inode)
-{
-    InterruptDisabler disabler;
-    if (inode->vmo())
-        return *inode->vmo();
-    auto vmo = adopt(*new VMObject(move(inode)));
-    vmo->inode()->set_vmo(*vmo);
-    return vmo;
-}
-
-Retained<VMObject> VMObject::create_anonymous(size_t size)
-{
-    size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
-    return adopt(*new VMObject(size));
-}
-
-Retained<VMObject> VMObject::create_for_physical_range(PhysicalAddress paddr, size_t size)
-{
-    size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
-    auto vmo = adopt(*new VMObject(paddr, size));
-    vmo->m_allow_cpu_caching = false;
-    return vmo;
-}
-
-Retained<VMObject> VMObject::clone()
-{
-    return adopt(*new VMObject(*this));
-}
-
-VMObject::VMObject(VMObject& other)
-    : m_name(other.m_name)
-    , m_anonymous(other.m_anonymous)
-    , m_inode_offset(other.m_inode_offset)
-    , m_size(other.m_size)
-    , m_inode(other.m_inode)
-    , m_physical_pages(other.m_physical_pages)
-{
-    MM.register_vmo(*this);
-}
-
-VMObject::VMObject(size_t size)
-    : m_anonymous(true)
-    , m_size(size)
-{
-    MM.register_vmo(*this);
-    m_physical_pages.resize(page_count());
-}
-
-VMObject::VMObject(PhysicalAddress paddr, size_t size)
-    : m_anonymous(true)
-    , m_size(size)
-{
-    MM.register_vmo(*this);
-    for (size_t i = 0; i < size; i += PAGE_SIZE) {
-        m_physical_pages.append(PhysicalPage::create(paddr.offset(i), false));
-    }
-    ASSERT(m_physical_pages.size() == page_count());
-}
-
-
-VMObject::VMObject(RetainPtr<Inode>&& inode)
-    : m_inode(move(inode))
-{
-    ASSERT(m_inode);
-    m_size = ceil_div(m_inode->size(), PAGE_SIZE) * PAGE_SIZE;
-    m_physical_pages.resize(page_count());
-    MM.register_vmo(*this);
-}
-
-VMObject::~VMObject()
-{
-    if (m_inode)
-        ASSERT(m_inode->vmo() == this);
-    MM.unregister_vmo(*this);
-}
-
-template<typename Callback>
-void VMObject::for_each_region(Callback callback)
-{
-    // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
-    //        Perhaps VMObject could have a Vector<Region*> with all of his mappers?
-    for (auto* region : MM.m_regions) {
-        if (&region->vmo() == this)
-            callback(*region);
-    }
-}
-
-void VMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
-{
-    (void)old_size;
-    InterruptDisabler disabler;
-
-    size_t old_page_count = page_count();
-    m_size = new_size;
-
-    if (page_count() > old_page_count) {
-        // Add null pages and let the fault handler page these in when that day comes.
-        for (size_t i = old_page_count; i < page_count(); ++i)
-            m_physical_pages.append(nullptr);
-    } else {
-        // Prune the no-longer valid pages. I'm not sure this is actually correct behavior.
-        for (size_t i = page_count(); i < old_page_count; ++i)
-            m_physical_pages.take_last();
-    }
-
-    // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
-    for_each_region([] (Region& region) {
-        ASSERT(region.page_directory());
-        MM.remap_region(*region.page_directory(), region);
-    });
-}
-
-void VMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size, const byte* data)
-{
-    (void)size;
-    (void)data;
-    InterruptDisabler disabler;
-    ASSERT(offset >= 0);
-
-    // FIXME: Only invalidate the parts that actually changed.
-    for (auto& physical_page : m_physical_pages)
-        physical_page = nullptr;
-
-#if 0
-    size_t current_offset = offset;
-    size_t remaining_bytes = size;
-    const byte* data_ptr = data;
-
-    auto to_page_index = [] (size_t offset) -> size_t {
-        return offset / PAGE_SIZE;
-    };
-
-    if (current_offset & PAGE_MASK) {
-        size_t page_index = to_page_index(current_offset);
-        size_t bytes_to_copy = min(size, PAGE_SIZE - (current_offset & PAGE_MASK));
-        if (m_physical_pages[page_index]) {
-            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
-            memcpy(ptr, data_ptr, bytes_to_copy);
-            MM.unquickmap_page();
-        }
-        current_offset += bytes_to_copy;
-        data += bytes_to_copy;
-        remaining_bytes -= bytes_to_copy;
-    }
-
-    for (size_t page_index = to_page_index(current_offset); page_index < m_physical_pages.size(); ++page_index) {
-        size_t bytes_to_copy = PAGE_SIZE - (current_offset & PAGE_MASK);
-        if (m_physical_pages[page_index]) {
-            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
-            memcpy(ptr, data_ptr, bytes_to_copy);
-            MM.unquickmap_page();
-        }
-        current_offset += bytes_to_copy;
-        data += bytes_to_copy;
-    }
-#endif
-
-    // FIXME: Consolidate with inode_size_changed() so we only do a single walk.
-    for_each_region([] (Region& region) {
-        ASSERT(region.page_directory());
-        MM.remap_region(*region.page_directory(), region);
-    });
-}
-
-int Region::commit()
-{
-    InterruptDisabler disabler;
-#ifdef MM_DEBUG
-    dbgprintf("MM: commit %u pages in Region %p (VMO=%p) at L%x\n", vmo().page_count(), this, &vmo(), laddr().get());
-#endif
-    for (size_t i = first_page_index(); i <= last_page_index(); ++i) {
-        if (!vmo().physical_pages()[i].is_null())
-            continue;
-        auto physical_page = MM.allocate_physical_page(MemoryManager::ShouldZeroFill::Yes);
-        if (!physical_page) {
-            kprintf("MM: commit was unable to allocate a physical page\n");
-            return -ENOMEM;
-        }
-        vmo().physical_pages()[i] = move(physical_page);
-        MM.remap_region_page(*this, i, true);
-    }
-    return 0;
-}
-
 void MemoryManager::register_vmo(VMObject& vmo)
 {
     InterruptDisabler disabler;
@@ -948,45 +619,6 @@ void MemoryManager::unregister_region(Region& region)
     m_regions.remove(&region);
 }
 
-size_t Region::amount_resident() const
-{
-    size_t bytes = 0;
-    for (size_t i = 0; i < page_count(); ++i) {
-        if (m_vmo->physical_pages()[first_page_index() + i])
-            bytes += PAGE_SIZE;
-    }
-    return bytes;
-}
-
-size_t Region::amount_shared() const
-{
-    size_t bytes = 0;
-    for (size_t i = 0; i < page_count(); ++i) {
-        auto& physical_page = m_vmo->physical_pages()[first_page_index() + i];
-        if (physical_page && physical_page->retain_count() > 1)
-            bytes += PAGE_SIZE;
-    }
-    return bytes;
-}
-
-PageDirectory::~PageDirectory()
-{
-#ifdef MM_DEBUG
-    dbgprintf("MM: ~PageDirectory K%x\n", this);
-#endif
-}
-
-void PageDirectory::flush(LinearAddress laddr)
-{
-#ifdef MM_DEBUG
-    dbgprintf("MM: Flush page L%x\n", laddr.get());
-#endif
-    if (!current)
-        return;
-    if (&current->process().page_directory() == this)
-        MM.flush_tlb(laddr);
-}
-
 ProcessPagingScope::ProcessPagingScope(Process& process)
 {
     ASSERT(current);

+ 4 - 196
Kernel/MemoryManager.h → Kernel/VM/MemoryManager.h

@@ -11,7 +11,10 @@
 #include <AK/AKString.h>
 #include <AK/Badge.h>
 #include <AK/Weakable.h>
-#include <Kernel/FileSystem/VirtualFileSystem.h>
+#include <Kernel/VM/PhysicalPage.h>
+#include <Kernel/VM/Region.h>
+#include <Kernel/VM/VMObject.h>
+#include <Kernel/FileSystem/InodeIdentifier.h>
 
 #define PAGE_ROUND_UP(x) ((((dword)(x)) + PAGE_SIZE-1) & (~(PAGE_SIZE-1)))
 
@@ -22,201 +25,6 @@ enum class PageFaultResponse {
     Continue,
 };
 
-class PhysicalPage {
-    friend class MemoryManager;
-    friend class PageDirectory;
-    friend class VMObject;
-public:
-    PhysicalAddress paddr() const { return m_paddr; }
-
-    void retain()
-    {
-        ASSERT(m_retain_count);
-        ++m_retain_count;
-    }
-
-    void release()
-    {
-        ASSERT(m_retain_count);
-        if (!--m_retain_count) {
-            if (m_may_return_to_freelist)
-                return_to_freelist();
-            else
-                delete this;
-        }
-    }
-
-    static Retained<PhysicalPage> create_eternal(PhysicalAddress, bool supervisor);
-    static Retained<PhysicalPage> create(PhysicalAddress, bool supervisor);
-
-    unsigned short retain_count() const { return m_retain_count; }
-
-private:
-    PhysicalPage(PhysicalAddress paddr, bool supervisor, bool may_return_to_freelist = true);
-    ~PhysicalPage() { }
-
-    void return_to_freelist();
-
-    unsigned short m_retain_count { 1 };
-    bool m_may_return_to_freelist { true };
-    bool m_supervisor { false };
-    PhysicalAddress m_paddr;
-};
-
-class PageDirectory : public Retainable<PageDirectory> {
-    friend class MemoryManager;
-public:
-    static Retained<PageDirectory> create() { return adopt(*new PageDirectory); }
-    static Retained<PageDirectory> create_at_fixed_address(PhysicalAddress paddr) { return adopt(*new PageDirectory(paddr)); }
-    ~PageDirectory();
-
-    dword cr3() const { return m_directory_page->paddr().get(); }
-    dword* entries() { return reinterpret_cast<dword*>(cr3()); }
-
-    void flush(LinearAddress);
-
-private:
-    PageDirectory();
-    explicit PageDirectory(PhysicalAddress);
-
-    RetainPtr<PhysicalPage> m_directory_page;
-    HashMap<unsigned, RetainPtr<PhysicalPage>> m_physical_pages;
-};
-
-class VMObject : public Retainable<VMObject>, public Weakable<VMObject> {
-    friend class MemoryManager;
-public:
-    static Retained<VMObject> create_file_backed(RetainPtr<Inode>&&);
-    static Retained<VMObject> create_anonymous(size_t);
-    static Retained<VMObject> create_for_physical_range(PhysicalAddress, size_t);
-    Retained<VMObject> clone();
-
-    ~VMObject();
-    bool is_anonymous() const { return m_anonymous; }
-
-    Inode* inode() { return m_inode.ptr(); }
-    const Inode* inode() const { return m_inode.ptr(); }
-    size_t inode_offset() const { return m_inode_offset; }
-
-    String name() const { return m_name; }
-    void set_name(const String& name) { m_name = name; }
-
-    size_t page_count() const { return m_size / PAGE_SIZE; }
-    const Vector<RetainPtr<PhysicalPage>>& physical_pages() const { return m_physical_pages; }
-    Vector<RetainPtr<PhysicalPage>>& physical_pages() { return m_physical_pages; }
-
-    void inode_contents_changed(Badge<Inode>, off_t, ssize_t, const byte*);
-    void inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size);
-
-    size_t size() const { return m_size; }
-
-private:
-    VMObject(RetainPtr<Inode>&&);
-    explicit VMObject(VMObject&);
-    explicit VMObject(size_t);
-    VMObject(PhysicalAddress, size_t);
-
-    template<typename Callback> void for_each_region(Callback);
-
-    String m_name;
-    bool m_anonymous { false };
-    off_t m_inode_offset { 0 };
-    size_t m_size { 0 };
-    bool m_allow_cpu_caching { true };
-    RetainPtr<Inode> m_inode;
-    Vector<RetainPtr<PhysicalPage>> m_physical_pages;
-    Lock m_paging_lock;
-};
-
-class Region : public Retainable<Region> {
-    friend class MemoryManager;
-public:
-    Region(LinearAddress, size_t, String&&, bool r, bool w, bool cow = false);
-    Region(LinearAddress, size_t, Retained<VMObject>&&, size_t offset_in_vmo, String&&, bool r, bool w, bool cow = false);
-    Region(LinearAddress, size_t, RetainPtr<Inode>&&, String&&, bool r, bool w);
-    ~Region();
-
-    LinearAddress laddr() const { return m_laddr; }
-    size_t size() const { return m_size; }
-    bool is_readable() const { return m_readable; }
-    bool is_writable() const { return m_writable; }
-    String name() const { return m_name; }
-
-    void set_name(String&& name) { m_name = move(name); }
-
-    const VMObject& vmo() const { return *m_vmo; }
-    VMObject& vmo() { return *m_vmo; }
-
-    bool is_shared() const { return m_shared; }
-    void set_shared(bool shared) { m_shared = shared; }
-
-    bool is_bitmap() const { return m_is_bitmap; }
-    void set_is_bitmap(bool b) { m_is_bitmap = b; }
-
-    Retained<Region> clone();
-    bool contains(LinearAddress laddr) const
-    {
-        return laddr >= m_laddr && laddr < m_laddr.offset(size());
-    }
-
-    unsigned page_index_from_address(LinearAddress laddr) const
-    {
-        return (laddr - m_laddr).get() / PAGE_SIZE;
-    }
-
-    size_t first_page_index() const
-    {
-        return m_offset_in_vmo / PAGE_SIZE;
-    }
-
-    size_t last_page_index() const
-    {
-        return (first_page_index() + page_count()) - 1;
-    }
-
-    size_t page_count() const
-    {
-        return m_size / PAGE_SIZE;
-    }
-
-    bool page_in();
-    int commit();
-
-    size_t amount_resident() const;
-    size_t amount_shared() const;
-
-    PageDirectory* page_directory() { return m_page_directory.ptr(); }
-
-    void set_page_directory(PageDirectory& page_directory)
-    {
-        ASSERT(!m_page_directory || m_page_directory.ptr() == &page_directory);
-        m_page_directory = page_directory;
-    }
-
-    void release_page_directory()
-    {
-        ASSERT(m_page_directory);
-        m_page_directory.clear();
-    }
-
-    const Bitmap& cow_map() const { return m_cow_map; }
-
-    void set_writable(bool b) { m_writable = b; }
-
-private:
-    RetainPtr<PageDirectory> m_page_directory;
-    LinearAddress m_laddr;
-    size_t m_size { 0 };
-    size_t m_offset_in_vmo { 0 };
-    Retained<VMObject> m_vmo;
-    String m_name;
-    bool m_readable { true };
-    bool m_writable { true };
-    bool m_shared { false };
-    bool m_is_bitmap { false };
-    Bitmap m_cow_map;
-};
-
 #define MM MemoryManager::the()
 
 class MemoryManager {

+ 32 - 0
Kernel/VM/PageDirectory.cpp

@@ -0,0 +1,32 @@
+#include <Kernel/VM/PageDirectory.h>
+#include <Kernel/VM/MemoryManager.h>
+#include <Kernel/Process.h>
+#include <Kernel/Thread.h>
+
+PageDirectory::PageDirectory(PhysicalAddress paddr)
+{
+    m_directory_page = PhysicalPage::create_eternal(paddr, true);
+}
+
+PageDirectory::PageDirectory()
+{
+    MM.populate_page_directory(*this);
+}
+
+PageDirectory::~PageDirectory()
+{
+#ifdef MM_DEBUG
+    dbgprintf("MM: ~PageDirectory K%x\n", this);
+#endif
+}
+
+void PageDirectory::flush(LinearAddress laddr)
+{
+#ifdef MM_DEBUG
+    dbgprintf("MM: Flush page L%x\n", laddr.get());
+#endif
+    if (!current)
+        return;
+    if (&current->process().page_directory() == this)
+        MM.flush_tlb(laddr);
+}

+ 26 - 0
Kernel/VM/PageDirectory.h

@@ -0,0 +1,26 @@
+#pragma once
+
+#include <Kernel/VM/PhysicalPage.h>
+#include <AK/HashMap.h>
+#include <AK/Retainable.h>
+#include <AK/RetainPtr.h>
+
+class PageDirectory : public Retainable<PageDirectory> {
+    friend class MemoryManager;
+public:
+    static Retained<PageDirectory> create() { return adopt(*new PageDirectory); }
+    static Retained<PageDirectory> create_at_fixed_address(PhysicalAddress paddr) { return adopt(*new PageDirectory(paddr)); }
+    ~PageDirectory();
+
+    dword cr3() const { return m_directory_page->paddr().get(); }
+    dword* entries() { return reinterpret_cast<dword*>(cr3()); }
+
+    void flush(LinearAddress);
+
+private:
+    PageDirectory();
+    explicit PageDirectory(PhysicalAddress);
+
+    RetainPtr<PhysicalPage> m_directory_page;
+    HashMap<unsigned, RetainPtr<PhysicalPage>> m_physical_pages;
+};

+ 42 - 0
Kernel/VM/PhysicalPage.cpp

@@ -0,0 +1,42 @@
+#include <Kernel/VM/PhysicalPage.h>
+#include <Kernel/VM/MemoryManager.h>
+#include <Kernel/kmalloc.h>
+
+Retained<PhysicalPage> PhysicalPage::create_eternal(PhysicalAddress paddr, bool supervisor)
+{
+    void* slot = kmalloc_eternal(sizeof(PhysicalPage));
+    new (slot) PhysicalPage(paddr, supervisor);
+    return adopt(*(PhysicalPage*)slot);
+}
+
+Retained<PhysicalPage> PhysicalPage::create(PhysicalAddress paddr, bool supervisor)
+{
+    void* slot = kmalloc(sizeof(PhysicalPage));
+    new (slot) PhysicalPage(paddr, supervisor, false);
+    return adopt(*(PhysicalPage*)slot);
+}
+
+PhysicalPage::PhysicalPage(PhysicalAddress paddr, bool supervisor, bool may_return_to_freelist)
+    : m_may_return_to_freelist(may_return_to_freelist)
+    , m_supervisor(supervisor)
+    , m_paddr(paddr)
+{
+    if (supervisor)
+        ++MemoryManager::s_super_physical_pages_in_existence;
+    else
+        ++MemoryManager::s_user_physical_pages_in_existence;
+}
+
+void PhysicalPage::return_to_freelist()
+{
+    ASSERT((paddr().get() & ~PAGE_MASK) == 0);
+    InterruptDisabler disabler;
+    m_retain_count = 1;
+    if (m_supervisor)
+        MM.m_free_supervisor_physical_pages.append(adopt(*this));
+    else
+        MM.m_free_physical_pages.append(adopt(*this));
+#ifdef MM_DEBUG
+    dbgprintf("MM: P%x released to freelist\n", m_paddr.get());
+#endif
+}

+ 46 - 0
Kernel/VM/PhysicalPage.h

@@ -0,0 +1,46 @@
+#pragma once
+
+#include <Kernel/Assertions.h>
+#include <Kernel/types.h>
+#include <AK/Retained.h>
+
+class PhysicalPage {
+    friend class MemoryManager;
+    friend class PageDirectory;
+    friend class VMObject;
+public:
+    PhysicalAddress paddr() const { return m_paddr; }
+
+    void retain()
+    {
+        ASSERT(m_retain_count);
+        ++m_retain_count;
+    }
+
+    void release()
+    {
+        ASSERT(m_retain_count);
+        if (!--m_retain_count) {
+            if (m_may_return_to_freelist)
+                return_to_freelist();
+            else
+                delete this;
+        }
+    }
+
+    static Retained<PhysicalPage> create_eternal(PhysicalAddress, bool supervisor);
+    static Retained<PhysicalPage> create(PhysicalAddress, bool supervisor);
+
+    word retain_count() const { return m_retain_count; }
+
+private:
+    PhysicalPage(PhysicalAddress paddr, bool supervisor, bool may_return_to_freelist = true);
+    ~PhysicalPage() { }
+
+    void return_to_freelist();
+
+    word m_retain_count { 1 };
+    bool m_may_return_to_freelist { true };
+    bool m_supervisor { false };
+    PhysicalAddress m_paddr;
+};

+ 142 - 0
Kernel/VM/Region.cpp

@@ -0,0 +1,142 @@
+#include <Kernel/VM/Region.h>
+#include <Kernel/VM/VMObject.h>
+#include <Kernel/VM/MemoryManager.h>
+#include <Kernel/Process.h>
+#include <Kernel/Thread.h>
+
+Region::Region(LinearAddress a, size_t s, String&& n, bool r, bool w, bool cow)
+    : m_laddr(a)
+    , m_size(s)
+    , m_vmo(VMObject::create_anonymous(s))
+    , m_name(move(n))
+    , m_readable(r)
+    , m_writable(w)
+    , m_cow_map(Bitmap::create(m_vmo->page_count(), cow))
+{
+    m_vmo->set_name(m_name);
+    MM.register_region(*this);
+}
+
+Region::Region(LinearAddress a, size_t s, RetainPtr<Inode>&& inode, String&& n, bool r, bool w)
+    : m_laddr(a)
+    , m_size(s)
+    , m_vmo(VMObject::create_file_backed(move(inode)))
+    , m_name(move(n))
+    , m_readable(r)
+    , m_writable(w)
+    , m_cow_map(Bitmap::create(m_vmo->page_count()))
+{
+    MM.register_region(*this);
+}
+
+Region::Region(LinearAddress a, size_t s, Retained<VMObject>&& vmo, size_t offset_in_vmo, String&& n, bool r, bool w, bool cow)
+    : m_laddr(a)
+    , m_size(s)
+    , m_offset_in_vmo(offset_in_vmo)
+    , m_vmo(move(vmo))
+    , m_name(move(n))
+    , m_readable(r)
+    , m_writable(w)
+    , m_cow_map(Bitmap::create(m_vmo->page_count(), cow))
+{
+    MM.register_region(*this);
+}
+
+Region::~Region()
+{
+    if (m_page_directory) {
+        MM.unmap_region(*this);
+        ASSERT(!m_page_directory);
+    }
+    MM.unregister_region(*this);
+}
+
+bool Region::page_in()
+{
+    ASSERT(m_page_directory);
+    ASSERT(!vmo().is_anonymous());
+    ASSERT(vmo().inode());
+#ifdef MM_DEBUG
+    dbgprintf("MM: page_in %u pages\n", page_count());
+#endif
+    for (size_t i = 0; i < page_count(); ++i) {
+        auto& vmo_page = vmo().physical_pages()[first_page_index() + i];
+        if (vmo_page.is_null()) {
+            bool success = MM.page_in_from_inode(*this, i);
+            if (!success)
+                return false;
+        }
+        MM.remap_region_page(*this, i, true);
+    }
+    return true;
+}
+
+Retained<Region> Region::clone()
+{
+    ASSERT(current);
+    if (m_shared || (m_readable && !m_writable)) {
+#ifdef MM_DEBUG
+        dbgprintf("%s<%u> Region::clone(): sharing %s (L%x)\n",
+                  current->process().name().characters(),
+                  current->pid(),
+                  m_name.characters(),
+                  laddr().get());
+#endif
+        // Create a new region backed by the same VMObject.
+        return adopt(*new Region(laddr(), size(), m_vmo.copy_ref(), m_offset_in_vmo, String(m_name), m_readable, m_writable));
+    }
+
+#ifdef MM_DEBUG
+    dbgprintf("%s<%u> Region::clone(): cowing %s (L%x)\n",
+              current->process().name().characters(),
+              current->pid(),
+              m_name.characters(),
+              laddr().get());
+#endif
+    // Set up a COW region. The parent (this) region becomes COW as well!
+    for (size_t i = 0; i < page_count(); ++i)
+        m_cow_map.set(i, true);
+    MM.remap_region(current->process().page_directory(), *this);
+    return adopt(*new Region(laddr(), size(), m_vmo->clone(), m_offset_in_vmo, String(m_name), m_readable, m_writable, true));
+}
+
+int Region::commit()
+{
+    InterruptDisabler disabler;
+#ifdef MM_DEBUG
+    dbgprintf("MM: commit %u pages in Region %p (VMO=%p) at L%x\n", vmo().page_count(), this, &vmo(), laddr().get());
+#endif
+    for (size_t i = first_page_index(); i <= last_page_index(); ++i) {
+        if (!vmo().physical_pages()[i].is_null())
+            continue;
+        auto physical_page = MM.allocate_physical_page(MemoryManager::ShouldZeroFill::Yes);
+        if (!physical_page) {
+            kprintf("MM: commit was unable to allocate a physical page\n");
+            return -ENOMEM;
+        }
+        vmo().physical_pages()[i] = move(physical_page);
+        MM.remap_region_page(*this, i, true);
+    }
+    return 0;
+}
+
+size_t Region::amount_resident() const
+{
+    size_t bytes = 0;
+    for (size_t i = 0; i < page_count(); ++i) {
+        if (m_vmo->physical_pages()[first_page_index() + i])
+            bytes += PAGE_SIZE;
+    }
+    return bytes;
+}
+
+size_t Region::amount_shared() const
+{
+    size_t bytes = 0;
+    for (size_t i = 0; i < page_count(); ++i) {
+        auto& physical_page = m_vmo->physical_pages()[first_page_index() + i];
+        if (physical_page && physical_page->retain_count() > 1)
+            bytes += PAGE_SIZE;
+    }
+    return bytes;
+}

+ 97 - 0
Kernel/VM/Region.h

@@ -0,0 +1,97 @@
+#pragma once
+
+#include <AK/AKString.h>
+#include <AK/Bitmap.h>
+#include <Kernel/VM/PageDirectory.h>
+
+class Inode;
+class VMObject;
+
+class Region : public Retainable<Region> {
+    friend class MemoryManager;
+public:
+    Region(LinearAddress, size_t, String&&, bool r, bool w, bool cow = false);
+    Region(LinearAddress, size_t, Retained<VMObject>&&, size_t offset_in_vmo, String&&, bool r, bool w, bool cow = false);
+    Region(LinearAddress, size_t, RetainPtr<Inode>&&, String&&, bool r, bool w);
+    ~Region();
+
+    LinearAddress laddr() const { return m_laddr; }
+    size_t size() const { return m_size; }
+    bool is_readable() const { return m_readable; }
+    bool is_writable() const { return m_writable; }
+    String name() const { return m_name; }
+
+    void set_name(String&& name) { m_name = move(name); }
+
+    const VMObject& vmo() const { return *m_vmo; }
+    VMObject& vmo() { return *m_vmo; }
+
+    bool is_shared() const { return m_shared; }
+    void set_shared(bool shared) { m_shared = shared; }
+
+    bool is_bitmap() const { return m_is_bitmap; }
+    void set_is_bitmap(bool b) { m_is_bitmap = b; }
+
+    Retained<Region> clone();
+    bool contains(LinearAddress laddr) const
+    {
+        return laddr >= m_laddr && laddr < m_laddr.offset(size());
+    }
+
+    unsigned page_index_from_address(LinearAddress laddr) const
+    {
+        return (laddr - m_laddr).get() / PAGE_SIZE;
+    }
+
+    size_t first_page_index() const
+    {
+        return m_offset_in_vmo / PAGE_SIZE;
+    }
+
+    size_t last_page_index() const
+    {
+        return (first_page_index() + page_count()) - 1;
+    }
+
+    size_t page_count() const
+    {
+        return m_size / PAGE_SIZE;
+    }
+
+    bool page_in();
+    int commit();
+
+    size_t amount_resident() const;
+    size_t amount_shared() const;
+
+    PageDirectory* page_directory() { return m_page_directory.ptr(); }
+
+    void set_page_directory(PageDirectory& page_directory)
+    {
+        ASSERT(!m_page_directory || m_page_directory.ptr() == &page_directory);
+        m_page_directory = page_directory;
+    }
+
+    void release_page_directory()
+    {
+        ASSERT(m_page_directory);
+        m_page_directory.clear();
+    }
+
+    const Bitmap& cow_map() const { return m_cow_map; }
+
+    void set_writable(bool b) { m_writable = b; }
+
+private:
+    RetainPtr<PageDirectory> m_page_directory;
+    LinearAddress m_laddr;
+    size_t m_size { 0 };
+    size_t m_offset_in_vmo { 0 };
+    Retained<VMObject> m_vmo;
+    String m_name;
+    bool m_readable { true };
+    bool m_writable { true };
+    bool m_shared { false };
+    bool m_is_bitmap { false };
+    Bitmap m_cow_map;
+};

+ 167 - 0
Kernel/VM/VMObject.cpp

@@ -0,0 +1,167 @@
+#include <Kernel/VM/VMObject.h>
+#include <Kernel/VM/MemoryManager.h>
+#include <FileSystem/FileSystem.h>
+
+Retained<VMObject> VMObject::create_file_backed(RetainPtr<Inode>&& inode) // One shared VMObject per inode, so all mappers see the same pages.
+{
+    InterruptDisabler disabler; // Make the check-then-create below atomic w.r.t. interrupts.
+    if (inode->vmo()) // Reuse the VMObject already cached on the inode, if any.
+        return *inode->vmo();
+    auto vmo = adopt(*new VMObject(move(inode)));
+    vmo->inode()->set_vmo(*vmo); // Cache on the inode for future mappers.
+    return vmo;
+}
+
+Retained<VMObject> VMObject::create_anonymous(size_t size) // Anonymous memory; pages are demand-faulted (see VMObject(size_t)).
+{
+    size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE; // Round up to a whole number of pages.
+    return adopt(*new VMObject(size));
+}
+
+Retained<VMObject> VMObject::create_for_physical_range(PhysicalAddress paddr, size_t size) // Wrap an existing physical range (e.g. a device framebuffer).
+{
+    size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE; // Round up to a whole number of pages.
+    auto vmo = adopt(*new VMObject(paddr, size));
+    vmo->m_allow_cpu_caching = false; // Device/MMIO memory must not be CPU-cached.
+    return vmo;
+}
+
+Retained<VMObject> VMObject::clone() // Shallow copy: the clone shares this VMObject's physical pages (see copy constructor).
+{
+    return adopt(*new VMObject(*this));
+}
+
+VMObject::VMObject(VMObject& other) // Copy constructor backing clone(); shares other's physical pages. m_paging_lock is deliberately not copied.
+    : m_name(other.m_name)
+    , m_anonymous(other.m_anonymous)
+    , m_inode_offset(other.m_inode_offset)
+    , m_size(other.m_size), m_allow_cpu_caching(other.m_allow_cpu_caching) // Fix: preserve caching policy; a clone of uncacheable (MMIO) memory must stay uncacheable.
+    , m_inode(other.m_inode)
+    , m_physical_pages(other.m_physical_pages)
+{
+    MM.register_vmo(*this); // Every live VMObject is tracked by the MemoryManager.
+}
+
+VMObject::VMObject(size_t size) // Anonymous VMObject: every page slot starts null and is allocated on first page fault.
+    : m_anonymous(true)
+    , m_size(size)
+{
+    MM.register_vmo(*this);
+    m_physical_pages.resize(page_count()); // Fills the vector with null RetainPtrs.
+}
+
+VMObject::VMObject(PhysicalAddress paddr, size_t size) // VMObject over a fixed physical range: pages are wrapped eagerly, not allocated.
+    : m_anonymous(true)
+    , m_size(size)
+{
+    MM.register_vmo(*this);
+    for (size_t i = 0; i < size; i += PAGE_SIZE) {
+        m_physical_pages.append(PhysicalPage::create(paddr.offset(i), false)); // NOTE(review): second arg's meaning isn't visible here — presumably "not owned by the page allocator"; confirm in PhysicalPage.h.
+    }
+    ASSERT(m_physical_pages.size() == page_count());
+}
+
+
+VMObject::VMObject(RetainPtr<Inode>&& inode) // File-backed VMObject; size tracks the inode's size rounded up to whole pages.
+    : m_inode(move(inode))
+{
+    ASSERT(m_inode);
+    m_size = ceil_div(m_inode->size(), PAGE_SIZE) * PAGE_SIZE;
+    m_physical_pages.resize(page_count()); // Null slots; contents are paged in from the inode on fault.
+    MM.register_vmo(*this);
+}
+
+VMObject::~VMObject() // Unregister from the MemoryManager; physical pages release via RetainPtr.
+{
+    if (m_inode)
+        ASSERT(m_inode->vmo() == this); // The inode's cached VMObject must still be us.
+    MM.unregister_vmo(*this);
+}
+
+template<typename Callback>
+void VMObject::for_each_region(Callback callback) // Invoke callback(Region&) for every region currently mapping this VMObject.
+{
+    // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
+    //        Perhaps VMObject could have a Vector<Region*> with all of his mappers?
+    for (auto* region : MM.m_regions) { // O(total regions in the system) — see FIXME above.
+        if (&region->vmo() == this)
+            callback(*region);
+    }
+}
+
+void VMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size) // Resize the page vector to match the inode's new size, then remap all mappers.
+{
+    (void)old_size; // Unused: we compare page counts instead of byte sizes.
+    InterruptDisabler disabler;
+
+    size_t old_page_count = page_count();
+    m_size = new_size;
+
+    if (page_count() > old_page_count) {
+        // Add null pages and let the fault handler page these in when that day comes.
+        for (size_t i = old_page_count; i < page_count(); ++i)
+            m_physical_pages.append(nullptr);
+    } else {
+        // Prune the no-longer valid pages. I'm not sure this is actually correct behavior.
+        for (size_t i = page_count(); i < old_page_count; ++i)
+            m_physical_pages.take_last();
+    }
+
+    // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
+    for_each_region([] (Region& region) {
+        ASSERT(region.page_directory()); // Only mapped regions should reach this walk.
+        MM.remap_region(*region.page_directory(), region);
+    });
+}
+
+void VMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size, const byte* data) // Invalidate cached pages after an inode write; faults re-read from disk.
+{
+    (void)size; // Unused while the partial-invalidation path below is #if 0'd out.
+    (void)data;
+    InterruptDisabler disabler;
+    ASSERT(offset >= 0);
+
+    // FIXME: Only invalidate the parts that actually changed.
+    for (auto& physical_page : m_physical_pages) // Dropping the RetainPtr frees the page; next fault repopulates it.
+        physical_page = nullptr;
+
+#if 0
+    size_t current_offset = offset;
+    size_t remaining_bytes = size;
+    const byte* data_ptr = data;
+
+    auto to_page_index = [] (size_t offset) -> size_t {
+        return offset / PAGE_SIZE;
+    };
+
+    if (current_offset & PAGE_MASK) {
+        size_t page_index = to_page_index(current_offset);
+        size_t bytes_to_copy = min(size, PAGE_SIZE - (current_offset & PAGE_MASK));
+        if (m_physical_pages[page_index]) {
+            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
+            memcpy(ptr, data_ptr, bytes_to_copy);
+            MM.unquickmap_page();
+        }
+        current_offset += bytes_to_copy;
+        data += bytes_to_copy; // NOTE(review): advances `data`, but the copy reads `data_ptr` — fix before re-enabling this path.
+        remaining_bytes -= bytes_to_copy;
+    }
+
+    for (size_t page_index = to_page_index(current_offset); page_index < m_physical_pages.size(); ++page_index) {
+        size_t bytes_to_copy = PAGE_SIZE - (current_offset & PAGE_MASK);
+        if (m_physical_pages[page_index]) {
+            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
+            memcpy(ptr, data_ptr, bytes_to_copy);
+            MM.unquickmap_page();
+        }
+        current_offset += bytes_to_copy;
+        data += bytes_to_copy; // NOTE(review): same data/data_ptr mismatch as above.
+    }
+#endif
+
+    // FIXME: Consolidate with inode_size_changed() so we only do a single walk.
+    for_each_region([] (Region& region) {
+        ASSERT(region.page_directory());
+        MM.remap_region(*region.page_directory(), region);
+    });
+}

+ 57 - 0
Kernel/VM/VMObject.h

@@ -0,0 +1,57 @@
+#pragma once
+
+#include <AK/Badge.h>
+#include <AK/Retainable.h>
+#include <AK/Weakable.h>
+#include <AK/RetainPtr.h>
+#include <AK/Vector.h>
+#include <AK/AKString.h>
+#include <Kernel/Lock.h>
+
+class Inode;
+class PhysicalPage;
+
+class VMObject : public Retainable<VMObject>, public Weakable<VMObject> { // A set of physical pages mappable into address spaces by one or more Regions.
+    friend class MemoryManager; // MM reads m_allow_cpu_caching and other internals directly.
+public:
+    static Retained<VMObject> create_file_backed(RetainPtr<Inode>&&); // Shared per-inode VMObject.
+    static Retained<VMObject> create_anonymous(size_t); // Demand-zero anonymous memory.
+    static Retained<VMObject> create_for_physical_range(PhysicalAddress, size_t); // Wrap existing physical memory (uncacheable).
+    Retained<VMObject> clone(); // Shallow copy sharing this object's physical pages.
+
+    ~VMObject();
+    bool is_anonymous() const { return m_anonymous; }
+
+    Inode* inode() { return m_inode.ptr(); } // Null for anonymous/physical VMObjects.
+    const Inode* inode() const { return m_inode.ptr(); }
+    size_t inode_offset() const { return m_inode_offset; }
+
+    String name() const { return m_name; }
+    void set_name(const String& name) { m_name = name; }
+
+    size_t page_count() const { return m_size / PAGE_SIZE; } // m_size is always page-aligned by the factories.
+    const Vector<RetainPtr<PhysicalPage>>& physical_pages() const { return m_physical_pages; }
+    Vector<RetainPtr<PhysicalPage>>& physical_pages() { return m_physical_pages; }
+
+    // Badge<Inode>: only Inode may call these notification hooks.
+    void inode_contents_changed(Badge<Inode>, off_t, ssize_t, const byte*);
+    void inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size);
+
+    size_t size() const { return m_size; }
+
+private:
+    VMObject(RetainPtr<Inode>&&); // File-backed.
+    explicit VMObject(VMObject&); // Copy ctor backing clone().
+    explicit VMObject(size_t); // Anonymous.
+    VMObject(PhysicalAddress, size_t); // Physical range.
+
+    template<typename Callback> void for_each_region(Callback); // Visit every Region mapping this VMObject.
+
+    String m_name;
+    bool m_anonymous { false };
+    off_t m_inode_offset { 0 };
+    size_t m_size { 0 }; // Byte size, always a multiple of PAGE_SIZE.
+    bool m_allow_cpu_caching { true }; // Cleared for physical-range (MMIO) VMObjects.
+    RetainPtr<Inode> m_inode; // Null unless file-backed.
+    Vector<RetainPtr<PhysicalPage>> m_physical_pages; // One slot per page; null = not yet paged in.
+    Lock m_paging_lock;
+};

+ 1 - 1
Kernel/i386.cpp

@@ -3,7 +3,7 @@
 #include "i386.h"
 #include "Assertions.h"
 #include "Process.h"
-#include "MemoryManager.h"
+#include <Kernel/VM/MemoryManager.h>
 #include "IRQHandler.h"
 #include "PIC.h"
 #include "Scheduler.h"

+ 1 - 1
Kernel/init.cpp

@@ -14,7 +14,7 @@
 #include <Kernel/Devices/RandomDevice.h>
 #include <Kernel/FileSystem/Ext2FileSystem.h>
 #include <Kernel/FileSystem/VirtualFileSystem.h>
-#include "MemoryManager.h"
+#include <Kernel/VM/MemoryManager.h>
 #include <Kernel/FileSystem/ProcFS.h>
 #include "RTC.h"
 #include <Kernel/TTY/VirtualConsole.h>