
Teach Process::exec() about the magic of file-backed VMOs.

This is really sweet! :^) The four instances of /bin/sh spawned at
startup now share their read-only text pages.

There are problems and limitations here, and plenty of room for
improvement. But it kinda works.
Andreas Kling committed 6 years ago
commit cd1e7419f0
10 changed files with 159 additions and 70 deletions
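The mechanism in one picture: a vnode owns at most one file-backed VMObject, and every Region mapping the binary points at that same VMObject, so each text page is read from disk once and then shared. A minimal sketch using this commit's types (the address, text_size, page_directory_a/b, and calling the private Process member allocate_region_with_vmo() directly are illustrative assumptions, not kernel code):

// One shared, file-backed VMO per executable vnode;
// create_file_backed() returns the vnode's existing VMO if it already has one.
auto vmo = VMObject::create_file_backed(descriptor->vnode(), descriptor->metadata().size);

// Two processes map the same VMO window read-only.
// 0x08048000 and text_size are made-up example values.
auto* text_a = process_a.allocate_region_with_vmo(LinearAddress(0x08048000), text_size, vmo.copyRef(), /* offset_in_vmo */ 0, "elf-map-r", /* r */ true, /* w */ false);
auto* text_b = process_b.allocate_region_with_vmo(LinearAddress(0x08048000), text_size, vmo.copyRef(), 0, "elf-map-r", true, false);

// Paging in through either region fills vmo->physical_pages(); the other
// region then maps the very same physical frames instead of re-reading the file.
bool ok = text_a->page_in(*page_directory_a);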
  1. ELFLoader/ELFImage.cpp (+2 -6)
  2. ELFLoader/ELFImage.h (+2 -2)
  3. ELFLoader/ELFLoader.cpp (+16 -4)
  4. ELFLoader/ELFLoader.h (+3 -1)
  5. Kernel/MemoryManager.cpp (+54 -21)
  6. Kernel/MemoryManager.h (+25 -2)
  7. Kernel/ProcFileSystem.cpp (+4 -13)
  8. Kernel/Process.cpp (+45 -19)
  9. Kernel/Process.h (+3 -1)
  10. Kernel/i386.cpp (+5 -1)

+ 2 - 6
ELFLoader/ELFImage.cpp

@@ -1,7 +1,7 @@
 #include "ELFImage.h"
 #include <AK/kstdio.h>
 
-ELFImage::ELFImage(ByteBuffer&& buffer)
+ELFImage::ELFImage(const byte* buffer)
     : m_buffer(buffer)
 {
     m_valid = parse();
@@ -133,11 +133,7 @@ const char* ELFImage::table_string(unsigned offset) const
 
 const char* ELFImage::raw_data(unsigned offset) const
 {
-#ifdef SERENITY
-    return reinterpret_cast<const char*>(m_buffer.pointer()) + offset;
-#else
-    return reinterpret_cast<const char*>(m_file.pointer()) + offset;
-#endif
+    return reinterpret_cast<const char*>(m_buffer) + offset;
 }
 
 const Elf32_Ehdr& ELFImage::header() const

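Note the ownership change above: ELFImage now borrows a raw pointer instead of owning a ByteBuffer, so whoever constructs it must keep the backing memory alive for the image's whole lifetime. A usage sketch (region refers to the helper mapping exec() creates later in this commit; the -ENOEXEC path is a hypothetical illustration, the commit itself simply asserts on failure):

// The raw ELF bytes come straight out of the mapped file-backed region,
// which must outlive both ELFImage and ELFLoader.
const byte* image = region->linearAddress.asPtr();
ELFImage elf(image);
if (!elf.is_valid())
    return -ENOEXEC; // hypothetical error handling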
+ 2 - 2
ELFLoader/ELFImage.h

@@ -8,7 +8,7 @@
 
 class ELFImage {
 public:
-    explicit ELFImage(ByteBuffer&&);
+    explicit ELFImage(const byte*);
     ~ELFImage();
     void dump();
     bool is_valid() const { return m_valid; }
@@ -158,7 +158,7 @@ private:
     const char* section_header_table_string(unsigned offset) const;
     const char* section_index_to_string(unsigned index);
 
-    ByteBuffer m_buffer;
+    const byte* m_buffer { nullptr };
     HashMap<String, unsigned> m_sections;
     bool m_valid { false };
     unsigned m_symbol_table_section_index { 0 };

+ 16 - 4
ELFLoader/ELFLoader.cpp

@@ -3,8 +3,8 @@
 
 //#define ELFLOADER_DEBUG
 
-ELFLoader::ELFLoader(ByteBuffer&& buffer)
-    : m_image(move(buffer))
+ELFLoader::ELFLoader(const byte* buffer)
+    : m_image(buffer)
 {
 }
 
@@ -42,7 +42,11 @@ bool ELFLoader::layout()
 #ifdef ELFLOADER_DEBUG
         kprintf("PH: L%x %u r:%u w:%u\n", program_header.laddr().get(), program_header.size_in_memory(), program_header.is_readable(), program_header.is_writable());
 #endif
-        allocate_section(program_header.laddr(), program_header.size_in_memory(), program_header.alignment(), program_header.is_readable(), program_header.is_writable());
+        if (program_header.is_writable()) {
+            allocate_section(program_header.laddr(), program_header.size_in_memory(), program_header.alignment(), program_header.is_readable(), program_header.is_writable());
+        } else {
+            map_section(program_header.laddr(), program_header.size_in_memory(), program_header.alignment(), program_header.offset(), program_header.is_readable(), program_header.is_writable());
+        }
     });
 
     m_image.for_each_section_of_type(SHT_PROGBITS, [this, &failed] (const ELFImage::Section& section) {
@@ -201,10 +205,18 @@ bool ELFLoader::allocate_section(LinearAddress laddr, size_t size, size_t alignm
 {
     ASSERT(alloc_section_hook);
     char namebuf[16];
-    ksprintf(namebuf, "elf-%s%s", is_readable ? "r" : "", is_writable ? "w" : "");
+    ksprintf(namebuf, "elf-alloc-%s%s", is_readable ? "r" : "", is_writable ? "w" : "");
     return alloc_section_hook(laddr, size, alignment, is_readable, is_writable, namebuf);
 }
 
+bool ELFLoader::map_section(LinearAddress laddr, size_t size, size_t alignment, size_t offset_in_image, bool is_readable, bool is_writable)
+{
+    ASSERT(map_section_hook);
+    char namebuf[16];
+    ksprintf(namebuf, "elf-map-%s%s", is_readable ? "r" : "", is_writable ? "w" : "");
+    return map_section_hook(laddr, size, alignment, offset_in_image, is_readable, is_writable, namebuf);
+}
+
 void ELFLoader::add_symbol(String&& name, char* ptr, unsigned size)
 {
     m_symbols.set(move(name), { ptr, size });

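The layout() split above is the heart of the loader change: writable PT_LOAD segments still get fresh anonymous memory via alloc_section_hook, while read-only segments go through the new map_section_hook and alias the file-backed VMO at the segment's file offset. A sketch of wiring both hooks, mirroring what Process::exec() does later in this commit (the ceilDiv() rounding is the cleanup the commit's own "FIXME: Use ceil_div?" asks for; the lambda bodies are simplified):

ELFLoader loader(raw_elf_image);
// Read-only segments: alias the shared, file-backed VMO at the segment's file offset.
loader.map_section_hook = [&] (LinearAddress laddr, size_t size, size_t alignment, size_t offset_in_image, bool is_readable, bool is_writable, const String& name) {
    (void) allocate_region_with_vmo(laddr, ceilDiv(size, PAGE_SIZE) * PAGE_SIZE, vmo.copyRef(), offset_in_image, String(name), is_readable, is_writable);
    return laddr.asPtr();
};
// Writable segments: private anonymous memory, as before.
loader.alloc_section_hook = [&] (LinearAddress laddr, size_t size, size_t alignment, bool is_readable, bool is_writable, const String& name) {
    (void) allocate_region(laddr, ceilDiv(size, PAGE_SIZE) * PAGE_SIZE, String(name), is_readable, is_writable);
    return laddr.asPtr();
};
loader.load();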
+ 3 - 1
ELFLoader/ELFLoader.h

@@ -8,14 +8,16 @@
 
 class ELFLoader {
 public:
-    ELFLoader(ByteBuffer&&);
+    explicit ELFLoader(const byte*);
     ~ELFLoader();
 
     bool load();
     Function<void*(LinearAddress, size_t, size_t, bool, bool, const String&)> alloc_section_hook;
+    Function<void*(LinearAddress, size_t, size_t, size_t, bool, bool, const String&)> map_section_hook;
     char* symbol_ptr(const char* name);
     void add_symbol(String&& name, char* ptr, unsigned size);
     bool allocate_section(LinearAddress, size_t, size_t alignment, bool is_readable, bool is_writable);
+    bool map_section(LinearAddress, size_t, size_t alignment, size_t offset_in_image, bool is_readable, bool is_writable);
 
 private:
     bool layout();

+ 54 - 21
Kernel/MemoryManager.cpp

@@ -255,23 +255,43 @@ bool MemoryManager::copy_on_write(Process& process, Region& region, unsigned pag
     return true;
 }
 
-bool MemoryManager::page_in_from_vnode(Process& process, Region& region, unsigned page_index_in_region)
+bool Region::page_in(PageDirectory& page_directory)
+{
+    ASSERT(!vmo().is_anonymous());
+    ASSERT(vmo().vnode());
+#ifdef MM_DEBUG
+    dbgprintf("MM: page_in %u pages\n", page_count());
+#endif
+    for (size_t i = 0; i < page_count(); ++i) {
+        auto& vmo_page = vmo().physical_pages()[first_page_index() + i];
+        if (vmo_page.is_null()) {
+            bool success = MM.page_in_from_vnode(page_directory, *this, i);
+            if (!success)
+                return false;
+        }
+        MM.remap_region_page(&page_directory, *this, i, true);
+    }
+    return true;
+}
+
+bool MemoryManager::page_in_from_vnode(PageDirectory& page_directory, Region& region, unsigned page_index_in_region)
 {
     auto& vmo = region.vmo();
     ASSERT(!vmo.is_anonymous());
     ASSERT(vmo.vnode());
     auto& vnode = *vmo.vnode();
-    ASSERT(vmo.physical_pages()[page_index_in_region].is_null());
-    vmo.physical_pages()[page_index_in_region] = allocate_physical_page();
-    if (vmo.physical_pages()[page_index_in_region].is_null()) {
+    auto& vmo_page = vmo.physical_pages()[region.first_page_index() + page_index_in_region];
+    ASSERT(vmo_page.is_null());
+    vmo_page = allocate_physical_page();
+    if (vmo_page.is_null()) {
         kprintf("MM: page_in_from_vnode was unable to allocate a physical page\n");
         return false;
     }
-    remap_region_page(process.m_page_directory, region, page_index_in_region, true);
+    remap_region_page(&page_directory, region, page_index_in_region, true);
     byte* dest_ptr = region.linearAddress.offset(page_index_in_region * PAGE_SIZE).asPtr();
     dbgprintf("MM: page_in_from_vnode ready to read from vnode, will write to L%x!\n", dest_ptr);
     sti(); // Oh god here we go...
-    auto nread = vnode.fileSystem()->readInodeBytes(vnode.inode, vmo.vnode_offset(), PAGE_SIZE, dest_ptr, nullptr);
+    auto nread = vnode.fileSystem()->readInodeBytes(vnode.inode, vmo.vnode_offset() + ((region.first_page_index() + page_index_in_region) * PAGE_SIZE), PAGE_SIZE, dest_ptr, nullptr);
     if (nread < 0) {
         kprintf("MM: page_in_form_vnode had error (%d) while reading!\n", nread);
         return false;
@@ -299,7 +319,7 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
     if (fault.is_not_present()) {
         if (region->vmo().vnode()) {
             dbgprintf("NP(vnode) fault in Region{%p}[%u]\n", region, page_index_in_region);
-            page_in_from_vnode(*current, *region, page_index_in_region);
+            page_in_from_vnode(*current->m_page_directory, *region, page_index_in_region);
             return PageFaultResponse::Continue;
         } else {
             kprintf("NP(error) fault in Region{%p}[%u]\n", region, page_index_in_region);
@@ -440,7 +460,8 @@ void MemoryManager::map_region_at_address(PageDirectory* page_directory, Region&
 {
     InterruptDisabler disabler;
     auto& vmo = region.vmo();
-    for (size_t i = 0; i < vmo.page_count(); ++i) {
+    dbgprintf("MM: map_region_at_address will map VMO pages %u - %u (VMO page count: %u)\n", region.first_page_index(), region.last_page_index(), vmo.page_count());
+    for (size_t i = region.first_page_index(); i <= region.last_page_index(); ++i) {
         auto page_laddr = laddr.offset(i * PAGE_SIZE);
         auto pte = ensurePTE(page_directory, page_laddr);
         auto& physical_page = vmo.physical_pages()[i];
@@ -519,8 +540,7 @@ void MemoryManager::remove_kernel_alias_for_region(Region& region, byte* addr)
 bool MemoryManager::unmapRegion(Process& process, Region& region)
 {
     InterruptDisabler disabler;
-    auto& vmo = region.vmo();
-    for (size_t i = 0; i < vmo.page_count(); ++i) {
+    for (size_t i = 0; i < region.page_count(); ++i) {
         auto laddr = region.linearAddress.offset(i * PAGE_SIZE);
         auto pte = ensurePTE(process.m_page_directory, laddr);
         pte.setPhysicalPageBase(0);
@@ -529,7 +549,7 @@ bool MemoryManager::unmapRegion(Process& process, Region& region)
         pte.setUserAllowed(false);
         flushTLB(laddr);
 #ifdef MM_DEBUG
-        auto& physical_page = vmo.physical_pages()[i];
+        auto& physical_page = region.vmo().physical_pages()[region.first_page_index() + i];
         dbgprintf("MM: >> Unmapped L%x => P%x <<\n", laddr, physical_page ? physical_page->paddr().get() : 0);
 #endif
     }
@@ -580,14 +600,14 @@ RetainPtr<Region> Region::clone()
 
     if (is_readable && !is_writable) {
         // Create a new region backed by the same VMObject.
-        return adopt(*new Region(linearAddress, size, m_vmo.copyRef(), String(name), is_readable, is_writable));
+        return adopt(*new Region(linearAddress, size, m_vmo.copyRef(), m_offset_in_vmo, String(name), is_readable, is_writable));
     }
 
     // Set up a COW region. The parent (this) region becomes COW as well!
-    for (size_t i = 0; i < vmo().page_count(); ++i)
+    for (size_t i = 0; i < page_count(); ++i)
         cow_map.set(i, true);
     MM.remap_region(*current, *this);
-    return adopt(*new Region(linearAddress, size, m_vmo->clone(), String(name), is_readable, is_writable, true));
+    return adopt(*new Region(linearAddress, size, m_vmo->clone(), m_offset_in_vmo, String(name), is_readable, is_writable, true));
 }
 
 Region::Region(LinearAddress a, size_t s, String&& n, bool r, bool w, bool cow)
@@ -599,7 +619,6 @@ Region::Region(LinearAddress a, size_t s, String&& n, bool r, bool w, bool cow)
     , is_writable(w)
     , cow_map(Bitmap::create(m_vmo->page_count(), cow))
 {
-    m_vmo->set_name(name);
 }
 
 Region::Region(LinearAddress a, size_t s, RetainPtr<VirtualFileSystem::Node>&& vnode, String&& n, bool r, bool w)
@@ -611,19 +630,18 @@ Region::Region(LinearAddress a, size_t s, RetainPtr<VirtualFileSystem::Node>&& v
     , is_writable(w)
     , cow_map(Bitmap::create(m_vmo->page_count()))
 {
-    m_vmo->set_name(name);
 }
 
-Region::Region(LinearAddress a, size_t s, RetainPtr<VMObject>&& vmo, String&& n, bool r, bool w, bool cow)
+Region::Region(LinearAddress a, size_t s, RetainPtr<VMObject>&& vmo, size_t offset_in_vmo, String&& n, bool r, bool w, bool cow)
     : linearAddress(a)
     , size(s)
+    , m_offset_in_vmo(offset_in_vmo)
     , m_vmo(move(vmo))
     , name(move(n))
     , is_readable(r)
     , is_writable(w)
     , cow_map(Bitmap::create(m_vmo->page_count(), cow))
 {
-    m_vmo->set_name(name);
 }
 
 Region::~Region()
@@ -645,6 +663,7 @@ RetainPtr<VMObject> VMObject::create_file_backed(RetainPtr<VirtualFileSystem::No
     InterruptDisabler disabler;
     if (vnode->vmo())
         return static_cast<VMObject*>(vnode->vmo());
+    size = ceilDiv(size, PAGE_SIZE) * PAGE_SIZE;
     auto vmo = adopt(*new VMObject(move(vnode), size));
     vmo->vnode()->set_vmo(vmo.ptr());
     return vmo;
@@ -652,6 +671,7 @@ RetainPtr<VMObject> VMObject::create_file_backed(RetainPtr<VirtualFileSystem::No
 
 RetainPtr<VMObject> VMObject::create_anonymous(size_t size)
 {
+    size = ceilDiv(size, PAGE_SIZE) * PAGE_SIZE;
     return adopt(*new VMObject(size));
 }
 
@@ -668,12 +688,14 @@ VMObject::VMObject(VMObject& other)
     , m_vnode(other.m_vnode)
     , m_physical_pages(other.m_physical_pages)
 {
+    MM.register_vmo(*this);
 }
 
 VMObject::VMObject(size_t size)
     : m_anonymous(true)
     , m_size(size)
 {
+    MM.register_vmo(*this);
     m_physical_pages.resize(page_count());
 }
 
@@ -682,15 +704,16 @@ VMObject::VMObject(RetainPtr<VirtualFileSystem::Node>&& vnode, size_t size)
     , m_vnode(move(vnode))
 {
     m_physical_pages.resize(page_count());
+    MM.register_vmo(*this);
 }
 
 VMObject::~VMObject()
 {
-    InterruptDisabler disabler;
     if (m_vnode) {
         ASSERT(m_vnode->vmo() == this);
         m_vnode->set_vmo(nullptr);
     }
+    MM.unregister_vmo(*this);
 }
 
 int Region::commit(Process& process)
@@ -699,12 +722,12 @@ int Region::commit(Process& process)
 #ifdef MM_DEBUG
     dbgprintf("MM: commit %u pages in at L%x\n", vmo().page_count(), linearAddress.get());
 #endif
-    for (size_t i = 0; i < vmo().page_count(); ++i) {
+    for (size_t i = first_page_index(); i <= last_page_index(); ++i) {
         if (!vmo().physical_pages()[i].is_null())
             continue;
         auto physical_page = MM.allocate_physical_page();
         if (!physical_page) {
-            kprintf("MM: page_in_from_vnode was unable to allocate a physical page\n");
+            kprintf("MM: commit was unable to allocate a physical page\n");
             return -ENOMEM;
         }
         vmo().physical_pages()[i] = move(physical_page);
@@ -712,3 +735,13 @@ int Region::commit(Process& process)
     }
     return 0;
 }
+
+void MemoryManager::register_vmo(VMObject& vmo)
+{
+    m_vmos.set(&vmo);
+}
+
+void MemoryManager::unregister_vmo(VMObject& vmo)
+{
+    m_vmos.remove(&vmo);
+}

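The Region/VMO index arithmetic introduced above is easiest to see with concrete numbers. A worked example (values hypothetical; the helpers are the ones added to MemoryManager.h below):

// VMO: file-backed, vnode_offset() == 0, 8 pages (a 32 KiB ELF image).
// Region: size == 0x2000 (8 KiB), m_offset_in_vmo == 0x3000 (page-aligned).
//
//   first_page_index() == 0x3000 / PAGE_SIZE == 3
//   page_count()       == 0x2000 / PAGE_SIZE == 2
//   last_page_index()  == 3 + 2 - 1          == 4
//
// Paging in region page i == 0 therefore reads PAGE_SIZE bytes from file
// offset vnode_offset() + (3 + 0) * PAGE_SIZE == 0x3000 into
// vmo.physical_pages()[3], the slot every other region sharing this VMO
// reuses instead of re-reading the file.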
+ 25 - 2
Kernel/MemoryManager.h

@@ -91,7 +91,7 @@ private:
 
 struct Region : public Retainable<Region> {
     Region(LinearAddress, size_t, String&&, bool r, bool w, bool cow = false);
-    Region(LinearAddress, size_t, RetainPtr<VMObject>&&, String&&, bool r, bool w, bool cow = false);
+    Region(LinearAddress, size_t, RetainPtr<VMObject>&&, size_t offset_in_vmo, String&&, bool r, bool w, bool cow = false);
     Region(LinearAddress, size_t, RetainPtr<VirtualFileSystem::Node>&&, String&&, bool r, bool w);
     ~Region();
 
@@ -109,11 +109,28 @@ struct Region : public Retainable<Region> {
         return (laddr - linearAddress).get() / PAGE_SIZE;
     }
 
+    size_t first_page_index() const
+    {
+        return m_offset_in_vmo / PAGE_SIZE;
+    }
+
+    size_t last_page_index() const
+    {
+        return (first_page_index() + page_count()) - 1;
+    }
+
+    size_t page_count() const
+    {
+        return size / PAGE_SIZE;
+    }
+
+    bool page_in(PageDirectory&);
     int commit(Process&);
     int decommit(Process&);
 
     LinearAddress linearAddress;
     size_t size { 0 };
+    size_t m_offset_in_vmo { 0 };
     RetainPtr<VMObject> m_vmo;
     String name;
     bool is_readable { true };
@@ -127,6 +144,7 @@ class MemoryManager {
     AK_MAKE_ETERNAL
     friend class PhysicalPage;
     friend class Region;
+    friend class VMObject;
     friend ByteBuffer procfs$mm();
 public:
     static MemoryManager& the() PURE;
@@ -161,6 +179,9 @@ private:
     MemoryManager();
     ~MemoryManager();
 
+    void register_vmo(VMObject&);
+    void unregister_vmo(VMObject&);
+
     LinearAddress allocate_linear_address_range(size_t);
     void map_region_at_address(PageDirectory*, Region&, LinearAddress, bool user_accessible);
     void unmap_range(PageDirectory*, LinearAddress, size_t);
@@ -181,7 +202,7 @@ private:
     static Region* region_from_laddr(Process&, LinearAddress);
 
     bool copy_on_write(Process&, Region&, unsigned page_index_in_region);
-    bool page_in_from_vnode(Process&, Region&, unsigned page_index_in_region);
+    bool page_in_from_vnode(PageDirectory&, Region&, unsigned page_index_in_region);
 
     byte* quickmap_page(PhysicalPage&);
     void unquickmap_page();
@@ -273,6 +294,8 @@ private:
     LinearAddress m_next_laddr;
 
     Vector<RetainPtr<PhysicalPage>> m_free_physical_pages;
+
+    HashTable<VMObject*> m_vmos;
 };
 
 struct KernelPagingScope {

+ 4 - 13
Kernel/ProcFileSystem.cpp

@@ -167,25 +167,16 @@ void ProcFileSystem::removeProcess(Process& process)
 ByteBuffer procfs$mm()
 {
     // FIXME: Implement
-#if 0
     InterruptDisabler disabler;
-    size_t zonePageCount = 0;
-    for (auto* zone : MM.m_zones)
-        zonePageCount += zone->m_pages.size();
-    auto buffer = ByteBuffer::createUninitialized(1024 + 80 * MM.m_zones.size() + zonePageCount * 10);
+    auto buffer = ByteBuffer::createUninitialized(1024 + 80 * MM.m_vmos.size());
     char* ptr = (char*)buffer.pointer();
-    for (auto* zone : MM.m_zones) {
-        ptr += ksprintf(ptr, "Zone %p size: %u\n  ", zone, zone->size());
-        for (auto page : zone->m_pages)
-            ptr += ksprintf(ptr, "%x ", page);
-        ptr += ksprintf(ptr, "\n");
+    for (auto* vmo : MM.m_vmos) {
+        ptr += ksprintf(ptr, "VMO: %p %s (p:%u, r:%u)\n", vmo, vmo->name().characters(), vmo->page_count(), vmo->retainCount());
     }
-    ptr += ksprintf(ptr, "Zone count: %u\n", MM.m_zones.size());
+    ptr += ksprintf(ptr, "VMO count: %u\n", MM.m_vmos.size());
     ptr += ksprintf(ptr, "Free physical pages: %u\n", MM.m_free_physical_pages.size());
     buffer.trim(ptr - (char*)buffer.pointer());
     return buffer;
-#endif
-    return { };
 }
 
 

+ 45 - 19
Kernel/Process.cpp

@@ -94,12 +94,7 @@ Region* Process::allocate_region(LinearAddress laddr, size_t size, String&& name
         laddr = m_nextRegion;
         m_nextRegion = m_nextRegion.offset(size).offset(PAGE_SIZE);
     }
-
     laddr.mask(0xfffff000);
-
-    unsigned page_count = ceilDiv(size, PAGE_SIZE);
-    auto physical_pages = MM.allocate_physical_pages(page_count);
-    ASSERT(physical_pages.size() == page_count);
     m_regions.append(adopt(*new Region(laddr, size, move(name), is_readable, is_writable)));
     m_regions.last()->commit(*this);
     MM.mapRegion(*this, *m_regions.last());
@@ -115,18 +110,29 @@ Region* Process::allocate_file_backed_region(LinearAddress laddr, size_t size, R
         laddr = m_nextRegion;
         m_nextRegion = m_nextRegion.offset(size).offset(PAGE_SIZE);
     }
-
     laddr.mask(0xfffff000);
-
     unsigned page_count = ceilDiv(size, PAGE_SIZE);
-    Vector<RetainPtr<PhysicalPage>> physical_pages;
-    physical_pages.resize(page_count); // Start out with no physical pages!
-
     m_regions.append(adopt(*new Region(laddr, size, move(vnode), move(name), is_readable, is_writable)));
     MM.mapRegion(*this, *m_regions.last());
     return m_regions.last().ptr();
 }
 
+Region* Process::allocate_region_with_vmo(LinearAddress laddr, size_t size, RetainPtr<VMObject>&& vmo, size_t offset_in_vmo, String&& name, bool is_readable, bool is_writable)
+{
+    ASSERT(vmo);
+    // FIXME: This needs sanity checks. What if this overlaps existing regions?
+    if (laddr.is_null()) {
+        laddr = m_nextRegion;
+        m_nextRegion = m_nextRegion.offset(size).offset(PAGE_SIZE);
+    }
+    laddr.mask(0xfffff000);
+    offset_in_vmo &= PAGE_MASK;
+    size = ceilDiv(size, PAGE_SIZE) * PAGE_SIZE;
+    m_regions.append(adopt(*new Region(laddr, size, move(vmo), offset_in_vmo, move(name), is_readable, is_writable)));
+    MM.mapRegion(*this, *m_regions.last());
+    return m_regions.last().ptr();
+}
+
 bool Process::deallocate_region(Region& region)
 {
     InterruptDisabler disabler;
@@ -296,23 +302,42 @@ int Process::exec(const String& path, Vector<String>&& arguments, Vector<String>
     if (!descriptor->metadata().mayExecute(m_euid, m_gids))
         return -EACCES;
 
+    if (!descriptor->metadata().size) {
+        kprintf("exec() of 0-length binaries not supported\n");
+        return -ENOTIMPL;
+    }
+
+    auto vmo = VMObject::create_file_backed(descriptor->vnode(), descriptor->metadata().size);
+    auto* region = allocate_region_with_vmo(LinearAddress(), descriptor->metadata().size, vmo.copyRef(), 0, "helper", true, false);
+
+#if 0
     auto elfData = descriptor->readEntireFile();
     if (!elfData)
         return -EIO; // FIXME: Get a more detailed error from VFS.
+#endif
 
     dword entry_eip = 0;
-    PageDirectory* old_page_directory;
-    PageDirectory* new_page_directory;
+    PageDirectory* old_page_directory = m_page_directory;
+    PageDirectory* new_page_directory = reinterpret_cast<PageDirectory*>(kmalloc_page_aligned(sizeof(PageDirectory)));
+    dbgprintf("Process exec: PD=%x created\n", new_page_directory);
+    MM.populate_page_directory(*new_page_directory);
+    m_page_directory = new_page_directory;
+    MM.enter_process_paging_scope(*this);
+
+    bool success = region->page_in(*new_page_directory);
+
+    ASSERT(success);
     {
         InterruptDisabler disabler;
         // Okay, here comes the sleight of hand, pay close attention..
         auto old_regions = move(m_regions);
-        old_page_directory = m_page_directory;
-        new_page_directory = reinterpret_cast<PageDirectory*>(kmalloc_page_aligned(sizeof(PageDirectory)));
-        MM.populate_page_directory(*new_page_directory);
-        m_page_directory = new_page_directory;
-        MM.enter_process_paging_scope(*this);
-        ELFLoader loader(move(elfData));
+        ELFLoader loader(region->linearAddress.asPtr());
+        loader.map_section_hook = [&] (LinearAddress laddr, size_t size, size_t alignment, size_t offset_in_image, bool is_readable, bool is_writable, const String& name) {
+            ASSERT(size);
+            size = ((size / 4096) + 1) * 4096; // FIXME: Use ceil_div?
+            (void) allocate_region_with_vmo(laddr, size, vmo.copyRef(), offset_in_image, String(name), is_readable, is_writable);
+            return laddr.asPtr();
+        };
         loader.alloc_section_hook = [&] (LinearAddress laddr, size_t size, size_t alignment, bool is_readable, bool is_writable, const String& name) {
             ASSERT(size);
             size = ((size / 4096) + 1) * 4096; // FIXME: Use ceil_div?
@@ -580,6 +605,7 @@ Process::Process(String&& name, uid_t uid, gid_t gid, pid_t ppid, RingLevel ring
     }
 
     m_page_directory = (PageDirectory*)kmalloc_page_aligned(sizeof(PageDirectory));
+    dbgprintf("Process ctor: PD=%x created\n", m_page_directory);
     MM.populate_page_directory(*m_page_directory);
 
     if (fork_parent) {
@@ -1326,7 +1352,7 @@ pid_t Process::sys$setsid()
 {
     InterruptDisabler disabler;
     bool found_process_with_same_pgid_as_my_pid = false;
-    Process::for_each_in_pgrp(pid(), [&] (auto& process) {
+    Process::for_each_in_pgrp(pid(), [&] (auto&) {
         found_process_with_same_pgid_as_my_pid = true;
         return false;
     });

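Putting the exec() pieces together, the new happy path is (a condensed skeleton of the hunks above, error handling elided):

// 1. One VMO spanning the whole binary; reused if the vnode already has one.
auto vmo = VMObject::create_file_backed(descriptor->vnode(), descriptor->metadata().size);

// 2. A temporary read-only "helper" region makes the raw image addressable.
auto* region = allocate_region_with_vmo(LinearAddress(), descriptor->metadata().size, vmo.copyRef(), 0, "helper", true, false);

// 3. Swap in a fresh page directory, then eagerly page the image in under it.
m_page_directory = new_page_directory;
MM.enter_process_paging_scope(*this);
bool success = region->page_in(*new_page_directory);
ASSERT(success);

// 4. The loader reads straight out of the helper mapping; its two hooks
//    build the real segment regions (shared VMO vs. anonymous memory).
ELFLoader loader(region->linearAddress.asPtr());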
+ 3 - 1
Kernel/Process.h

@@ -14,6 +14,7 @@
 class FileDescriptor;
 class PageDirectory;
 class Region;
+class VMObject;
 class Zone;
 
 #define COOL_GLOBALS
@@ -253,7 +254,8 @@ private:
     TTY* m_tty { nullptr };
 
     Region* allocate_region(LinearAddress, size_t, String&& name, bool is_readable = true, bool is_writable = true);
-    Region* allocate_file_backed_region(LinearAddress laddr, size_t size, RetainPtr<VirtualFileSystem::Node>&& vnode, String&& name, bool is_readable, bool is_writable);
+    Region* allocate_file_backed_region(LinearAddress, size_t, RetainPtr<VirtualFileSystem::Node>&& vnode, String&& name, bool is_readable, bool is_writable);
+    Region* allocate_region_with_vmo(LinearAddress, size_t, RetainPtr<VMObject>&&, size_t offset_in_vmo, String&& name, bool is_readable, bool is_writable);
     bool deallocate_region(Region& region);
 
     Region* regionFromRange(LinearAddress, size_t);

+ 5 - 1
Kernel/i386.cpp

@@ -188,11 +188,15 @@ void exception_14_handler(RegisterDumpWithExceptionCode& regs)
     dword faultAddress;
     asm ("movl %%cr2, %%eax":"=a"(faultAddress));
 
-    dbgprintf("%s(%u): ring%u %s page fault, %s L%x\n",
+    dword fault_page_directory;
+    asm ("movl %%cr3, %%eax":"=a"(fault_page_directory));
+
+    dbgprintf("%s(%u): ring%u %s page fault in PD=%x, %s L%x\n",
         current->name().characters(),
         current->pid(),
         regs.cs & 3,
         regs.exception_code & 1 ? "PV" : "NP",
+        fault_page_directory,
         regs.exception_code & 2 ? "write" : "read",
         faultAddress);