
Kernel: Make the x86 paging code slightly less insane.

Instead of PDEs and PTEs being weird wrappers around dword*, just have
MemoryManager::ensure_pte() return a PageTableEntry&, which you reach through
a PageDirectoryEntry whose page_table_base() returns the PageTableEntry* array.

I've been trying to understand how things ended up this way, and I suspect
it was because I inadvertently invoked the PageDirectoryEntry copy ctor in
the original work on this, which must have made me very confused.

Anyways, now things are a bit saner and we can move forward towards a better
future, etc. :^)
Andreas Kling, 6 years ago
commit 183205d51c
4 changed files with 126 additions and 123 deletions
  1. Kernel/Arch/i386/CPU.h (+105, -0)
  2. Kernel/VM/MemoryManager.cpp (+17, -19)
  3. Kernel/VM/MemoryManager.h (+3, -103)
  4. Kernel/VM/PageDirectory.h (+1, -1)
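
To see what the reference-returning ensure_pte() buys: with the old value-type
wrappers, writing "auto pte = ensure_pte(...)" quietly copy-constructed the
wrapper, the kind of inadvertent copy the message above suspects caused the
confusion. Now that both entry classes are AK_MAKE_NONCOPYABLE and ensure_pte()
returns a reference, such a copy is a compile-time error. A minimal sketch,
mirroring the map_for_kernel() hunk below (vaddr and paddr stand in for real
arguments):

    // Inside a MemoryManager member function:
    auto& pte = ensure_pte(kernel_page_directory(), vaddr); // binds to the actual in-memory PTE
    // auto pte = ensure_pte(kernel_page_directory(), vaddr); // error: PageTableEntry copy ctor is deleted
    pte.set_physical_page_base(paddr.get());
    pte.set_present(true);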

Kernel/Arch/i386/CPU.h (+105, -0)

@@ -1,11 +1,16 @@
 #pragma once
 
+#include <AK/Badge.h>
+#include <AK/Noncopyable.h>
 #include <Kernel/VirtualAddress.h>
 #include <Kernel/kstdio.h>
 
 #define PAGE_SIZE 4096
 #define PAGE_MASK 0xfffff000
 
+class MemoryManager;
+class PageTableEntry;
+
 struct [[gnu::packed]] TSS32
 {
     word backlink, __blh;
@@ -79,6 +84,106 @@ union [[gnu::packed]] Descriptor
     }
 };
 
+class PageDirectoryEntry {
+    AK_MAKE_NONCOPYABLE(PageDirectoryEntry);
+
+public:
+    PageTableEntry* page_table_base() { return reinterpret_cast<PageTableEntry*>(m_raw & 0xfffff000u); }
+    void set_page_table_base(dword value)
+    {
+        m_raw &= 0xfff;
+        m_raw |= value & 0xfffff000;
+    }
+
+    dword raw() const { return m_raw; }
+    void copy_from(Badge<MemoryManager>, const PageDirectoryEntry& other) { m_raw = other.m_raw; }
+
+    enum Flags {
+        Present = 1 << 0,
+        ReadWrite = 1 << 1,
+        UserSupervisor = 1 << 2,
+        WriteThrough = 1 << 3,
+        CacheDisabled = 1 << 4,
+    };
+
+    bool is_present() const { return raw() & Present; }
+    void set_present(bool b) { set_bit(Present, b); }
+
+    bool is_user_allowed() const { return raw() & UserSupervisor; }
+    void set_user_allowed(bool b) { set_bit(UserSupervisor, b); }
+
+    bool is_writable() const { return raw() & ReadWrite; }
+    void set_writable(bool b) { set_bit(ReadWrite, b); }
+
+    bool is_write_through() const { return raw() & WriteThrough; }
+    void set_write_through(bool b) { set_bit(WriteThrough, b); }
+
+    bool is_cache_disabled() const { return raw() & CacheDisabled; }
+    void set_cache_disabled(bool b) { set_bit(CacheDisabled, b); }
+
+    void set_bit(byte bit, bool value)
+    {
+        if (value)
+            m_raw |= bit;
+        else
+            m_raw &= ~bit;
+    }
+
+private:
+    dword m_raw;
+};
+
+class PageTableEntry {
+    AK_MAKE_NONCOPYABLE(PageTableEntry);
+
+public:
+    void* physical_page_base() { return reinterpret_cast<void*>(m_raw & 0xfffff000u); }
+    void set_physical_page_base(dword value)
+    {
+        m_raw &= 0xfff;
+        m_raw |= value & 0xfffff000;
+    }
+
+    dword raw() const { return m_raw; }
+
+    enum Flags {
+        Present = 1 << 0,
+        ReadWrite = 1 << 1,
+        UserSupervisor = 1 << 2,
+        WriteThrough = 1 << 3,
+        CacheDisabled = 1 << 4,
+    };
+
+    bool is_present() const { return raw() & Present; }
+    void set_present(bool b) { set_bit(Present, b); }
+
+    bool is_user_allowed() const { return raw() & UserSupervisor; }
+    void set_user_allowed(bool b) { set_bit(UserSupervisor, b); }
+
+    bool is_writable() const { return raw() & ReadWrite; }
+    void set_writable(bool b) { set_bit(ReadWrite, b); }
+
+    bool is_write_through() const { return raw() & WriteThrough; }
+    void set_write_through(bool b) { set_bit(WriteThrough, b); }
+
+    bool is_cache_disabled() const { return raw() & CacheDisabled; }
+    void set_cache_disabled(bool b) { set_bit(CacheDisabled, b); }
+
+    void set_bit(byte bit, bool value)
+    {
+        if (value)
+            m_raw |= bit;
+        else
+            m_raw &= ~bit;
+    }
+
+private:
+    dword m_raw;
+};
+
+static_assert(sizeof(PageDirectoryEntry) == 4);
+static_assert(sizeof(PageTableEntry) == 4);
+
 class IRQHandler;
 
 void gdt_init();
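
About the new copy_from(Badge<MemoryManager>, ...) above: AK's Badge is a
capability token whose constructor is only reachable by the befriended type,
so only MemoryManager code can produce the {} argument and call copy_from().
A simplified sketch of the idea (not the actual AK/Badge.h):

    template<typename T>
    class Badge {
        friend T;   // only T may construct a Badge<T>
        Badge() {}
    };

    // Inside a MemoryManager member function this compiles:
    //     pde.copy_from({}, other_pde);
    // Elsewhere, {} cannot convert to Badge<MemoryManager>, so the call is rejected.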

Kernel/VM/MemoryManager.cpp (+17, -19)

@@ -21,8 +21,8 @@ MemoryManager& MM
 MemoryManager::MemoryManager()
 {
     m_kernel_page_directory = PageDirectory::create_at_fixed_address(PhysicalAddress(0x4000));
-    m_page_table_zero = (dword*)0x6000;
-    m_page_table_one = (dword*)0x7000;
+    m_page_table_zero = (PageTableEntry*)0x6000;
+    m_page_table_one = (PageTableEntry*)0x7000;
 
     initialize_paging();
 
@@ -36,17 +36,15 @@ MemoryManager::~MemoryManager()
 void MemoryManager::populate_page_directory(PageDirectory& page_directory)
 {
     page_directory.m_directory_page = allocate_supervisor_physical_page();
-    page_directory.entries()[0] = kernel_page_directory().entries()[0];
-    page_directory.entries()[1] = kernel_page_directory().entries()[1];
+    page_directory.entries()[0].copy_from({}, kernel_page_directory().entries()[0]);
+    page_directory.entries()[1].copy_from({}, kernel_page_directory().entries()[1]);
     // Defer to the kernel page tables for 0xC0000000-0xFFFFFFFF
     for (int i = 768; i < 1024; ++i)
-        page_directory.entries()[i] = kernel_page_directory().entries()[i];
+        page_directory.entries()[i].copy_from({}, kernel_page_directory().entries()[i]);
 }
 
 void MemoryManager::initialize_paging()
 {
-    static_assert(sizeof(MemoryManager::PageDirectoryEntry) == 4);
-    static_assert(sizeof(MemoryManager::PageTableEntry) == 4);
     memset(m_page_table_zero, 0, PAGE_SIZE);
     memset(m_page_table_one, 0, PAGE_SIZE);
 
@@ -167,7 +165,7 @@ void MemoryManager::remove_identity_mapping(PageDirectory& page_directory, Virtu
     // FIXME: ASSERT(vaddr is 4KB aligned);
     for (dword offset = 0; offset < size; offset += PAGE_SIZE) {
         auto pte_address = vaddr.offset(offset);
-        auto pte = ensure_pte(page_directory, pte_address);
+        auto& pte = ensure_pte(page_directory, pte_address);
         pte.set_physical_page_base(0);
         pte.set_user_allowed(false);
         pte.set_present(true);
@@ -176,13 +174,13 @@ void MemoryManager::remove_identity_mapping(PageDirectory& page_directory, Virtu
     }
 }
 
-auto MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr) -> PageTableEntry
+PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
 {
     ASSERT_INTERRUPTS_DISABLED();
     dword page_directory_index = (vaddr.get() >> 22) & 0x3ff;
     dword page_table_index = (vaddr.get() >> 12) & 0x3ff;
 
-    PageDirectoryEntry pde = PageDirectoryEntry(&page_directory.entries()[page_directory_index]);
+    PageDirectoryEntry& pde = page_directory.entries()[page_directory_index];
     if (!pde.is_present()) {
 #ifdef MM_DEBUG
         dbgprintf("MM: PDE %u not present (requested for L%x), allocating\n", page_directory_index, vaddr.get());
@@ -219,7 +217,7 @@ auto MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vad
             page_directory.m_physical_pages.set(page_directory_index, move(page_table));
         }
     }
-    return PageTableEntry(&pde.page_table_base()[page_table_index]);
+    return pde.page_table_base()[page_table_index];
 }
 
 void MemoryManager::map_protected(VirtualAddress vaddr, size_t length)
@@ -228,7 +226,7 @@ void MemoryManager::map_protected(VirtualAddress vaddr, size_t length)
     ASSERT(vaddr.is_page_aligned());
     for (dword offset = 0; offset < length; offset += PAGE_SIZE) {
         auto pte_address = vaddr.offset(offset);
-        auto pte = ensure_pte(kernel_page_directory(), pte_address);
+        auto& pte = ensure_pte(kernel_page_directory(), pte_address);
         pte.set_physical_page_base(pte_address.get());
         pte.set_user_allowed(false);
         pte.set_present(false);
@@ -243,7 +241,7 @@ void MemoryManager::create_identity_mapping(PageDirectory& page_directory, Virtu
     ASSERT((vaddr.get() & ~PAGE_MASK) == 0);
     for (dword offset = 0; offset < size; offset += PAGE_SIZE) {
         auto pte_address = vaddr.offset(offset);
-        auto pte = ensure_pte(page_directory, pte_address);
+        auto& pte = ensure_pte(page_directory, pte_address);
         pte.set_physical_page_base(pte_address.get());
         pte.set_user_allowed(false);
         pte.set_present(true);
@@ -595,7 +593,7 @@ void MemoryManager::flush_tlb(VirtualAddress vaddr)
 
 void MemoryManager::map_for_kernel(VirtualAddress vaddr, PhysicalAddress paddr)
 {
-    auto pte = ensure_pte(kernel_page_directory(), vaddr);
+    auto& pte = ensure_pte(kernel_page_directory(), vaddr);
     pte.set_physical_page_base(paddr.get());
     pte.set_present(true);
     pte.set_writable(true);
@@ -609,7 +607,7 @@ byte* MemoryManager::quickmap_page(PhysicalPage& physical_page)
     ASSERT(!m_quickmap_in_use);
     m_quickmap_in_use = true;
     auto page_vaddr = m_quickmap_addr;
-    auto pte = ensure_pte(kernel_page_directory(), page_vaddr);
+    auto& pte = ensure_pte(kernel_page_directory(), page_vaddr);
     pte.set_physical_page_base(physical_page.paddr().get());
     pte.set_present(true);
     pte.set_writable(true);
@@ -627,7 +625,7 @@ void MemoryManager::unquickmap_page()
     ASSERT_INTERRUPTS_DISABLED();
     ASSERT(m_quickmap_in_use);
     auto page_vaddr = m_quickmap_addr;
-    auto pte = ensure_pte(kernel_page_directory(), page_vaddr);
+    auto& pte = ensure_pte(kernel_page_directory(), page_vaddr);
 #ifdef MM_DEBUG
     auto old_physical_address = pte.physical_page_base();
 #endif
@@ -646,7 +644,7 @@ void MemoryManager::remap_region_page(Region& region, unsigned page_index_in_reg
     ASSERT(region.page_directory());
     InterruptDisabler disabler;
     auto page_vaddr = region.vaddr().offset(page_index_in_region * PAGE_SIZE);
-    auto pte = ensure_pte(*region.page_directory(), page_vaddr);
+    auto& pte = ensure_pte(*region.page_directory(), page_vaddr);
     auto& physical_page = region.vmo().physical_pages()[page_index_in_region];
     ASSERT(physical_page);
     pte.set_physical_page_base(physical_page->paddr().get());
@@ -681,7 +679,7 @@ void MemoryManager::map_region_at_address(PageDirectory& page_directory, Region&
 #endif
     for (size_t i = 0; i < region.page_count(); ++i) {
         auto page_vaddr = vaddr.offset(i * PAGE_SIZE);
-        auto pte = ensure_pte(page_directory, page_vaddr);
+        auto& pte = ensure_pte(page_directory, page_vaddr);
         auto& physical_page = vmo.physical_pages()[region.first_page_index() + i];
         if (physical_page) {
             pte.set_physical_page_base(physical_page->paddr().get());
@@ -712,7 +710,7 @@ bool MemoryManager::unmap_region(Region& region)
     InterruptDisabler disabler;
     for (size_t i = 0; i < region.page_count(); ++i) {
         auto vaddr = region.vaddr().offset(i * PAGE_SIZE);
-        auto pte = ensure_pte(*region.page_directory(), vaddr);
+        auto& pte = ensure_pte(*region.page_directory(), vaddr);
         pte.set_physical_page_base(0);
         pte.set_present(false);
         pte.set_writable(false);
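
For reference, the index math in ensure_pte() splits a 32-bit virtual address
10/10/12: the top ten bits select the page directory entry, the next ten
select the page table entry, and the low twelve are the offset inside the
4 KiB page. A worked example with a made-up kernel address:

    dword vaddr = 0xc0123456;
    dword page_directory_index = (vaddr >> 22) & 0x3ff; // 768: first PDE of the 0xC0000000 kernel range
    dword page_table_index = (vaddr >> 12) & 0x3ff;     // 0x123 = 291
    dword offset_in_page = vaddr & 0xfff;               // 0x456 (ensure_pte() never looks at this part)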

Kernel/VM/MemoryManager.h (+3, -103)

@@ -112,111 +112,11 @@ private:
 
     PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }
 
-    struct PageDirectoryEntry {
-        explicit PageDirectoryEntry(dword* pde)
-            : m_pde(pde)
-        {
-        }
-
-        dword* page_table_base() { return reinterpret_cast<dword*>(raw() & 0xfffff000u); }
-        void set_page_table_base(dword value)
-        {
-            *m_pde &= 0xfff;
-            *m_pde |= value & 0xfffff000;
-        }
-
-        dword raw() const { return *m_pde; }
-        dword* ptr() { return m_pde; }
-
-        enum Flags {
-            Present = 1 << 0,
-            ReadWrite = 1 << 1,
-            UserSupervisor = 1 << 2,
-            WriteThrough = 1 << 3,
-            CacheDisabled = 1 << 4,
-        };
-
-        bool is_present() const { return raw() & Present; }
-        void set_present(bool b) { set_bit(Present, b); }
-
-        bool is_user_allowed() const { return raw() & UserSupervisor; }
-        void set_user_allowed(bool b) { set_bit(UserSupervisor, b); }
-
-        bool is_writable() const { return raw() & ReadWrite; }
-        void set_writable(bool b) { set_bit(ReadWrite, b); }
-
-        bool is_write_through() const { return raw() & WriteThrough; }
-        void set_write_through(bool b) { set_bit(WriteThrough, b); }
-
-        bool is_cache_disabled() const { return raw() & CacheDisabled; }
-        void set_cache_disabled(bool b) { set_bit(CacheDisabled, b); }
-
-        void set_bit(byte bit, bool value)
-        {
-            if (value)
-                *m_pde |= bit;
-            else
-                *m_pde &= ~bit;
-        }
-
-        dword* m_pde;
-    };
-
-    struct PageTableEntry {
-        explicit PageTableEntry(dword* pte)
-            : m_pte(pte)
-        {
-        }
-
-        dword* physical_page_base() { return reinterpret_cast<dword*>(raw() & 0xfffff000u); }
-        void set_physical_page_base(dword value)
-        {
-            *m_pte &= 0xfffu;
-            *m_pte |= value & 0xfffff000u;
-        }
-
-        dword raw() const { return *m_pte; }
-        dword* ptr() { return m_pte; }
-
-        enum Flags {
-            Present = 1 << 0,
-            ReadWrite = 1 << 1,
-            UserSupervisor = 1 << 2,
-            WriteThrough = 1 << 3,
-            CacheDisabled = 1 << 4,
-        };
-
-        bool is_present() const { return raw() & Present; }
-        void set_present(bool b) { set_bit(Present, b); }
-
-        bool is_user_allowed() const { return raw() & UserSupervisor; }
-        void set_user_allowed(bool b) { set_bit(UserSupervisor, b); }
-
-        bool is_writable() const { return raw() & ReadWrite; }
-        void set_writable(bool b) { set_bit(ReadWrite, b); }
-
-        bool is_write_through() const { return raw() & WriteThrough; }
-        void set_write_through(bool b) { set_bit(WriteThrough, b); }
-
-        bool is_cache_disabled() const { return raw() & CacheDisabled; }
-        void set_cache_disabled(bool b) { set_bit(CacheDisabled, b); }
-
-        void set_bit(byte bit, bool value)
-        {
-            if (value)
-                *m_pte |= bit;
-            else
-                *m_pte &= ~bit;
-        }
-
-        dword* m_pte;
-    };
-
-    PageTableEntry ensure_pte(PageDirectory&, VirtualAddress);
+    PageTableEntry& ensure_pte(PageDirectory&, VirtualAddress);
 
     RefPtr<PageDirectory> m_kernel_page_directory;
-    dword* m_page_table_zero { nullptr };
-    dword* m_page_table_one { nullptr };
+    PageTableEntry* m_page_table_zero { nullptr };
+    PageTableEntry* m_page_table_one { nullptr };
 
     VirtualAddress m_quickmap_addr;
 

Kernel/VM/PageDirectory.h (+1, -1)

@@ -15,7 +15,7 @@ public:
     ~PageDirectory();
 
     dword cr3() const { return m_directory_page->paddr().get(); }
-    dword* entries() { return reinterpret_cast<dword*>(cr3()); }
+    PageDirectoryEntry* entries() { return reinterpret_cast<PageDirectoryEntry*>(cr3()); }
 
     void flush(VirtualAddress);
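
Note that entries() reinterprets the directory page's physical address (the
same value handed to cr3) as a pointer, which presumably works because that
memory is identity-mapped by the supervisor-page setup elsewhere in
MemoryManager. A hypothetical use after this change:

    auto& pde = page_directory.entries()[768];          // directory slot covering 0xC0000000
    if (pde.is_present()) {
        PageTableEntry* table = pde.page_table_base();  // the 1024 PTEs for that 4 MiB region
        // table[0] maps 0xC0000000, table[1] maps 0xC0001000, and so on.
    }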