
Give each task its own page directory.

This isn't finished, but I'll commit as I go. We need to get to the point where
context switching only needs a CR3 change and everything's ready to go.

My basic idea is:
- The first 4 kB is off-limits. This catches null dereferences.
- Up to the 4 MB mark is identity-mapped and kernel-only.
- The rest is available to everyone!

While the first 4 MB is only available to the kernel, it's still mapped in
every process, for convenience when entering the kernel.
Andreas Kling committed 6 years ago
commit 1da0a7c949
7 changed files with 47 additions and 15 deletions
  1. Kernel/MemoryManager.cpp (+19 -11)
  2. Kernel/MemoryManager.h (+3 -1)
  3. Kernel/ProcFileSystem.cpp (+2 -2)
  4. Kernel/Task.cpp (+4 -1)
  5. Kernel/Task.h (+2 -0)
  6. Kernel/kmalloc.cpp (+15 -0)
  7. Kernel/kmalloc.h (+2 -0)

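Before the per-file diffs, a quick sketch of what the plan amounts to. On i386 with 4 kB pages, a page directory is 1024 dword entries, each mapping 4 MB of linear address space, and CR3 holds the page-aligned physical address of the directory. Here is a minimal, hypothetical sketch of per-task directory setup; kmalloc_page_aligned() and the two copied entries come from this commit, but the wrapper function itself is illustrative:

    // Illustrative only; mirrors MemoryManager::populatePageDirectory() below.
    dword* createPageDirectory(const dword* kernelPageDirectory)
    {
        // Page directories must be 4096-byte aligned: CR3 stores a page-aligned physical address.
        dword* pd = (dword*)kmalloc_page_aligned(4096);
        memset(pd, 0, 4096);             // every PDE starts out not-present
        pd[0] = kernelPageDirectory[0];  // share the kernel's 0-4 MB mappings (identity-mapped, ring 0)
        pd[1] = kernelPageDirectory[1];  // share 4-8 MB as well (the quickmap slot lives at 4 MB)
        return pd;
    }

Copying the two PDEs, rather than the page tables they point at, means any later kernel mapping in that range shows up in every process automatically.
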
+ 19 - 11
Kernel/MemoryManager.cpp

@@ -26,6 +26,14 @@ MemoryManager::~MemoryManager()
 {
 }
 
+void MemoryManager::populatePageDirectory(Task& task)
+{
+    memset(task.m_pageDirectory, 0, 4096);
+
+    task.m_pageDirectory[0] = m_pageDirectory[0];
+    task.m_pageDirectory[1] = m_pageDirectory[1];
+}
+
 void MemoryManager::initializePaging()
 {
     static_assert(sizeof(MemoryManager::PageDirectoryEntry) == 4);
@@ -41,6 +49,7 @@ void MemoryManager::initializePaging()
     // Make null dereferences crash.
     protectMap(LinearAddress(0), 4 * KB);
 
+    // The bottom 4 MB are identity mapped & supervisor only. Every process shares this mapping.
     identityMap(LinearAddress(4096), 4 * MB);
 
     for (size_t i = (4 * MB) + PAGE_SIZE; i < (8 * MB); i += PAGE_SIZE) {
@@ -63,13 +72,13 @@ void* MemoryManager::allocatePageTable()
     return (void*)address;
 }
 
-auto MemoryManager::ensurePTE(LinearAddress linearAddress) -> PageTableEntry
+auto MemoryManager::ensurePTE(dword* pageDirectory, LinearAddress linearAddress) -> PageTableEntry
 {
     ASSERT_INTERRUPTS_DISABLED();
     dword pageDirectoryIndex = (linearAddress.get() >> 22) & 0x3ff;
     dword pageTableIndex = (linearAddress.get() >> 12) & 0x3ff;
 
-    PageDirectoryEntry pde = PageDirectoryEntry(&m_pageDirectory[pageDirectoryIndex]);
+    PageDirectoryEntry pde = PageDirectoryEntry(&pageDirectory[pageDirectoryIndex]);
     if (!pde.isPresent()) {
 #ifdef MM_DEBUG
         kprintf("MM: PDE %u not present, allocating\n", pageDirectoryIndex);
@@ -103,7 +112,7 @@ void MemoryManager::protectMap(LinearAddress linearAddress, size_t length)
     // FIXME: ASSERT(linearAddress is 4KB aligned);
     for (dword offset = 0; offset < length; offset += 4096) {
         auto pteAddress = linearAddress.offset(offset);
-        auto pte = ensurePTE(pteAddress);
+        auto pte = ensurePTE(m_pageDirectory, pteAddress);
         pte.setPhysicalPageBase(pteAddress.get());
         pte.setUserAllowed(false);
         pte.setPresent(false);
@@ -118,7 +127,7 @@ void MemoryManager::identityMap(LinearAddress linearAddress, size_t length)
     // FIXME: ASSERT(linearAddress is 4KB aligned);
     for (dword offset = 0; offset < length; offset += 4096) {
         auto pteAddress = linearAddress.offset(offset);
-        auto pte = ensurePTE(pteAddress);
+        auto pte = ensurePTE(m_pageDirectory, pteAddress);
         pte.setPhysicalPageBase(pteAddress.get());
         pte.setUserAllowed(true);
         pte.setPresent(true);
@@ -195,7 +204,7 @@ Vector<PhysicalAddress> MemoryManager::allocatePhysicalPages(size_t count)
 byte* MemoryManager::quickMapOnePage(PhysicalAddress physicalAddress)
 {
     ASSERT_INTERRUPTS_DISABLED();
-    auto pte = ensurePTE(LinearAddress(4 * MB));
+    auto pte = ensurePTE(m_pageDirectory, LinearAddress(4 * MB));
     kprintf("MM: quickmap %x @ %x {pte @ %p}\n", physicalAddress.get(), 4*MB, pte.ptr());
     pte.setPhysicalPageBase(physicalAddress.pageBase());
     pte.setPresent(true);
@@ -223,7 +232,7 @@ bool MemoryManager::unmapRegion(Task& task, Task::Region& region)
     auto& zone = *region.zone;
     for (size_t i = 0; i < zone.m_pages.size(); ++i) {
         auto laddr = region.linearAddress.offset(i * PAGE_SIZE);
-        auto pte = ensurePTE(laddr);
+        auto pte = ensurePTE(task.m_pageDirectory, laddr);
         pte.setPhysicalPageBase(0);
         pte.setPresent(false);
         pte.setWritable(false);
@@ -238,12 +247,11 @@ bool MemoryManager::unmapSubregion(Task& task, Task::Subregion& subregion)
 {
     InterruptDisabler disabler;
     auto& region = *subregion.region;
-    auto& zone = *region.zone;
     size_t numPages = subregion.size / 4096;
     ASSERT(numPages);
     for (size_t i = 0; i < numPages; ++i) {
         auto laddr = subregion.linearAddress.offset(i * PAGE_SIZE);
-        auto pte = ensurePTE(laddr);
+        auto pte = ensurePTE(task.m_pageDirectory, laddr);
         pte.setPhysicalPageBase(0);
         pte.setPresent(false);
         pte.setWritable(false);
@@ -278,7 +286,7 @@ bool MemoryManager::mapSubregion(Task& task, Task::Subregion& subregion)
     ASSERT(numPages);
     for (size_t i = 0; i < numPages; ++i) {
         auto laddr = subregion.linearAddress.offset(i * PAGE_SIZE);
-        auto pte = ensurePTE(laddr);
+        auto pte = ensurePTE(task.m_pageDirectory, laddr);
         pte.setPhysicalPageBase(zone.m_pages[firstPage + i].get());
         pte.setPresent(true);
         pte.setWritable(true);
@@ -295,11 +303,11 @@ bool MemoryManager::mapRegion(Task& task, Task::Region& region)
     auto& zone = *region.zone;
     for (size_t i = 0; i < zone.m_pages.size(); ++i) {
         auto laddr = region.linearAddress.offset(i * PAGE_SIZE);
-        auto pte = ensurePTE(laddr);
+        auto pte = ensurePTE(task.m_pageDirectory, laddr);
         pte.setPhysicalPageBase(zone.m_pages[i].get());
         pte.setPresent(true);
         pte.setWritable(true);
-        pte.setUserAllowed(!task.isRing0());
+        pte.setUserAllowed(!task.isRing0()); // FIXME: This doesn't make sense. Allow USER if the TASK is RING0? Wh...what?
         flushTLB(laddr);
         //kprintf("MM: >> Mapped L%x => P%x <<\n", laddr, zone.m_pages[i].get());
     }

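The two entries copied by populatePageDirectory() are exactly the ones ensurePTE() indexes for the bottom 8 MB: the page directory index is the top 10 bits of the linear address, so each entry covers 4 MB, and entries 0 and 1 span the protected null page, the identity-mapped kernel range, and the quickmap slot at 4 MB. A standalone check of the index math, using the same shifts as ensurePTE() (hypothetical test code, not part of the commit):

    #include <cstdio>

    int main()
    {
        // Same decomposition as ensurePTE(): PD index = top 10 bits, PT index = next 10 bits.
        unsigned long addrs[] = { 0x0, 0x1000, 0x3fffff, 0x400000, 0x7fffff, 0x800000 };
        for (unsigned long a : addrs) {
            unsigned long pageDirectoryIndex = (a >> 22) & 0x3ff;
            unsigned long pageTableIndex = (a >> 12) & 0x3ff;
            printf("L%08lx -> PDE %lu, PTE %lu\n", a, pageDirectoryIndex, pageTableIndex);
        }
        // Everything below 4 MB lands in PDE 0, 4-8 MB in PDE 1, and 8 MB starts PDE 2.
        return 0;
    }
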
+ 3 - 1
Kernel/MemoryManager.h

@@ -65,6 +65,8 @@ public:
     void registerZone(Zone&);
     void unregisterZone(Zone&);
 
+    void populatePageDirectory(Task&);
+
 private:
     MemoryManager();
     ~MemoryManager();
@@ -158,7 +160,7 @@ private:
         dword* m_pte;
     };
 
-    PageTableEntry ensurePTE(LinearAddress);
+    PageTableEntry ensurePTE(dword* pageDirectory, LinearAddress);
 
     dword* m_pageDirectory;
     dword* m_pageTableZero;

+ 2 - 2
Kernel/ProcFileSystem.cpp

@@ -172,9 +172,9 @@ ByteBuffer procfs$mounts()
 ByteBuffer procfs$kmalloc()
 {
     InterruptDisabler disabler;
-    auto buffer = ByteBuffer::createUninitialized(128);
+    auto buffer = ByteBuffer::createUninitialized(256);
     char* ptr = (char*)buffer.pointer();
-    ptr += ksprintf(ptr, "eternal:   %u\nallocated: %u\nfree:      %u\n", kmalloc_sum_eternal, sum_alloc, sum_free);
+    ptr += ksprintf(ptr, "eternal:      %u\npage-aligned: %u\nallocated:    %u\nfree:         %u\n", kmalloc_sum_eternal, kmalloc_sum_page_aligned, sum_alloc, sum_free);
     buffer.trim(ptr - (char*)buffer.pointer());
     return buffer;
 }

+ 4 - 1
Kernel/Task.cpp

@@ -408,6 +408,9 @@ Task::Task(String&& name, uid_t uid, gid_t gid, pid_t parentPID, RingLevel ring,
     , m_tty(tty)
     , m_parentPID(parentPID)
 {
+    m_pageDirectory = (dword*)kmalloc_page_aligned(4096);
+    MM.populatePageDirectory(*this);
+
     if (tty) {
         m_fileHandles.append(tty->open(O_RDONLY)); // stdin
         m_fileHandles.append(tty->open(O_WRONLY)); // stdout
@@ -449,7 +452,7 @@ Task::Task(String&& name, uid_t uid, gid_t gid, pid_t parentPID, RingLevel ring,
     m_tss.ss = ss;
     m_tss.cs = cs;
 
-    m_tss.cr3 = MM.pageDirectoryBase().get();
+    m_tss.cr3 = (dword)m_pageDirectory;
 
     if (isRing0()) {
         // FIXME: This memory is leaked.

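With m_tss.cr3 now pointing at the task's own directory, the address-space half of a context switch reduces to whatever loads CR3: a hardware task switch loads it from the incoming TSS, and a software switcher would do the load by hand. A hedged sketch of the manual version (not this kernel's actual switch path):

    // Hypothetical software address-space switch. The pointer from
    // kmalloc_page_aligned() works directly because it lies in the
    // identity-mapped low memory, so the linear address equals the
    // physical address CR3 requires. Reloading CR3 also flushes
    // non-global TLB entries, discarding the previous task's translations.
    static inline void switchAddressSpace(dword* pageDirectory)
    {
        asm volatile("movl %0, %%cr3" : : "r"(pageDirectory) : "memory");
    }
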
+ 2 - 0
Kernel/Task.h

@@ -152,6 +152,8 @@ private:
 
     void allocateLDT();
 
+    dword* m_pageDirectory { nullptr };
+
     Task* m_prev { nullptr };
     Task* m_next { nullptr };
 

+ 15 - 0
Kernel/kmalloc.cpp

@@ -22,6 +22,7 @@ typedef struct
 #define CHUNK_SIZE  128
 #define POOL_SIZE   (1024 * 1024)
 
+#define PAGE_ALIGNED_BASE_PHYSICAL 0x380000
 #define ETERNAL_BASE_PHYSICAL 0x300000
 #define BASE_PHYS   0x200000
 
@@ -30,8 +31,10 @@ PRIVATE BYTE alloc_map[POOL_SIZE / CHUNK_SIZE / 8];
 volatile DWORD sum_alloc = 0;
 volatile DWORD sum_free = POOL_SIZE;
 volatile size_t kmalloc_sum_eternal = 0;
+volatile size_t kmalloc_sum_page_aligned = 0;
 
 static byte* s_next_eternal_ptr;
+static byte* s_next_page_aligned_ptr;
 
 bool is_kmalloc_address(void* ptr)
 {
@@ -47,10 +50,12 @@ kmalloc_init()
     memset( (void *)BASE_PHYS, 0, POOL_SIZE );
 
     kmalloc_sum_eternal = 0;
+    kmalloc_sum_page_aligned = 0;
     sum_alloc = 0;
     sum_free = POOL_SIZE;
 
     s_next_eternal_ptr = (byte*)ETERNAL_BASE_PHYSICAL;
+    s_next_page_aligned_ptr = (byte*)PAGE_ALIGNED_BASE_PHYSICAL;
 }
 
 void* kmalloc_eternal(size_t size)
@@ -61,6 +66,16 @@ void* kmalloc_eternal(size_t size)
     return ptr;
 }
 
+void* kmalloc_page_aligned(size_t size)
+{
+    ASSERT((size % 4096) == 0);
+    void* ptr = s_next_page_aligned_ptr;
+    s_next_page_aligned_ptr += size;
+    kmalloc_sum_page_aligned += size;
+    return ptr;
+}
+
+
 PUBLIC void *
 kmalloc( DWORD size )
 {

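kmalloc_page_aligned() is a plain bump allocator over a fixed window starting at PAGE_ALIGNED_BASE_PHYSICAL, with no corresponding free: alignment holds because the base is page-aligned and the ASSERT forces every size to be a multiple of 4096. Note the window starts 512 kB above ETERNAL_BASE_PHYSICAL, so the eternal heap has that much headroom before the two bump pointers could collide; nothing enforces that boundary. A hypothetical usage check:

    // Each allocation stays page-aligned: aligned base + multiple-of-4096 sizes.
    dword* pd = (dword*)kmalloc_page_aligned(4096); // e.g. one page directory
    ASSERT(((dword)pd & 0xfff) == 0);               // satisfies CR3's alignment requirement
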
+ 2 - 0
Kernel/kmalloc.h

@@ -3,6 +3,7 @@
 void kmalloc_init();
 void *kmalloc(DWORD size) __attribute__ ((malloc));
 void* kmalloc_eternal(size_t) __attribute__ ((malloc));
+void* kmalloc_page_aligned(size_t) __attribute__ ((malloc));
 void kfree(void*);
 
 bool is_kmalloc_address(void*);
@@ -10,6 +11,7 @@ bool is_kmalloc_address(void*);
 extern volatile DWORD sum_alloc;
 extern volatile DWORD sum_free;
 extern volatile dword kmalloc_sum_eternal;
+extern volatile dword kmalloc_sum_page_aligned;
 
 inline void* operator new(size_t, void* p) { return p; }
 inline void* operator new[](size_t, void* p) { return p; }