
Kernel: Allocate kernel stacks for threads using the region allocator.

This patch moves away from using kmalloc memory for thread kernel stacks.
This reduces pressure on kmalloc (16 KB per thread adds up fast) and
prevents kernel stack overflow from scribbling all over random unrelated
kernel memory.
Andreas Kling 6 years ago
parent
commit
c8a216b107
6 changed files with 65 additions and 24 deletions
  1. Kernel/Process.cpp (+5 -6)
  2. Kernel/Thread.cpp (+7 -10)
  3. Kernel/Thread.h (+3 -2)
  4. Kernel/VM/MemoryManager.cpp (+40 -3)
  5. Kernel/VM/MemoryManager.h (+5 -2)
  6. Kernel/VM/VMObject.cpp (+5 -1)
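
The gist of the change, condensed from the Thread.cpp and MemoryManager.cpp hunks below: a Ring 3 thread's kernel stack is now backed by a named, kernel-only Region instead of a raw kmalloc() pointer, and those regions are carved out of the kernel-only address space above 0xc0000000. A simplified sketch of the new allocation path (code taken from the diff; the comments are editorial additions):

    // Thread constructor, Ring 3 case: back the kernel stack with a Region.
    m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size,
        String::format("Kernel Stack (Thread %d)", m_tid));
    m_kernel_stack_region->commit();  // populate the backing pages up front
    m_tss.ss0 = 0x10;                 // kernel data segment selector
    m_tss.esp0 = m_kernel_stack_region->laddr()
                     .offset(default_kernel_stack_size)
                     .get() & 0xfffffff8u;  // top of the stack, 8-byte aligned

    // MemoryManager: bump-allocate kernel regions starting at 0xd0000000,
    // leaving a 16 KB gap of unmapped space after each one, so a runaway
    // stack faults instead of scribbling on unrelated kernel memory.
    RetainPtr<Region> MemoryManager::allocate_kernel_region(size_t size, String&& name)
    {
        InterruptDisabler disabler;
        // FIXME: We need a linear address space allocator.
        static dword next_laddr = 0xd0000000;
        ASSERT(!(size % PAGE_SIZE));
        LinearAddress laddr(next_laddr);
        next_laddr += size + 16384;
        auto region = adopt(*new Region(laddr, size, move(name), true, true, false));
        MM.map_region_at_address(*m_kernel_page_directory, *region, laddr, false);
        return region;
    }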

+ 5 - 6
Kernel/Process.cpp

@@ -552,6 +552,11 @@ Process::Process(String&& name, uid_t uid, gid_t gid, pid_t ppid, RingLevel ring
 {
     dbgprintf("Process: New process PID=%u with name=%s\n", m_pid, m_name.characters());

+    if (fork_parent)
+        m_next_region = fork_parent->m_next_region;
+    else
+        m_next_region = LinearAddress(0x10000000);
+
     m_page_directory = PageDirectory::create();
 #ifdef MM_DEBUG
     dbgprintf("Process %u ctor: PD=%x created\n", pid(), m_page_directory.ptr());
@@ -596,12 +601,6 @@ Process::Process(String&& name, uid_t uid, gid_t gid, pid_t ppid, RingLevel ring
         m_fds[2].set(*device_to_use_as_tty.open(O_WRONLY).value());
     }

-    if (fork_parent)
-        m_next_region = fork_parent->m_next_region;
-    else
-        m_next_region = LinearAddress(0x10000000);
-
-
     if (fork_parent) {
         m_sid = fork_parent->m_sid;
         m_pgid = fork_parent->m_pgid;

+ 7 - 10
Kernel/Thread.cpp

@@ -45,12 +45,14 @@ Thread::Thread(Process& process)
         // FIXME: This memory is leaked.
         // But uh, there's also no kernel process termination, so I guess it's not technically leaked...
         dword stack_bottom = (dword)kmalloc_eternal(default_kernel_stack_size);
-        m_tss.esp = (stack_bottom + default_kernel_stack_size) & 0xffffff8;
+        m_tss.esp = (stack_bottom + default_kernel_stack_size) & 0xfffffff8u;
     } else {
         // Ring3 processes need a separate stack for Ring0.
-        m_kernel_stack = kmalloc(default_kernel_stack_size);
+        m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid));
+        m_kernel_stack_region->commit();
+
         m_tss.ss0 = 0x10;
-        m_tss.esp0 = ((dword)m_kernel_stack + default_kernel_stack_size) & 0xffffff8;
+        m_tss.esp0 = m_kernel_stack_region->laddr().offset(default_kernel_stack_size).get() & 0xfffffff8u;
     }

     // HACK: Ring2 SS in the TSS is the current PID.
@@ -78,11 +80,6 @@ Thread::~Thread()
     if (selector())
         gdt_free_entry(selector());

-    if (m_kernel_stack) {
-        kfree(m_kernel_stack);
-        m_kernel_stack = nullptr;
-    }
-
     if (m_kernel_stack_for_signal_handler) {
         kfree(m_kernel_stack_for_signal_handler);
         m_kernel_stack_for_signal_handler = nullptr;
@@ -438,7 +435,7 @@ void Thread::push_value_on_stack(dword value)

 void Thread::make_userspace_stack_for_main_thread(Vector<String> arguments, Vector<String> environment)
 {
-    auto* region = m_process.allocate_region(LinearAddress(), default_userspace_stack_size, "stack");
+    auto* region = m_process.allocate_region(LinearAddress(), default_userspace_stack_size, "Stack (Main thread)");
     ASSERT(region);
     m_tss.esp = region->laddr().offset(default_userspace_stack_size).get();

@@ -484,7 +481,7 @@ void Thread::make_userspace_stack_for_main_thread(Vector<String> arguments, Vect

 void Thread::make_userspace_stack_for_secondary_thread(void *argument)
 {
-    auto* region = m_process.allocate_region(LinearAddress(), default_userspace_stack_size, String::format("Thread %u Stack", tid()));
+    auto* region = m_process.allocate_region(LinearAddress(), default_userspace_stack_size, String::format("Stack (Thread %d)", tid()));
     ASSERT(region);
     m_tss.esp = region->laddr().offset(default_userspace_stack_size).get();


+ 3 - 2
Kernel/Thread.h

@@ -4,6 +4,7 @@
 #include <Kernel/KResult.h>
 #include <Kernel/LinearAddress.h>
 #include <Kernel/UnixTypes.h>
+#include <Kernel/VM/Region.h>
 #include <AK/AKString.h>
 #include <AK/InlineLinkedList.h>
 #include <AK/OwnPtr.h>
@@ -100,7 +101,7 @@ public:
     void set_ticks_left(dword t) { m_ticks_left = t; }
     dword ticks_left() const { return m_ticks_left; }

-    dword kernel_stack_base() const { return (dword)m_kernel_stack; }
+    dword kernel_stack_base() const { return m_kernel_stack_region->laddr().get(); }
     dword kernel_stack_for_signal_handler_base() const { return (dword)m_kernel_stack_for_signal_handler; }

     void set_selector(word s) { m_far_ptr.selector = s; }
@@ -144,7 +145,7 @@ private:
     dword m_times_scheduled { 0 };
     dword m_pending_signals { 0 };
     dword m_signal_mask { 0 };
-    void* m_kernel_stack { nullptr };
+    RetainPtr<Region> m_kernel_stack_region;
     void* m_kernel_stack_for_signal_handler { nullptr };
     pid_t m_waitee_pid { -1 };
     RetainPtr<FileDescriptor> m_blocked_descriptor;

+ 40 - 3
Kernel/VM/MemoryManager.cpp

@@ -80,7 +80,9 @@ void MemoryManager::initialize_paging()
     // 1 MB   -> 2 MB           kmalloc_eternal() space.
     // 2 MB   -> 3 MB           kmalloc() space.
     // 3 MB   -> 4 MB           Supervisor physical pages (available for allocation!)
-    // 4 MB   -> (max) MB       Userspace physical pages (available for allocation!)
+    // 4 MB   -> 0xc0000000     Userspace physical pages (available for allocation!)
+    // 0xc0000000-0xffffffff    Kernel-only linear address space
+
     for (size_t i = (2 * MB); i < (4 * MB); i += PAGE_SIZE)
         m_free_supervisor_physical_pages.append(PhysicalPage::create_eternal(PhysicalAddress(i), true));

@@ -209,6 +211,13 @@ Region* MemoryManager::region_from_laddr(Process& process, LinearAddress laddr)
 {
     ASSERT_INTERRUPTS_DISABLED();

+    if (laddr.get() >= 0xc0000000) {
+        for (auto& region : MM.m_kernel_regions) {
+            if (region->contains(laddr))
+                return region;
+        }
+    }
+
     // FIXME: Use a binary search tree (maybe red/black?) or some other more appropriate data structure!
     for (auto& region : process.m_regions) {
         if (region->contains(laddr))
@@ -220,6 +229,13 @@ Region* MemoryManager::region_from_laddr(Process& process, LinearAddress laddr)

 const Region* MemoryManager::region_from_laddr(const Process& process, LinearAddress laddr)
 {
+    if (laddr.get() >= 0xc0000000) {
+        for (auto& region : MM.m_kernel_regions) {
+            if (region->contains(laddr))
+                return region;
+        }
+    }
+
     // FIXME: Use a binary search tree (maybe red/black?) or some other more appropriate data structure!
     for (auto& region : process.m_regions) {
         if (region->contains(laddr))
@@ -379,6 +395,21 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
     return PageFaultResponse::ShouldCrash;
 }

+RetainPtr<Region> MemoryManager::allocate_kernel_region(size_t size, String&& name)
+{
+    InterruptDisabler disabler;
+
+    // FIXME: We need a linear address space allocator.
+    static dword next_laddr = 0xd0000000;
+    ASSERT(!(size % PAGE_SIZE));
+    LinearAddress laddr(next_laddr);
+    next_laddr += size + 16384;
+
+    auto region = adopt(*new Region(laddr, size, move(name), true, true, false));
+    MM.map_region_at_address(*m_kernel_page_directory, *region, laddr, false);
+    return region;
+}
+
 RetainPtr<PhysicalPage> MemoryManager::allocate_physical_page(ShouldZeroFill should_zero_fill)
 {
     InterruptDisabler disabler;
@@ -610,13 +641,19 @@ void MemoryManager::unregister_vmo(VMObject& vmo)
 void MemoryManager::register_region(Region& region)
 {
     InterruptDisabler disabler;
-    m_regions.set(&region);
+    if (region.laddr().get() >= 0xc0000000)
+        m_kernel_regions.set(&region);
+    else
+        m_user_regions.set(&region);
 }

 void MemoryManager::unregister_region(Region& region)
 {
     InterruptDisabler disabler;
-    m_regions.remove(&region);
+    if (region.laddr().get() >= 0xc0000000)
+        m_kernel_regions.remove(&region);
+    else
+        m_user_regions.remove(&region);
 }

 ProcessPagingScope::ProcessPagingScope(Process& process)
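
The register_region()/unregister_region() and region_from_laddr() changes above split region bookkeeping on a fixed 0xc0000000 boundary: regions at or above it are kernel-only and tracked globally by the MemoryManager, while everything below stays per-process. The patch writes the comparison out inline at each call site; a hypothetical helper (not part of the patch) states the convention in one place:

    // Hypothetical predicate, for illustration only.
    static inline bool is_kernel_linear_address(LinearAddress laddr)
    {
        // 0xc0000000 and up is the kernel-only linear range described in the
        // initialize_paging() comment above.
        return laddr.get() >= 0xc0000000;
    }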

+ 5 - 2
Kernel/VM/MemoryManager.h

@@ -68,6 +68,9 @@

     void map_for_kernel(LinearAddress, PhysicalAddress);

+    RetainPtr<Region> allocate_kernel_region(size_t, String&& name);
+    void map_region_at_address(PageDirectory&, Region&, LinearAddress, bool user_accessible);
+
 private:
     MemoryManager();
     ~MemoryManager();
@@ -77,7 +80,6 @@ private:
     void register_region(Region&);
     void unregister_region(Region&);

-    void map_region_at_address(PageDirectory&, Region&, LinearAddress, bool user_accessible);
     void remap_region_page(Region&, unsigned page_index_in_region, bool user_allowed);

     void initialize_paging();
@@ -211,7 +213,8 @@ private:
     Vector<Retained<PhysicalPage>> m_free_supervisor_physical_pages;

     HashTable<VMObject*> m_vmos;
-    HashTable<Region*> m_regions;
+    HashTable<Region*> m_user_regions;
+    HashTable<Region*> m_kernel_regions;

     size_t m_ram_size { 0 };
     bool m_quickmap_in_use { false };

+ 5 - 1
Kernel/VM/VMObject.cpp

@@ -80,7 +80,11 @@ void VMObject::for_each_region(Callback callback)
 {
     // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
     //        Perhaps VMObject could have a Vector<Region*> with all of his mappers?
-    for (auto* region : MM.m_regions) {
+    for (auto* region : MM.m_user_regions) {
+        if (&region->vmo() == this)
+            callback(*region);
+    }
+    for (auto* region : MM.m_kernel_regions) {
         if (&region->vmo() == this)
             callback(*region);
     }