Kernel: Move kernel region checks from x86 page fault handler to MM

Ideally the x86 fault handler would only do x86-specific things and
delegate the rest of the work to MemoryManager. This patch moves some of
the address checks to a more generic place.
Andreas Kling, 3 years ago
parent
commit a12e19c015
2 changed files with 17 additions and 16 deletions
  1. Kernel/Arch/x86/common/Interrupts.cpp (0 additions, 16 deletions)
  2. Kernel/Memory/MemoryManager.cpp (17 additions, 0 deletions)
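
Before reading the diff, here is a minimal, self-contained sketch of the range-check
pattern this patch consolidates into MemoryManager::handle_page_fault(). This is not
SerenityOS code: FlatPtr, VirtualAddress and the section boundaries below are simplified
stand-ins for the kernel types and the linker-script symbols (start_of_ro_after_init
and friends) used in the real change.

// Minimal sketch, assuming simplified stand-in types; illustrates the
// faulted_in_range pattern added to MemoryManager::handle_page_fault().
#include <cstdint>
#include <cstdio>

using FlatPtr = uintptr_t;

struct VirtualAddress {
    FlatPtr value;
    FlatPtr get() const { return value; }
    bool operator>=(VirtualAddress other) const { return value >= other.value; }
    bool operator<(VirtualAddress other) const { return value < other.value; }
};

int main()
{
    // Hypothetical section boundaries, for illustration only; in the kernel
    // these come from linker-script symbols, not local variables.
    FlatPtr start_of_ro_after_init = 0xc0100000;
    FlatPtr end_of_ro_after_init = 0xc0104000;

    // A fault address that happens to land inside that range.
    VirtualAddress fault_vaddr { 0xc0101234 };

    // Same shape as the lambda in the diff: a half-open range check [start, end).
    auto faulted_in_range = [&](FlatPtr start, FlatPtr end) {
        return fault_vaddr >= VirtualAddress { start } && fault_vaddr < VirtualAddress { end };
    };

    if (faulted_in_range(start_of_ro_after_init, end_of_ro_after_init))
        printf("Fault at %#lx lies inside READONLY_AFTER_INIT; the kernel would PANIC here.\n",
            (unsigned long)fault_vaddr.get());

    return 0;
}

As in the diff below, the consolidated checks run at the top of handle_page_fault(),
before the in-IRQ diagnostics and the region lookup, so the panics still fire as early
as they did in the x86 handler.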

+ 0 - 16
Kernel/Arch/x86/common/Interrupts.cpp

@@ -327,22 +327,6 @@ void page_fault_handler(TrapFrame* trap)
         return handle_crash(regs, "Bad stack on page fault", SIGSEGV);
     }
 
-    if (fault_address >= (FlatPtr)&start_of_ro_after_init && fault_address < (FlatPtr)&end_of_ro_after_init) {
-        dump(regs);
-        PANIC("Attempt to write into READONLY_AFTER_INIT section");
-    }
-
-    if (fault_address >= (FlatPtr)&start_of_unmap_after_init && fault_address < (FlatPtr)&end_of_unmap_after_init) {
-        dump(regs);
-        auto sym = symbolicate_kernel_address(fault_address);
-        PANIC("Attempt to access UNMAP_AFTER_INIT section ({:p}: {})", fault_address, sym ? sym->name : "(Unknown)");
-    }
-
-    if (fault_address >= (FlatPtr)&start_of_kernel_ksyms && fault_address < (FlatPtr)&end_of_kernel_ksyms) {
-        dump(regs);
-        PANIC("Attempt to access KSYMS section");
-    }
-
     PageFault fault { regs.exception_code, VirtualAddress { fault_address } };
     auto response = MM.handle_page_fault(fault);
 

+ 17 - 0
Kernel/Memory/MemoryManager.cpp

@@ -12,6 +12,7 @@
 #include <Kernel/CMOS.h>
 #include <Kernel/FileSystem/Inode.h>
 #include <Kernel/Heap/kmalloc.h>
+#include <Kernel/KSyms.h>
 #include <Kernel/Memory/AnonymousVMObject.h>
 #include <Kernel/Memory/MemoryManager.h>
 #include <Kernel/Memory/PageDirectory.h>
@@ -720,6 +721,22 @@ Region* MemoryManager::find_region_from_vaddr(VirtualAddress vaddr)
 PageFaultResponse MemoryManager::handle_page_fault(PageFault const& fault)
 {
     VERIFY_INTERRUPTS_DISABLED();
+
+    auto faulted_in_range = [&fault](auto const* start, auto const* end) {
+        return fault.vaddr() >= VirtualAddress { start } && fault.vaddr() < VirtualAddress { end };
+    };
+
+    if (faulted_in_range(&start_of_ro_after_init, &end_of_ro_after_init))
+        PANIC("Attempt to write into READONLY_AFTER_INIT section");
+
+    if (faulted_in_range(&start_of_unmap_after_init, &end_of_unmap_after_init)) {
+        auto const* kernel_symbol = symbolicate_kernel_address(fault.vaddr().get());
+        PANIC("Attempt to access UNMAP_AFTER_INIT section ({:p}: {})", fault.vaddr(), kernel_symbol ? kernel_symbol->name : "(Unknown)");
+    }
+
+    if (faulted_in_range(&start_of_kernel_ksyms, &end_of_kernel_ksyms))
+        PANIC("Attempt to access KSYMS section");
+
     if (Processor::current_in_irq()) {
         dbgln("CPU[{}] BUG! Page fault while handling IRQ! code={}, vaddr={}, irq level: {}",
             Processor::current_id(), fault.code(), fault.vaddr(), Processor::current_in_irq());