
Kernel: Unbreak symbolication yet another time.

Andreas Kling 6 years ago
parent
commit
0e73aa36c8
3 changed files with 19 additions and 8 deletions
  1. Kernel/Process.cpp    + 17 - 6
  2. Kernel/kmalloc.cpp    + 1 - 1
  3. Kernel/kmalloc.h      + 1 - 1

+ 17 - 6
Kernel/Process.cpp

@@ -1597,14 +1597,19 @@ void sleep(dword ticks)
     sched_yield();
 }
 
+static bool is_inside_kernel_code(LinearAddress laddr)
+{
+    // FIXME: What if we're indexing into the ksym with the highest address though?
+    return laddr.get() >= ksym_lowest_address && laddr.get() <= ksym_highest_address;
+}
+
 bool Process::validate_read_from_kernel(LinearAddress laddr) const
 {
     // We check extra carefully here since the first 4MB of the address space is identity-mapped.
     // This code allows access outside of the known used address ranges to get caught.
 
     InterruptDisabler disabler;
-    // FIXME: What if we're indexing into the ksym with the highest address though?
-    if (laddr.get() >= ksym_lowest_address && laddr.get() <= ksym_highest_address)
+    if (is_inside_kernel_code(laddr))
         return true;
     if (is_kmalloc_address(laddr.as_ptr()))
         return true;
@@ -1613,8 +1618,12 @@ bool Process::validate_read_from_kernel(LinearAddress laddr) const
 
 bool Process::validate_read(const void* address, size_t size) const
 {
-    if (isRing0())
-        return true;
+    if (isRing0()) {
+        if (is_inside_kernel_code(LinearAddress((dword)address)))
+            return true;
+        if (is_kmalloc_address(address))
+            return true;
+    }
     ASSERT(size);
     if (!size)
         return false;
@@ -1629,8 +1638,10 @@ bool Process::validate_read(const void* address, size_t size) const
 
 bool Process::validate_write(void* address, size_t size) const
 {
-    if (isRing0())
-        return true;
+    if (isRing0()) {
+        if (is_kmalloc_address(address))
+            return true;
+    }
     ASSERT(size);
     if (!size)
         return false;
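The symbolication fix comes down to the ring-0 fast path above: instead of unconditionally trusting ring-0 reads, kernel-code (ksym range) and kmalloc addresses are whitelisted explicitly, and anything else still goes through the normal per-region checks. Below is a minimal standalone sketch (not kernel code) of that fast path; the ksym_* bounds and the kmalloc range are hypothetical stand-in values, not the real kernel globals.

    // Standalone sketch approximating the ring-0 fast path this commit adds
    // to Process::validate_read(). All address ranges below are stand-ins.
    #include <stdint.h>
    #include <stdio.h>

    static uintptr_t ksym_lowest_address  = 0x00100000; // stand-in value
    static uintptr_t ksym_highest_address = 0x00180000; // stand-in value
    static uintptr_t kmalloc_base         = 0x00200000; // stand-in value
    static uintptr_t kmalloc_end          = 0x00300000; // stand-in value

    static bool is_inside_kernel_code(uintptr_t laddr)
    {
        // Same bounds check as the new helper in Process.cpp.
        return laddr >= ksym_lowest_address && laddr <= ksym_highest_address;
    }

    static bool is_kmalloc_address(const void* ptr)
    {
        auto p = reinterpret_cast<uintptr_t>(ptr);
        return p >= kmalloc_base && p < kmalloc_end;
    }

    // A ring-0 caller (e.g. the symbolicator reading ksym data) is only
    // fast-tracked for kernel-code or kmalloc addresses; everything else
    // would fall through to the per-region validation (omitted here).
    static bool ring0_read_allowed(const void* address)
    {
        if (is_inside_kernel_code(reinterpret_cast<uintptr_t>(address)))
            return true;
        if (is_kmalloc_address(address))
            return true;
        return false; // real code continues with region checks instead
    }

    int main()
    {
        printf("%d\n", ring0_read_allowed(reinterpret_cast<void*>(0x00110000))); // 1: kernel code
        printf("%d\n", ring0_read_allowed(reinterpret_cast<void*>(0x00250000))); // 1: kmalloc
        printf("%d\n", ring0_read_allowed(reinterpret_cast<void*>(0x10000000))); // 0: neither
    }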

+ 1 - 1
Kernel/kmalloc.cpp

@@ -38,7 +38,7 @@ volatile size_t kmalloc_sum_eternal = 0;
 static byte* s_next_eternal_ptr;
 static byte* s_end_of_eternal_range;
 
-bool is_kmalloc_address(void* ptr)
+bool is_kmalloc_address(const void* ptr)
 {
     if (ptr >= (byte*)ETERNAL_BASE_PHYSICAL && ptr < s_next_eternal_ptr)
         return true;

+ 1 - 1
Kernel/kmalloc.h

@@ -10,7 +10,7 @@ void* kmalloc_aligned(size_t, size_t alignment) __attribute__ ((malloc));
 void kfree(void*);
 void kfree_aligned(void*);
 
-bool is_kmalloc_address(void*);
+bool is_kmalloc_address(const void*);
 
 extern volatile size_t sum_alloc;
 extern volatile size_t sum_free;
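The kmalloc.h/kmalloc.cpp hunks are a const-correctness follow-up: Process::validate_read() receives a const void*, so is_kmalloc_address() must accept const as well. A small hypothetical illustration (stub body, not the real allocator check):

    // Post-commit signature: accepts the const void* coming from validate_read().
    static bool is_kmalloc_address(const void* ptr)
    {
        (void)ptr;
        return false; // stub for illustration; the real check walks the kmalloc ranges
    }

    static bool validate_read_like(const void* address)
    {
        // With the old `bool is_kmalloc_address(void*)` signature this call would
        // not compile: a const void* does not implicitly convert to void*.
        return is_kmalloc_address(address);
    }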