Kernel: Implement safe_* memory access functions for x86_64

Gunnar Beutner, 4 years ago
commit df530941cc

Kernel/Arch/x86/SafeMem.h (+1, -1)

@@ -95,6 +95,6 @@ struct RegisterState;
     }
 }
 
-bool handle_safe_access_fault(RegisterState& regs, u32 fault_address);
+bool handle_safe_access_fault(RegisterState& regs, FlatPtr fault_address);
 
 }

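For context, a hedged sketch of how the safe_* helpers declared above are typically consumed: safe_memcpy() returns false if a page fault interrupted the copy, and fault_at receives the faulting address (delivered through edx/rdx by handle_safe_access_fault() below). The caller name and shape are illustrative, not part of this commit.

    // Illustrative caller; assumes only the declarations in SafeMem.h above.
    #include <Kernel/Arch/x86/SafeMem.h>

    bool copy_to_user_sketch(void* user_dest, void const* kernel_src, size_t n)
    {
        void* fault_at = nullptr;
        if (!Kernel::safe_memcpy(user_dest, kernel_src, n, fault_at)) {
            // The copy faulted partway; fault_at holds the bad address.
            // A real caller would typically fail the syscall with EFAULT.
            return false;
        }
        return true;
    }
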
Kernel/Arch/x86/i386/SafeMem.cpp → Kernel/Arch/x86/common/SafeMem.cpp (+55, -29)

@@ -51,8 +51,12 @@ NEVER_INLINE bool safe_memcpy(void* dest_ptr, const void* src_ptr, size_t n, voi
         size_t size_ts = n / sizeof(size_t);
         asm volatile(
             "safe_memcpy_ins_1: \n"
+#if ARCH(I386)
             "rep movsl \n"
-            "safe_memcpy_1_faulted: \n" // handle_safe_access_fault() set edx to the fault address!
+#else
+            "rep movsq \n"
+#endif
+            "safe_memcpy_1_faulted: \n" // handle_safe_access_fault() set edx/rdx to the fault address!
             : "=S"(src),
             "=D"(dest),
             "=c"(remainder),
@@ -72,7 +76,7 @@ NEVER_INLINE bool safe_memcpy(void* dest_ptr, const void* src_ptr, size_t n, voi
     asm volatile(
         "safe_memcpy_ins_2: \n"
         "rep movsb \n"
-        "safe_memcpy_2_faulted: \n" // handle_safe_access_fault() set edx to the fault address!
+        "safe_memcpy_2_faulted: \n" // handle_safe_access_fault() set edx/rdx to the fault address!
         : "=c"(remainder),
         [fault_at] "=d"(fault_at)
         : "S"(src),
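
A note on the movsl/movsq switch above: the bulk loop copies n / sizeof(size_t) words, so the string instruction must match the word width. rep movsl moves 4 bytes per iteration (size_t on i386), while x86_64 needs rep movsq to move 8; rep movsb then finishes the tail. A small sketch of the chunking arithmetic, illustrative only:

    // Illustrative: the chunking behind the rep movsl/movsq choice.
    // sizeof(size_t) == 4 on i386 (rep movsl), == 8 on x86_64 (rep movsq).
    #include <cstddef>

    struct CopyPlan {
        size_t word_count; // iterations of rep movsl/movsq
        size_t tail_bytes; // leftover bytes, finished with rep movsb
    };

    constexpr CopyPlan plan_copy(size_t n)
    {
        return { n / sizeof(size_t), n % sizeof(size_t) };
    }
    // e.g. plan_copy(4099) on x86_64 yields { 512, 3 }.
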
@@ -100,7 +104,7 @@ NEVER_INLINE ssize_t safe_strnlen(const char* str, size_t max_n, void*& fault_at
         "je 2f \n"
         "inc %[count] \n"
         "jmp 1b \n"
-        "safe_strnlen_faulted: \n" // handle_safe_access_fault() set edx to the fault address!
+        "safe_strnlen_faulted: \n" // handle_safe_access_fault() set edx/rdx to the fault address!
         "xor %[count_on_error], %[count_on_error] \n"
         "dec %[count_on_error] \n" // return -1 on fault
         "2:"
@@ -128,8 +132,12 @@ NEVER_INLINE bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
         expanded_c |= expanded_c << 16;
         asm volatile(
             "safe_memset_ins_1: \n"
+#if ARCH(I386)
             "rep stosl \n"
-            "safe_memset_1_faulted: \n" // handle_safe_access_fault() set edx to the fault address!
+#else
+            "rep stosq \n"
+#endif
+            "safe_memset_1_faulted: \n" // handle_safe_access_fault() set edx/rdx to the fault address!
             : "=D"(dest),
             "=c"(remainder),
             [fault_at] "=d"(fault_at)
@@ -148,7 +156,7 @@ NEVER_INLINE bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
     asm volatile(
         "safe_memset_ins_2: \n"
         "rep stosb \n"
-        "safe_memset_2_faulted: \n" // handle_safe_access_fault() set edx to the fault address!
+        "safe_memset_2_faulted: \n" // handle_safe_access_fault() set edx/rdx to the fault address!
         : "=D"(dest),
         "=c"(remainder),
         [fault_at] "=d"(fault_at)
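
Unlike the boolean-returning helpers, safe_strnlen() above signals a fault with -1 (the xor/dec pair in its hunk materializes that return value). A hedged caller sketch, with names that are illustrative rather than from the commit:

    // Illustrative caller of safe_strnlen(): a negative result means the
    // read faulted and fault_at holds the faulting address (via edx/rdx).
    #include <Kernel/Arch/x86/SafeMem.h>

    ssize_t user_string_length_sketch(char const* user_str, size_t max_n)
    {
        void* fault_at = nullptr;
        ssize_t length = Kernel::safe_strnlen(user_str, max_n, fault_at);
        // If length < 0, a real caller would typically return EFAULT.
        return length;
    }
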
@@ -251,45 +259,63 @@ NEVER_INLINE Optional<bool> safe_atomic_compare_exchange_relaxed(volatile u32* v
     return did_exchange;
 }
 
-bool handle_safe_access_fault(RegisterState& regs, u32 fault_address)
+bool handle_safe_access_fault(RegisterState& regs, FlatPtr fault_address)
 {
-    if (regs.eip >= (FlatPtr)&start_of_safemem_text && regs.eip < (FlatPtr)&end_of_safemem_text) {
+    FlatPtr ip;
+#if ARCH(I386)
+    ip = regs.eip;
+#else
+    ip = regs.rip;
+#endif
+    if (ip >= (FlatPtr)&start_of_safemem_text && ip < (FlatPtr)&end_of_safemem_text) {
        // If we detect that the fault happened in safe_memcpy(), safe_strnlen(),
        // or safe_memset(), then resume at the appropriate _faulted label
-        if (regs.eip == (FlatPtr)&safe_memcpy_ins_1)
-            regs.eip = (FlatPtr)&safe_memcpy_1_faulted;
-        else if (regs.eip == (FlatPtr)&safe_memcpy_ins_2)
-            regs.eip = (FlatPtr)&safe_memcpy_2_faulted;
-        else if (regs.eip == (FlatPtr)&safe_strnlen_ins)
-            regs.eip = (FlatPtr)&safe_strnlen_faulted;
-        else if (regs.eip == (FlatPtr)&safe_memset_ins_1)
-            regs.eip = (FlatPtr)&safe_memset_1_faulted;
-        else if (regs.eip == (FlatPtr)&safe_memset_ins_2)
-            regs.eip = (FlatPtr)&safe_memset_2_faulted;
+        if (ip == (FlatPtr)&safe_memcpy_ins_1)
+            ip = (FlatPtr)&safe_memcpy_1_faulted;
+        else if (ip == (FlatPtr)&safe_memcpy_ins_2)
+            ip = (FlatPtr)&safe_memcpy_2_faulted;
+        else if (ip == (FlatPtr)&safe_strnlen_ins)
+            ip = (FlatPtr)&safe_strnlen_faulted;
+        else if (ip == (FlatPtr)&safe_memset_ins_1)
+            ip = (FlatPtr)&safe_memset_1_faulted;
+        else if (ip == (FlatPtr)&safe_memset_ins_2)
+            ip = (FlatPtr)&safe_memset_2_faulted;
         else
             return false;
 
+#if ARCH(I386)
+        regs.eip = ip;
         regs.edx = fault_address;
+#else
+        regs.rip = ip;
+        regs.rdx = fault_address;
+#endif
         return true;
     }
-    if (regs.eip >= (FlatPtr)&start_of_safemem_atomic_text && regs.eip < (FlatPtr)&end_of_safemem_atomic_text) {
+    if (ip >= (FlatPtr)&start_of_safemem_atomic_text && ip < (FlatPtr)&end_of_safemem_atomic_text) {
         // If we detect that a fault happened in one of the atomic safe_
         // functions, resume at the appropriate _faulted label and set
-        // the edx register to 1 to indicate an error
-        if (regs.eip == (FlatPtr)&safe_atomic_fetch_add_relaxed_ins)
-            regs.eip = (FlatPtr)&safe_atomic_fetch_add_relaxed_faulted;
-        else if (regs.eip == (FlatPtr)&safe_atomic_exchange_relaxed_ins)
-            regs.eip = (FlatPtr)&safe_atomic_exchange_relaxed_faulted;
-        else if (regs.eip == (FlatPtr)&safe_atomic_load_relaxed_ins)
-            regs.eip = (FlatPtr)&safe_atomic_load_relaxed_faulted;
-        else if (regs.eip == (FlatPtr)&safe_atomic_store_relaxed_ins)
-            regs.eip = (FlatPtr)&safe_atomic_store_relaxed_faulted;
-        else if (regs.eip == (FlatPtr)&safe_atomic_compare_exchange_relaxed_ins)
-            regs.eip = (FlatPtr)&safe_atomic_compare_exchange_relaxed_faulted;
+        // the edx/rdx register to 1 to indicate an error
+        if (ip == (FlatPtr)&safe_atomic_fetch_add_relaxed_ins)
+            ip = (FlatPtr)&safe_atomic_fetch_add_relaxed_faulted;
+        else if (ip == (FlatPtr)&safe_atomic_exchange_relaxed_ins)
+            ip = (FlatPtr)&safe_atomic_exchange_relaxed_faulted;
+        else if (ip == (FlatPtr)&safe_atomic_load_relaxed_ins)
+            ip = (FlatPtr)&safe_atomic_load_relaxed_faulted;
+        else if (ip == (FlatPtr)&safe_atomic_store_relaxed_ins)
+            ip = (FlatPtr)&safe_atomic_store_relaxed_faulted;
+        else if (ip == (FlatPtr)&safe_atomic_compare_exchange_relaxed_ins)
+            ip = (FlatPtr)&safe_atomic_compare_exchange_relaxed_faulted;
         else
             return false;
 
+#if ARCH(I386)
+        regs.eip = ip;
         regs.edx = 1;
+#else
+        regs.rip = ip;
+        regs.rdx = 1;
+#endif
         return true;
     }
     return false;

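To tie the pieces together, a hedged sketch of the dispatch site: on a kernel-mode page fault, the generic handler gives handle_safe_access_fault() the first chance to recover, falling through to the normal paging or crash path only if the faulting IP is not one of the registered *_ins labels. The surrounding handler shape is an assumption; only handle_safe_access_fault() itself comes from this commit.

    // Assumed call-site shape; not taken verbatim from this commit.
    void page_fault_sketch(Kernel::RegisterState& regs, FlatPtr fault_address)
    {
        // If the fault hit a safe_* instruction, the handler rewrites
        // eip/rip to the matching *_faulted label and stashes the fault
        // address (or a 1 error flag, for the atomics) in edx/rdx.
        if (Kernel::handle_safe_access_fault(regs, fault_address))
            return; // execution resumes inside the safe_* function
        // ...otherwise: demand paging, CoW, or panic as usual.
    }
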
Kernel/Arch/x86/x86_64/SafeMem.cpp (+0, -89)

@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2021, Gunnar Beutner <gbeutner@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#include <Kernel/Arch/x86/SafeMem.h>
-
-#define CODE_SECTION(section_name) __attribute__((section(section_name)))
-
-namespace Kernel {
-
-CODE_SECTION(".text.safemem")
-NEVER_INLINE bool safe_memcpy(void* dest_ptr, const void* src_ptr, size_t n, void*& fault_at)
-{
-    (void)dest_ptr;
-    (void)src_ptr;
-    (void)n;
-    (void)fault_at;
-    TODO();
-}
-
-CODE_SECTION(".text.safemem")
-NEVER_INLINE ssize_t safe_strnlen(const char* str, size_t max_n, void*& fault_at)
-{
-    (void)str;
-    (void)max_n;
-    (void)fault_at;
-    TODO();
-}
-
-CODE_SECTION(".text.safemem")
-NEVER_INLINE bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
-{
-    (void)dest_ptr;
-    (void)c;
-    (void)n;
-    (void)fault_at;
-    TODO();
-}
-
-CODE_SECTION(".text.safemem.atomic")
-NEVER_INLINE Optional<u32> safe_atomic_fetch_add_relaxed(volatile u32* var, u32 val)
-{
-    (void)var;
-    (void)val;
-    TODO();
-}
-
-CODE_SECTION(".text.safemem.atomic")
-NEVER_INLINE Optional<u32> safe_atomic_exchange_relaxed(volatile u32* var, u32 val)
-{
-    (void)var;
-    (void)val;
-    TODO();
-}
-
-CODE_SECTION(".text.safemem.atomic")
-NEVER_INLINE Optional<u32> safe_atomic_load_relaxed(volatile u32* var)
-{
-    (void)var;
-    TODO();
-}
-
-CODE_SECTION(".text.safemem.atomic")
-NEVER_INLINE bool safe_atomic_store_relaxed(volatile u32* var, u32 val)
-{
-    (void)var;
-    (void)val;
-    TODO();
-}
-
-CODE_SECTION(".text.safemem.atomic")
-NEVER_INLINE Optional<bool> safe_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val)
-{
-    (void)var;
-    (void)expected;
-    (void)val;
-    TODO();
-}
-
-bool handle_safe_access_fault(RegisterState& regs, u32 fault_address)
-{
-    (void)regs;
-    (void)fault_address;
-    TODO();
-}
-
-}

Kernel/CMakeLists.txt (+1, -1)

@@ -280,7 +280,6 @@ set(KERNEL_SOURCES
     ${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/${KERNEL_ARCH}/CPU.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/${KERNEL_ARCH}/InterruptEntry.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/${KERNEL_ARCH}/Processor.cpp
-    ${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/${KERNEL_ARCH}/SafeMem.cpp
 )
 
 set(KERNEL_SOURCES
@@ -291,6 +290,7 @@ set(KERNEL_SOURCES
     ${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/common/Interrupts.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/common/Processor.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/common/ProcessorInfo.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/common/SafeMem.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/common/TrapFrame.cpp
 )