Kernel/aarch64: Detect if access faults come from SafeMem

This commit lets us differentiate whether access faults are caused by
accessing junk memory addresses given to us by userspace or by a kernel
bug.

The stub implementations of the `safe_*` functions currently don't let
us jump back into them and return a value indicating failure, so we
panic if such a fault happens. Practically, this means that we still
crash either way, but if the access violation was caused by something
other than a `safe_*` function, we now take the usual kernel crash code
path and print a register and memory dump, rather than hitting the
`TODO_AARCH64` in `handle_safe_access_fault`.
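
For context, here is a minimal sketch (not part of this commit) of how an
architecture's access-fault path would consult `handle_safe_access_fault()`
before treating the fault as a kernel bug. The wrapper name
`illustrate_access_fault_dispatch` and the final `PANIC()` are illustrative
stand-ins; the real call site lives in the aarch64 exception-handling code.

#include <AK/Types.h>
#include <Kernel/Arch/RegisterState.h>
#include <Kernel/Arch/SafeMem.h>
#include <Kernel/Panic.h>

namespace Kernel {

// Hypothetical dispatch helper, for illustration only.
void illustrate_access_fault_dispatch(RegisterState& regs, FlatPtr fault_address)
{
    // If the faulting instruction lies inside one of the SafeMem text
    // sections, the bad address came from userspace. On aarch64 this still
    // panics via TODO_AARCH64() for now, because we cannot yet resume the
    // safe_* function with a failure result.
    if (handle_safe_access_fault(regs, fault_address))
        return;

    // Otherwise this is a genuine kernel bug: take the usual crash path,
    // which prints a register and memory dump. PANIC() stands in for the
    // real crash handling here.
    PANIC("Access fault at {:p}, ip={:p}", fault_address, regs.ip());
}

}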
Daniel Bertalan 2023-05-19 13:06:21 +02:00 committed by Andreas Kling
parent 0da2d2102a
commit beb55f726f
2 changed files with 47 additions and 10 deletions


@@ -1,15 +1,26 @@
/*
* Copyright (c) 2022, Timon Kruiper <timonkruiper@gmail.com>
* Copyright (c) 2023, Daniel Bertalan <dani@danielbertalan.dev>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/RegisterState.h>
#include <Kernel/Arch/SafeMem.h>
#include <Kernel/StdLib.h>
#define CODE_SECTION(section_name) __attribute__((section(section_name)))
extern "C" u8 start_of_safemem_text[];
extern "C" u8 end_of_safemem_text[];
extern "C" u8 start_of_safemem_atomic_text[];
extern "C" u8 end_of_safemem_atomic_text[];
namespace Kernel {
bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
CODE_SECTION(".text.safemem")
NEVER_INLINE FLATTEN bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
{
// FIXME: Actually implement a safe memset.
auto* dest = static_cast<u8*>(dest_ptr);
@@ -19,7 +30,8 @@ bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
return true;
}
ssize_t safe_strnlen(char const* str, unsigned long max_n, void*& fault_at)
CODE_SECTION(".text.safemem")
NEVER_INLINE FLATTEN ssize_t safe_strnlen(char const* str, unsigned long max_n, void*& fault_at)
{
// FIXME: Actually implement a safe strnlen.
size_t len = 0;
@@ -29,7 +41,8 @@ ssize_t safe_strnlen(char const* str, unsigned long max_n, void*& fault_at)
return len;
}
bool safe_memcpy(void* dest_ptr, void const* src_ptr, unsigned long n, void*& fault_at)
CODE_SECTION(".text.safemem")
NEVER_INLINE FLATTEN bool safe_memcpy(void* dest_ptr, void const* src_ptr, unsigned long n, void*& fault_at)
{
// FIXME: Actually implement a safe memcpy.
auto* pd = static_cast<u8*>(dest_ptr);
@@ -40,40 +53,56 @@ bool safe_memcpy(void* dest_ptr, void const* src_ptr, unsigned long n, void*& fault_at)
return true;
}
Optional<bool> safe_atomic_compare_exchange_relaxed(u32 volatile* var, u32& expected, u32 val)
CODE_SECTION(".text.safemem.atomic")
NEVER_INLINE FLATTEN Optional<bool> safe_atomic_compare_exchange_relaxed(u32 volatile* var, u32& expected, u32 val)
{
// FIXME: Handle access faults.
return AK::atomic_compare_exchange_strong(var, expected, val, AK::memory_order_relaxed);
}
Optional<u32> safe_atomic_load_relaxed(u32 volatile* var)
CODE_SECTION(".text.safemem.atomic")
NEVER_INLINE FLATTEN Optional<u32> safe_atomic_load_relaxed(u32 volatile* var)
{
// FIXME: Handle access faults.
return AK::atomic_load(var, AK::memory_order_relaxed);
}
Optional<u32> safe_atomic_fetch_add_relaxed(u32 volatile* var, u32 val)
CODE_SECTION(".text.safemem.atomic")
NEVER_INLINE FLATTEN Optional<u32> safe_atomic_fetch_add_relaxed(u32 volatile* var, u32 val)
{
// FIXME: Handle access faults.
return AK::atomic_fetch_add(var, val, AK::memory_order_relaxed);
}
Optional<u32> safe_atomic_exchange_relaxed(u32 volatile* var, u32 val)
CODE_SECTION(".text.safemem.atomic")
NEVER_INLINE FLATTEN Optional<u32> safe_atomic_exchange_relaxed(u32 volatile* var, u32 val)
{
// FIXME: Handle access faults.
return AK::atomic_exchange(var, val, AK::memory_order_relaxed);
}
bool safe_atomic_store_relaxed(u32 volatile* var, u32 val)
CODE_SECTION(".text.safemem.atomic")
NEVER_INLINE FLATTEN bool safe_atomic_store_relaxed(u32 volatile* var, u32 val)
{
// FIXME: Handle access faults.
AK::atomic_store(var, val);
return true;
}
bool handle_safe_access_fault(RegisterState&, FlatPtr)
bool handle_safe_access_fault(RegisterState& regs, FlatPtr fault_address)
{
TODO_AARCH64();
FlatPtr ip = regs.ip();
if (ip >= (FlatPtr)&start_of_safemem_text && ip < (FlatPtr)&end_of_safemem_text) {
dbgln("FIXME: Faulted while accessing userspace address {:p}.", fault_address);
dbgln(" We need to jump back into the appropriate SafeMem function, set fault_at and return failure.");
TODO_AARCH64();
} else if (ip >= (FlatPtr)&start_of_safemem_atomic_text && ip < (FlatPtr)&end_of_safemem_atomic_text) {
dbgln("FIXME: Faulted while accessing userspace address {:p}.", fault_address);
dbgln(" We need to jump back into the appropriate atomic SafeMem function and return failure.");
TODO_AARCH64();
}
return false;
}
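
To see why it matters that these functions eventually report failure instead
of panicking, here is a hedged sketch of a typical caller of the `safe_*`
API. The helper name `example_copy_to_user` is made up for illustration; the
kernel's real userspace-copy helpers may differ in detail.

#include <AK/Format.h>
#include <AK/Types.h>
#include <Kernel/Arch/SafeMem.h>

namespace Kernel {

// Made-up helper name, for illustration only.
bool example_copy_to_user(void* dest_ptr, void const* src_ptr, size_t n)
{
    void* fault_at = nullptr;
    if (!safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
        // Once faults are handled properly, safe_memcpy() returns false and
        // fault_at names the userspace address that could not be accessed,
        // so the caller can fail the request (e.g. with EFAULT) instead of
        // bringing down the kernel.
        dbgln("example_copy_to_user: fault while writing to {:p}", (FlatPtr)fault_at);
        return false;
    }
    return true;
}

}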


@@ -20,6 +20,14 @@ SECTIONS
.text ALIGN(4K) : AT (ADDR(.text) - KERNEL_MAPPING_BASE)
{
*(.text.first)
start_of_safemem_text = .;
KEEP(*(.text.safemem))
end_of_safemem_text = .;
start_of_safemem_atomic_text = .;
KEEP(*(.text.safemem.atomic))
end_of_safemem_atomic_text = .;
*(.text*)
} :text