Kernel/aarch64: Stub out atomic SafeMem functions

These are used in futexes, which are needed if we want to get further in
`run-tests`.

For now, we have no way to return a non-fatal error if an access fault
is raised while executing these, so the kernel will panic. Some would
consider this a DoS vulnerability where a malicious userspace app can
crash the kernel by passing bogus pointers to it, but I prefer to call
it progress :^)
This commit is contained in:
Daniel Bertalan 2023-05-19 11:52:52 +02:00 committed by Andreas Kling
parent 09ef2c14e9
commit ab279c850b
Notes: sideshowbarker 2024-07-17 20:33:50 +09:00

View file

@ -29,34 +29,35 @@ bool safe_memcpy(void* dest_ptr, void const* src_ptr, unsigned long n, void*&)
return true; return true;
} }
// Relaxed compare-and-exchange on a (potentially userspace) u32.
// Returns true when *ptr held `expected` and was replaced with `desired`;
// on mismatch, `expected` is updated to the value actually observed
// (standard strong-CAS contract). The Optional return is reserved for
// signaling an access fault once fault handling exists.
// FIXME: Handle access faults instead of faulting the kernel on bad pointers.
Optional<bool> safe_atomic_compare_exchange_relaxed(u32 volatile* ptr, u32& expected, u32 desired)
{
    bool const exchanged = AK::atomic_compare_exchange_strong(ptr, expected, desired, AK::memory_order_relaxed);
    return exchanged;
}
// Relaxed atomic load of a (potentially userspace) u32.
// An empty Optional is reserved for reporting an access fault once
// fault handling is implemented; currently a bad pointer faults the kernel.
// FIXME: Handle access faults.
Optional<u32> safe_atomic_load_relaxed(u32 volatile* ptr)
{
    u32 const observed = AK::atomic_load(ptr, AK::memory_order_relaxed);
    return observed;
}
// Relaxed atomic fetch-add on a (potentially userspace) u32.
// Returns the value the word held *before* the addition. An empty Optional
// is reserved for reporting an access fault once fault handling exists.
// FIXME: Handle access faults.
Optional<u32> safe_atomic_fetch_add_relaxed(u32 volatile* ptr, u32 addend)
{
    u32 const previous = AK::atomic_fetch_add(ptr, addend, AK::memory_order_relaxed);
    return previous;
}
// Relaxed atomic exchange on a (potentially userspace) u32.
// Stores `desired` and returns the previous contents of the word. An empty
// Optional is reserved for reporting an access fault once fault handling exists.
// FIXME: Handle access faults.
Optional<u32> safe_atomic_exchange_relaxed(u32 volatile* ptr, u32 desired)
{
    u32 const previous = AK::atomic_exchange(ptr, desired, AK::memory_order_relaxed);
    return previous;
}
// Relaxed atomic store of `val` into the (potentially userspace) u32 at `var`.
// Returns true on success; the bool return exists so callers can treat an
// access fault as failure once fault handling is implemented.
// FIXME: Handle access faults instead of faulting the kernel on bad pointers.
bool safe_atomic_store_relaxed(u32 volatile* var, u32 val)
{
    // Pass memory_order_relaxed explicitly: every sibling *_relaxed helper
    // does, and a bare AK::atomic_store would use the (stronger) default
    // ordering, contradicting this function's name and costing a fence.
    AK::atomic_store(var, val, AK::memory_order_relaxed);
    return true;
}
bool handle_safe_access_fault(RegisterState&, FlatPtr) bool handle_safe_access_fault(RegisterState&, FlatPtr)