Kernel: Add safe atomic functions

This allows us to perform atomic operations on potentially unsafe
user space pointers.
Authored by Tom on 2020-12-19 18:48:56 -07:00; committed by Andreas Kling
parent 992f513ad2
commit b17a889320
10 changed files with 573 additions and 166 deletions
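For context, here is a sketch of the kind of caller this enables. It is illustrative only: user_atomic_fetch_add_relaxed() is one of the wrappers declared in Kernel/StdLib.h by this commit, but the surrounding helper function and its error convention are made up for the example.

#include <AK/Optional.h>
#include <AK/Types.h>
#include <Kernel/StdLib.h>

// Hypothetical example: atomically bump a counter that lives in userspace
// memory. If the pointer is misaligned, not a user address, or the page is
// unmapped, the wrapper returns an empty Optional instead of letting the
// kernel crash on the page fault.
static bool increment_user_counter(volatile u32* user_counter)
{
    auto previous_value = user_atomic_fetch_add_relaxed(user_counter, 1);
    return previous_value.has_value();
}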


@@ -31,6 +31,11 @@
namespace AK {
static inline void atomic_thread_fence(MemoryOrder order) noexcept
{
return __atomic_thread_fence(order);
}
template<typename T>
static inline T atomic_exchange(volatile T* var, T desired, MemoryOrder order = memory_order_seq_cst) noexcept
{


@@ -31,6 +31,7 @@
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Arch/i386/ISRStubs.h>
#include <Kernel/Arch/i386/ProcessorInfo.h>
#include <Kernel/Arch/i386/SafeMem.h>
#include <Kernel/IO.h>
#include <Kernel/Interrupts/APIC.h>
#include <Kernel/Interrupts/GenericInterruptHandler.h>
@@ -215,168 +216,6 @@ void fpu_exception_handler(TrapFrame*)
asm volatile("clts");
}
extern "C" u8* safe_memcpy_ins_1;
extern "C" u8* safe_memcpy_1_faulted;
extern "C" u8* safe_memcpy_ins_2;
extern "C" u8* safe_memcpy_2_faulted;
extern "C" u8* safe_strnlen_ins;
extern "C" u8* safe_strnlen_faulted;
extern "C" u8* safe_memset_ins_1;
extern "C" u8* safe_memset_1_faulted;
extern "C" u8* safe_memset_ins_2;
extern "C" u8* safe_memset_2_faulted;
bool safe_memcpy(void* dest_ptr, const void* src_ptr, size_t n, void*& fault_at)
{
fault_at = nullptr;
size_t dest = (size_t)dest_ptr;
size_t src = (size_t)src_ptr;
size_t remainder;
// FIXME: Support starting at an unaligned address.
if (!(dest & 0x3) && !(src & 0x3) && n >= 12) {
size_t size_ts = n / sizeof(size_t);
asm volatile(
".global safe_memcpy_ins_1 \n"
"safe_memcpy_ins_1: \n"
"rep movsl \n"
".global safe_memcpy_1_faulted \n"
"safe_memcpy_1_faulted: \n" // handle_safe_access_fault() set edx to the fault address!
: "=S" (src),
"=D" (dest),
"=c" (remainder),
[fault_at] "=d" (fault_at)
: "S" (src),
"D" (dest),
"c" (size_ts)
: "memory");
if (remainder != 0)
return false; // fault_at is already set!
n -= size_ts * sizeof(size_t);
if (n == 0) {
fault_at = nullptr;
return true;
}
}
asm volatile(
".global safe_memcpy_ins_2 \n"
"safe_memcpy_ins_2: \n"
"rep movsb \n"
".global safe_memcpy_2_faulted \n"
"safe_memcpy_2_faulted: \n" // handle_safe_access_fault() set edx to the fault address!
: "=c" (remainder),
[fault_at] "=d" (fault_at)
: "S" (src),
"D" (dest),
"c" (n)
: "memory");
if (remainder != 0)
return false; // fault_at is already set!
fault_at = nullptr;
return true;
}
ssize_t safe_strnlen(const char* str, size_t max_n, void*& fault_at)
{
ssize_t count = 0;
fault_at = nullptr;
asm volatile(
"1: \n"
"test %[max_n], %[max_n] \n"
"je 2f \n"
"dec %[max_n] \n"
".global safe_strnlen_ins \n"
"safe_strnlen_ins: \n"
"cmpb $0,(%[str], %[count], 1) \n"
"je 2f \n"
"inc %[count] \n"
"jmp 1b \n"
".global safe_strnlen_faulted \n"
"safe_strnlen_faulted: \n" // handle_safe_access_fault() set edx to the fault address!
"xor %[count_on_error], %[count_on_error] \n"
"dec %[count_on_error] \n" // return -1 on fault
"2:"
: [count_on_error] "=c" (count),
[fault_at] "=d" (fault_at)
: [str] "b" (str),
[count] "c" (count),
[max_n] "d" (max_n)
);
if (count >= 0)
fault_at = nullptr;
return count;
}
bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
{
fault_at = nullptr;
size_t dest = (size_t)dest_ptr;
size_t remainder;
// FIXME: Support starting at an unaligned address.
if (!(dest & 0x3) && n >= 12) {
size_t size_ts = n / sizeof(size_t);
size_t expanded_c = (u8)c;
expanded_c |= expanded_c << 8;
expanded_c |= expanded_c << 16;
asm volatile(
".global safe_memset_ins_1 \n"
"safe_memset_ins_1: \n"
"rep stosl \n"
".global safe_memset_1_faulted \n"
"safe_memset_1_faulted: \n" // handle_safe_access_fault() set edx to the fault address!
: "=D" (dest),
"=c" (remainder),
[fault_at] "=d" (fault_at)
: "D" (dest),
"a" (expanded_c),
"c" (size_ts)
: "memory");
if (remainder != 0)
return false; // fault_at is already set!
n -= size_ts * sizeof(size_t);
if (n == 0) {
fault_at = nullptr;
return true;
}
}
asm volatile(
".global safe_memset_ins_2 \n"
"safe_memset_ins_2: \n"
"rep stosb \n"
".global safe_memset_2_faulted \n"
"safe_memset_2_faulted: \n" // handle_safe_access_fault() set edx to the fault address!
: "=D" (dest),
"=c" (remainder),
[fault_at] "=d" (fault_at)
: "D" (dest),
"c" (n),
"a" (c)
: "memory");
if (remainder != 0)
return false; // fault_at is already set!
fault_at = nullptr;
return true;
}
static bool handle_safe_access_fault(RegisterState& regs, u32 fault_address)
{
// If we detect that the fault happened in safe_memcpy(), safe_strnlen(),
// or safe_memset(), then resume at the appropriate _faulted label
if (regs.eip == (FlatPtr)&safe_memcpy_ins_1)
regs.eip = (FlatPtr)&safe_memcpy_1_faulted;
else if (regs.eip == (FlatPtr)&safe_memcpy_ins_2)
regs.eip = (FlatPtr)&safe_memcpy_2_faulted;
else if (regs.eip == (FlatPtr)&safe_strnlen_ins)
regs.eip = (FlatPtr)&safe_strnlen_faulted;
else if (regs.eip == (FlatPtr)&safe_memset_ins_1)
regs.eip = (FlatPtr)&safe_memset_1_faulted;
else if (regs.eip == (FlatPtr)&safe_memset_ins_2)
regs.eip = (FlatPtr)&safe_memset_2_faulted;
else
return false;
regs.edx = fault_address;
return true;
}
// 14: Page Fault
EH_ENTRY(14, page_fault);


@@ -284,10 +284,6 @@ void flush_idt();
void load_task_register(u16 selector);
void handle_crash(RegisterState&, const char* description, int signal, bool out_of_memory = false);
[[nodiscard]] bool safe_memcpy(void* dest_ptr, const void* src_ptr, size_t n, void*& fault_at);
[[nodiscard]] ssize_t safe_strnlen(const char* str, size_t max_n, void*& fault_at);
[[nodiscard]] bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at);
#define LSW(x) ((u32)(x)&0xFFFF)
#define MSW(x) (((u32)(x) >> 16) & 0xFFFF)
#define LSB(x) ((x)&0xFF)


@@ -0,0 +1,318 @@
/*
* Copyright (c) 2020, the SerenityOS developers.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Arch/i386/SafeMem.h>
#define CODE_SECTION(section_name) __attribute__((section(section_name)))
extern "C" u8* start_of_safemem_text;
extern "C" u8* end_of_safemem_text;
extern "C" u8* safe_memcpy_ins_1;
extern "C" u8* safe_memcpy_1_faulted;
extern "C" u8* safe_memcpy_ins_2;
extern "C" u8* safe_memcpy_2_faulted;
extern "C" u8* safe_strnlen_ins;
extern "C" u8* safe_strnlen_faulted;
extern "C" u8* safe_memset_ins_1;
extern "C" u8* safe_memset_1_faulted;
extern "C" u8* safe_memset_ins_2;
extern "C" u8* safe_memset_2_faulted;
extern "C" u8* start_of_safemem_atomic_text;
extern "C" u8* end_of_safemem_atomic_text;
extern "C" u8* safe_atomic_fetch_add_relaxed_ins;
extern "C" u8* safe_atomic_fetch_add_relaxed_faulted;
extern "C" u8* safe_atomic_exchange_relaxed_ins;
extern "C" u8* safe_atomic_exchange_relaxed_faulted;
extern "C" u8* safe_atomic_load_relaxed_ins;
extern "C" u8* safe_atomic_load_relaxed_faulted;
extern "C" u8* safe_atomic_store_relaxed_ins;
extern "C" u8* safe_atomic_store_relaxed_faulted;
extern "C" u8* safe_atomic_compare_exchange_relaxed_ins;
extern "C" u8* safe_atomic_compare_exchange_relaxed_faulted;
namespace Kernel {
CODE_SECTION(".text.safemem")
bool safe_memcpy(void* dest_ptr, const void* src_ptr, size_t n, void*& fault_at)
{
fault_at = nullptr;
size_t dest = (size_t)dest_ptr;
size_t src = (size_t)src_ptr;
size_t remainder;
// FIXME: Support starting at an unaligned address.
if (!(dest & 0x3) && !(src & 0x3) && n >= 12) {
size_t size_ts = n / sizeof(size_t);
asm volatile(
"safe_memcpy_ins_1: \n"
"rep movsl \n"
"safe_memcpy_1_faulted: \n" // handle_safe_access_fault() set edx to the fault address!
: "=S"(src),
"=D"(dest),
"=c"(remainder),
[fault_at] "=d"(fault_at)
: "S"(src),
"D"(dest),
"c"(size_ts)
: "memory");
if (remainder != 0)
return false; // fault_at is already set!
n -= size_ts * sizeof(size_t);
if (n == 0) {
fault_at = nullptr;
return true;
}
}
asm volatile(
"safe_memcpy_ins_2: \n"
"rep movsb \n"
"safe_memcpy_2_faulted: \n" // handle_safe_access_fault() set edx to the fault address!
: "=c"(remainder),
[fault_at] "=d"(fault_at)
: "S"(src),
"D"(dest),
"c"(n)
: "memory");
if (remainder != 0)
return false; // fault_at is already set!
fault_at = nullptr;
return true;
}
CODE_SECTION(".text.safemem")
ssize_t safe_strnlen(const char* str, size_t max_n, void*& fault_at)
{
ssize_t count = 0;
fault_at = nullptr;
asm volatile(
"1: \n"
"test %[max_n], %[max_n] \n"
"je 2f \n"
"dec %[max_n] \n"
"safe_strnlen_ins: \n"
"cmpb $0,(%[str], %[count], 1) \n"
"je 2f \n"
"inc %[count] \n"
"jmp 1b \n"
"safe_strnlen_faulted: \n" // handle_safe_access_fault() set edx to the fault address!
"xor %[count_on_error], %[count_on_error] \n"
"dec %[count_on_error] \n" // return -1 on fault
"2:"
: [count_on_error] "=c"(count),
[fault_at] "=d"(fault_at)
: [str] "b"(str),
[count] "c"(count),
[max_n] "d"(max_n));
if (count >= 0)
fault_at = nullptr;
return count;
}
CODE_SECTION(".text.safemem")
bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
{
fault_at = nullptr;
size_t dest = (size_t)dest_ptr;
size_t remainder;
// FIXME: Support starting at an unaligned address.
if (!(dest & 0x3) && n >= 12) {
size_t size_ts = n / sizeof(size_t);
size_t expanded_c = (u8)c;
expanded_c |= expanded_c << 8;
expanded_c |= expanded_c << 16;
asm volatile(
"safe_memset_ins_1: \n"
"rep stosl \n"
"safe_memset_1_faulted: \n" // handle_safe_access_fault() set edx to the fault address!
: "=D"(dest),
"=c"(remainder),
[fault_at] "=d"(fault_at)
: "D"(dest),
"a"(expanded_c),
"c"(size_ts)
: "memory");
if (remainder != 0)
return false; // fault_at is already set!
n -= size_ts * sizeof(size_t);
if (n == 0) {
fault_at = nullptr;
return true;
}
}
asm volatile(
"safe_memset_ins_2: \n"
"rep stosb \n"
"safe_memset_2_faulted: \n" // handle_safe_access_fault() set edx to the fault address!
: "=D"(dest),
"=c"(remainder),
[fault_at] "=d"(fault_at)
: "D"(dest),
"c"(n),
"a"(c)
: "memory");
if (remainder != 0)
return false; // fault_at is already set!
fault_at = nullptr;
return true;
}
CODE_SECTION(".text.safemem.atomic")
Optional<u32> safe_atomic_fetch_add_relaxed(volatile u32* var, u32 val)
{
u32 result;
bool error;
asm volatile(
"xor %[error], %[error] \n"
"safe_atomic_fetch_add_relaxed_ins: \n"
"lock xadd %[result], %[var] \n"
"safe_atomic_fetch_add_relaxed_faulted: \n"
: [error] "=d"(error), [result] "=a"(result), [var] "=m"(*var)
: [val] "a"(val)
: "memory");
if (error)
return {};
return result;
}
CODE_SECTION(".text.safemem.atomic")
Optional<u32> safe_atomic_exchange_relaxed(volatile u32* var, u32 val)
{
u32 result;
bool error;
asm volatile(
"xor %[error], %[error] \n"
"safe_atomic_exchange_relaxed_ins: \n"
"xchg %[val], %[var] \n"
"safe_atomic_exchange_relaxed_faulted: \n"
: [error] "=d"(error), "=a"(result), [var] "=m"(*var)
: [val] "a"(val)
: "memory");
if (error)
return {};
return result;
}
CODE_SECTION(".text.safemem.atomic")
Optional<u32> safe_atomic_load_relaxed(volatile u32* var)
{
u32 result;
bool error;
asm volatile(
"xor %[error], %[error] \n"
"safe_atomic_load_relaxed_ins: \n"
"mov (%[var]), %[result] \n"
"safe_atomic_load_relaxed_faulted: \n"
: [error] "=d"(error), [result] "=c"(result)
: [var] "b"(var)
: "memory");
if (error)
return {};
return result;
}
CODE_SECTION(".text.safemem.atomic")
bool safe_atomic_store_relaxed(volatile u32* var, u32 val)
{
bool error;
asm volatile(
"xor %[error], %[error] \n"
"safe_atomic_store_relaxed_ins: \n"
"xchg %[val], %[var] \n"
"safe_atomic_store_relaxed_faulted: \n"
: [error] "=d"(error), [var] "=m"(*var)
: [val] "r"(val)
: "memory");
return !error;
}
CODE_SECTION(".text.safemem.atomic")
Optional<bool> safe_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val)
{
// NOTE: accessing expected is NOT protected as it should always point
// to a valid location in kernel memory!
bool error;
bool did_exchange;
asm volatile(
"xor %[error], %[error] \n"
"safe_atomic_compare_exchange_relaxed_ins: \n"
"lock cmpxchg %[val], %[var] \n"
"safe_atomic_compare_exchange_relaxed_faulted: \n"
: [error] "=d"(error), "=a"(expected), [var] "=m"(*var), "=@ccz"(did_exchange)
: "a"(expected), [val] "b"(val)
: "memory");
if (error)
return {};
return did_exchange;
}
bool handle_safe_access_fault(RegisterState& regs, u32 fault_address)
{
if (regs.eip >= (FlatPtr)&start_of_safemem_text && regs.eip < (FlatPtr)&end_of_safemem_text) {
// If we detect that the fault happened in safe_memcpy(), safe_strnlen(),
// or safe_memset(), then resume at the appropriate _faulted label
if (regs.eip == (FlatPtr)&safe_memcpy_ins_1)
regs.eip = (FlatPtr)&safe_memcpy_1_faulted;
else if (regs.eip == (FlatPtr)&safe_memcpy_ins_2)
regs.eip = (FlatPtr)&safe_memcpy_2_faulted;
else if (regs.eip == (FlatPtr)&safe_strnlen_ins)
regs.eip = (FlatPtr)&safe_strnlen_faulted;
else if (regs.eip == (FlatPtr)&safe_memset_ins_1)
regs.eip = (FlatPtr)&safe_memset_1_faulted;
else if (regs.eip == (FlatPtr)&safe_memset_ins_2)
regs.eip = (FlatPtr)&safe_memset_2_faulted;
else
return false;
regs.edx = fault_address;
return true;
}
if (regs.eip >= (FlatPtr)&start_of_safemem_atomic_text && regs.eip < (FlatPtr)&end_of_safemem_atomic_text) {
// If we detect that a fault happened in one of the atomic safe_
// functions, resume at the appropriate _faulted label and set
// the edx register to 1 to indicate an error
if (regs.eip == (FlatPtr)&safe_atomic_fetch_add_relaxed_ins)
regs.eip = (FlatPtr)&safe_atomic_fetch_add_relaxed_faulted;
else if (regs.eip == (FlatPtr)&safe_atomic_exchange_relaxed_ins)
regs.eip = (FlatPtr)&safe_atomic_exchange_relaxed_faulted;
else if (regs.eip == (FlatPtr)&safe_atomic_load_relaxed_ins)
regs.eip = (FlatPtr)&safe_atomic_load_relaxed_faulted;
else if (regs.eip == (FlatPtr)&safe_atomic_store_relaxed_ins)
regs.eip = (FlatPtr)&safe_atomic_store_relaxed_faulted;
else if (regs.eip == (FlatPtr)&safe_atomic_compare_exchange_relaxed_ins)
regs.eip = (FlatPtr)&safe_atomic_compare_exchange_relaxed_faulted;
else
return false;
regs.edx = 1;
return true;
}
return false;
}
}

Kernel/Arch/i386/SafeMem.h (new file, 120 lines)

@@ -0,0 +1,120 @@
/*
* Copyright (c) 2020, the SerenityOS developers.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include <AK/Atomic.h>
#include <AK/Optional.h>
#include <AK/Types.h>
namespace Kernel {
struct RegisterState;
[[nodiscard]] bool safe_memcpy(void* dest_ptr, const void* src_ptr, size_t n, void*& fault_at);
[[nodiscard]] ssize_t safe_strnlen(const char* str, size_t max_n, void*& fault_at);
[[nodiscard]] bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at);
[[nodiscard]] Optional<u32> safe_atomic_fetch_add_relaxed(volatile u32* var, u32 val);
[[nodiscard]] Optional<u32> safe_atomic_exchange_relaxed(volatile u32* var, u32 val);
[[nodiscard]] Optional<u32> safe_atomic_load_relaxed(volatile u32* var);
[[nodiscard]] bool safe_atomic_store_relaxed(volatile u32* var, u32 val);
[[nodiscard]] Optional<bool> safe_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val);
[[nodiscard]] ALWAYS_INLINE Optional<u32> safe_atomic_fetch_and_relaxed(volatile u32* var, u32 val)
{
auto expected_value = safe_atomic_load_relaxed(var);
if (!expected_value.has_value())
return {}; // fault
u32& expected = expected_value.value();
for (;;) {
auto result = safe_atomic_compare_exchange_relaxed(var, expected, expected & val);
if (!result.has_value())
return {}; // fault
if (result.value())
return expected; // exchanged
// This is only so that we don't saturate the bus...
AK::atomic_thread_fence(AK::MemoryOrder::memory_order_acquire);
}
}
[[nodiscard]] ALWAYS_INLINE Optional<u32> safe_atomic_fetch_and_not_relaxed(volatile u32* var, u32 val)
{
auto expected_value = safe_atomic_load_relaxed(var);
if (!expected_value.has_value())
return {}; // fault
u32& expected = expected_value.value();
for (;;) {
auto result = safe_atomic_compare_exchange_relaxed(var, expected, expected & ~val);
if (!result.has_value())
return {}; // fault
if (result.value())
return expected; // exchanged
// This is only so that we don't saturate the bus...
AK::atomic_thread_fence(AK::MemoryOrder::memory_order_acquire);
}
}
[[nodiscard]] ALWAYS_INLINE Optional<u32> safe_atomic_fetch_or_relaxed(volatile u32* var, u32 val)
{
auto expected_value = safe_atomic_load_relaxed(var);
if (!expected_value.has_value())
return {}; // fault
u32& expected = expected_value.value();
for (;;) {
auto result = safe_atomic_compare_exchange_relaxed(var, expected, expected | val);
if (!result.has_value())
return {}; // fault
if (result.value())
return expected; // exchanged
// This is only so that we don't saturate the bus...
AK::atomic_thread_fence(AK::MemoryOrder::memory_order_acquire);
}
}
[[nodiscard]] ALWAYS_INLINE Optional<u32> safe_atomic_fetch_xor_relaxed(volatile u32* var, u32 val)
{
auto expected_value = safe_atomic_load_relaxed(var);
if (!expected_value.has_value())
return {}; // fault
u32& expected = expected_value.value();
for (;;) {
auto result = safe_atomic_compare_exchange_relaxed(var, expected, expected ^ val);
if (!result.has_value())
return {}; // fault
if (result.value())
return expected; // exchanged
// This is only so that we don't saturate the bus...
AK::atomic_thread_fence(AK::MemoryOrder::memory_order_acquire);
}
}
bool handle_safe_access_fault(RegisterState& regs, u32 fault_address);
}
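One subtlety worth spelling out: safe_atomic_compare_exchange_relaxed() has three possible outcomes, not two. An empty Optional means the memory access itself faulted; true means the exchange happened; false means the value at *var did not match expected, in which case expected has been refreshed with the value that was actually there (that is what the fetch_and/or/xor helpers above rely on). A minimal, hypothetical call site (the state constants and the function are made up for illustration):

#include <AK/Optional.h>
#include <AK/Types.h>
#include <Kernel/Arch/i386/SafeMem.h>

static constexpr u32 STATE_WAITING = 0;
static constexpr u32 STATE_RUNNING = 1;

// Returns true only if we moved the word from WAITING to RUNNING ourselves.
static bool try_start(volatile u32* state_word)
{
    u32 expected = STATE_WAITING;
    auto result = Kernel::safe_atomic_compare_exchange_relaxed(state_word, expected, STATE_RUNNING);
    if (!result.has_value())
        return false; // the access faulted (e.g. unmapped page)
    if (result.value())
        return true; // exchanged: the word was WAITING and is now RUNNING
    // not exchanged: 'expected' now holds the word's actual value
    return false;
}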


@@ -12,6 +12,7 @@ set(KERNEL_SOURCES
ACPI/Parser.cpp
Arch/i386/CPU.cpp
Arch/i386/ProcessorInfo.cpp
Arch/i386/SafeMem.cpp
Arch/PC/BIOS.cpp
CMOS.cpp
CommandLine.cpp


@@ -63,6 +63,115 @@ String copy_string_from_user(Userspace<const char*> user_str, size_t user_str_size)
return copy_string_from_user(user_str.unsafe_userspace_ptr(), user_str_size);
}
Optional<u32> user_atomic_fetch_add_relaxed(volatile u32* var, u32 val)
{
if (FlatPtr(var) & 3)
return {}; // not aligned!
bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
ASSERT(is_user); // For now assert to catch bugs, but technically not an error
if (!is_user)
return {};
Kernel::SmapDisabler disabler;
return Kernel::safe_atomic_fetch_add_relaxed(var, val);
}
Optional<u32> user_atomic_exchange_relaxed(volatile u32* var, u32 val)
{
if (FlatPtr(var) & 3)
return {}; // not aligned!
bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
ASSERT(is_user); // For now assert to catch bugs, but technically not an error
if (!is_user)
return {};
Kernel::SmapDisabler disabler;
return Kernel::safe_atomic_exchange_relaxed(var, val);
}
Optional<u32> user_atomic_load_relaxed(volatile u32* var)
{
if (FlatPtr(var) & 3)
return {}; // not aligned!
bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
ASSERT(is_user); // For now assert to catch bugs, but technically not an error
if (!is_user)
return {};
Kernel::SmapDisabler disabler;
return Kernel::safe_atomic_load_relaxed(var);
}
bool user_atomic_store_relaxed(volatile u32* var, u32 val)
{
if (FlatPtr(var) & 3)
return false; // not aligned!
bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
ASSERT(is_user); // For now assert to catch bugs, but technically not an error
if (!is_user)
return false;
Kernel::SmapDisabler disabler;
return Kernel::safe_atomic_store_relaxed(var, val);
}
Optional<bool> user_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val)
{
if (FlatPtr(var) & 3)
return {}; // not aligned!
ASSERT(!Kernel::is_user_range(VirtualAddress(&expected), sizeof(expected)));
bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
ASSERT(is_user); // For now assert to catch bugs, but technically not an error
if (!is_user)
return {};
Kernel::SmapDisabler disabler;
return Kernel::safe_atomic_compare_exchange_relaxed(var, expected, val);
}
Optional<u32> user_atomic_fetch_and_relaxed(volatile u32* var, u32 val)
{
if (FlatPtr(var) & 3)
return {}; // not aligned!
bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
ASSERT(is_user); // For now assert to catch bugs, but technically not an error
if (!is_user)
return {};
Kernel::SmapDisabler disabler;
return Kernel::safe_atomic_fetch_and_relaxed(var, val);
}
Optional<u32> user_atomic_fetch_and_not_relaxed(volatile u32* var, u32 val)
{
if (FlatPtr(var) & 3)
return {}; // not aligned!
bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
ASSERT(is_user); // For now assert to catch bugs, but technically not an error
if (!is_user)
return {};
Kernel::SmapDisabler disabler;
return Kernel::safe_atomic_fetch_and_not_relaxed(var, val);
}
Optional<u32> user_atomic_fetch_or_relaxed(volatile u32* var, u32 val)
{
if (FlatPtr(var) & 3)
return {}; // not aligned!
bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
ASSERT(is_user); // For now assert to catch bugs, but technically not an error
if (!is_user)
return {};
Kernel::SmapDisabler disabler;
return Kernel::safe_atomic_fetch_or_relaxed(var, val);
}
Optional<u32> user_atomic_fetch_xor_relaxed(volatile u32* var, u32 val)
{
if (FlatPtr(var) & 3)
return {}; // not aligned!
bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
ASSERT(is_user); // For now assert to catch bugs, but technically not an error
if (!is_user)
return {};
Kernel::SmapDisabler disabler;
return Kernel::safe_atomic_fetch_xor_relaxed(var, val);
}
extern "C" {
bool copy_to_user(void* dest_ptr, const void* src_ptr, size_t n)


@@ -37,6 +37,16 @@ struct StringArgument;
String copy_string_from_user(const char*, size_t);
String copy_string_from_user(Userspace<const char*>, size_t);
[[nodiscard]] Optional<u32> user_atomic_fetch_add_relaxed(volatile u32* var, u32 val);
[[nodiscard]] Optional<u32> user_atomic_exchange_relaxed(volatile u32* var, u32 val);
[[nodiscard]] Optional<u32> user_atomic_load_relaxed(volatile u32* var);
[[nodiscard]] bool user_atomic_store_relaxed(volatile u32* var, u32 val);
[[nodiscard]] Optional<bool> user_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val);
[[nodiscard]] Optional<u32> user_atomic_fetch_and_relaxed(volatile u32* var, u32 val);
[[nodiscard]] Optional<u32> user_atomic_fetch_and_not_relaxed(volatile u32* var, u32 val);
[[nodiscard]] Optional<u32> user_atomic_fetch_or_relaxed(volatile u32* var, u32 val);
[[nodiscard]] Optional<u32> user_atomic_fetch_xor_relaxed(volatile u32* var, u32 val);
extern "C" {
[[nodiscard]] bool copy_to_user(void*, const void*, size_t);


@@ -36,6 +36,7 @@
#include <AK/WeakPtr.h>
#include <AK/Weakable.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Arch/i386/SafeMem.h>
#include <Kernel/Forward.h>
#include <Kernel/KResult.h>
#include <Kernel/LockMode.h>


@@ -13,6 +13,14 @@ SECTIONS
start_of_kernel_text = .;
*(.text)
*(.text.startup)
start_of_safemem_text = .;
*(.text.safemem)
end_of_safemem_text = .;
start_of_safemem_atomic_text = .;
*(.text.safemem.atomic)
end_of_safemem_atomic_text = .;
end_of_kernel_text = .;
}