/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Assertions.h>
#include <AK/MemMem.h>
#include <AK/Types.h>
#include <Kernel/Arch/SafeMem.h>
#include <Kernel/Arch/SmapDisabler.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/StdLib.h>

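// Copies a NUL-terminated string of at most `user_str_size` bytes from user memory into a
// kernel-owned KString. Fails with EFAULT if the range is not in userspace or if a fault
// occurs while reading it.
//
// Illustrative use from a syscall handler (a sketch; the names below are hypothetical):
//
//     auto path = TRY(try_copy_kstring_from_user(user_path, path_length));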
ErrorOr<NonnullOwnPtr<Kernel::KString>> try_copy_kstring_from_user(Userspace<char const*> user_str, size_t user_str_size)
{
    bool is_user = Kernel::Memory::is_user_range(user_str.vaddr(), user_str_size);
    if (!is_user)
        return EFAULT;
    Kernel::SmapDisabler disabler;
    void* fault_at;
    ssize_t length = Kernel::safe_strnlen(user_str.unsafe_userspace_ptr(), user_str_size, fault_at);
    if (length < 0) {
        dbgln("copy_kstring_from_user({:p}, {}) failed at {} (strnlen)", static_cast<void const*>(user_str.unsafe_userspace_ptr()), user_str_size, VirtualAddress { fault_at });
        return EFAULT;
    }
    char* buffer;
    auto new_string = TRY(Kernel::KString::try_create_uninitialized(length, buffer));

    buffer[length] = '\0';

    if (length == 0)
        return new_string;

    if (!Kernel::safe_memcpy(buffer, user_str.unsafe_userspace_ptr(), (size_t)length, fault_at)) {
        dbgln("copy_kstring_from_user({:p}, {}) failed at {} (memcpy)", static_cast<void const*>(user_str.unsafe_userspace_ptr()), user_str_size, VirtualAddress { fault_at });
        return EFAULT;
    }
    return new_string;
}

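// Copies a timespec or timeval out of user memory and converts it into the kernel's Time type.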
ErrorOr<Time> copy_time_from_user(timespec const* ts_user)
{
    timespec ts {};
    TRY(copy_from_user(&ts, ts_user, sizeof(timespec)));
    return Time::from_timespec(ts);
}

ErrorOr<Time> copy_time_from_user(timeval const* tv_user)
{
    timeval tv {};
    TRY(copy_from_user(&tv, tv_user, sizeof(timeval)));
    return Time::from_timeval(tv);
}

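// Userspace<T*> convenience specializations that forward to the raw-pointer overloads above.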
template<>
ErrorOr<Time> copy_time_from_user<timeval const>(Userspace<timeval const*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
ErrorOr<Time> copy_time_from_user<timeval>(Userspace<timeval*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
ErrorOr<Time> copy_time_from_user<timespec const>(Userspace<timespec const*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
ErrorOr<Time> copy_time_from_user<timespec>(Userspace<timespec*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }

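// The user_atomic_* helpers below perform relaxed atomic operations directly on user memory
// (e.g. for futex-style synchronization). Each one rejects misaligned or non-userspace addresses
// up front, then lifts SMAP protection only for the duration of the fault-safe
// Kernel::safe_atomic_* call, returning an empty Optional (or false for the store) on failure.
//
// Illustrative use (a sketch; `user_word` is a hypothetical u32 volatile* into user memory):
//
//     auto old_value = user_atomic_fetch_add_relaxed(user_word, 1);
//     if (!old_value.has_value())
//         return EFAULT;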
Optional<u32> user_atomic_fetch_add_relaxed(u32 volatile* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_add_relaxed(var, val);
}

Optional<u32> user_atomic_exchange_relaxed(u32 volatile* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_exchange_relaxed(var, val);
}

Optional<u32> user_atomic_load_relaxed(u32 volatile* var)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_load_relaxed(var);
}

bool user_atomic_store_relaxed(u32 volatile* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return false; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return false;
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_store_relaxed(var, val);
}

Optional<bool> user_atomic_compare_exchange_relaxed(u32 volatile* var, u32& expected, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    VERIFY(!Kernel::Memory::is_user_range(VirtualAddress(&expected), sizeof(expected)));
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_compare_exchange_relaxed(var, expected, val);
}

Optional<u32> user_atomic_fetch_and_relaxed(u32 volatile* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_and_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_and_not_relaxed(u32 volatile* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_and_not_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_or_relaxed(u32 volatile* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_or_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_xor_relaxed(u32 volatile* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_xor_relaxed(var, val);
}

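// Copies `n` bytes from kernel memory into user memory. The destination must lie entirely
// within the userspace range; the source is asserted to be kernel memory.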
ErrorOr<void> copy_to_user(void* dest_ptr, void const* src_ptr, size_t n)
{
    if (!Kernel::Memory::is_user_range(VirtualAddress(dest_ptr), n))
        return EFAULT;
    VERIFY(!Kernel::Memory::is_user_range(VirtualAddress(src_ptr), n));
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
        VERIFY(VirtualAddress(fault_at) >= VirtualAddress(dest_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)dest_ptr + n));
        dbgln("copy_to_user({:p}, {:p}, {}) failed at {}", dest_ptr, src_ptr, n, VirtualAddress { fault_at });
        return EFAULT;
    }
    return {};
}

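// Copies `n` bytes from user memory into kernel memory. The source must lie entirely within
// the userspace range; the destination is asserted to be kernel memory.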
ErrorOr<void> copy_from_user(void* dest_ptr, void const* src_ptr, size_t n)
{
    if (!Kernel::Memory::is_user_range(VirtualAddress(src_ptr), n))
        return EFAULT;
    VERIFY(!Kernel::Memory::is_user_range(VirtualAddress(dest_ptr), n));
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
        VERIFY(VirtualAddress(fault_at) >= VirtualAddress(src_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)src_ptr + n));
        dbgln("copy_from_user({:p}, {:p}, {}) failed at {}", dest_ptr, src_ptr, n, VirtualAddress { fault_at });
        return EFAULT;
    }
    return {};
}

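// Fills `n` bytes of user memory with the byte value `c`, failing with EFAULT if the range
// is not in userspace or a fault occurs while writing.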
ErrorOr<void> memset_user(void* dest_ptr, int c, size_t n)
{
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(dest_ptr), n);
    if (!is_user)
        return EFAULT;
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memset(dest_ptr, c, n, fault_at)) {
        dbgln("memset_user({:p}, {}, {}) failed at {}", dest_ptr, c, n, VirtualAddress { fault_at });
        return EFAULT;
    }
    return {};
}

#if defined(AK_COMPILER_CLANG) && defined(ENABLE_KERNEL_LTO)
// Due to a chicken-and-egg situation, certain linker-defined symbols that are added on-demand (like the GOT)
// need to be present before LTO bitcode files are compiled. And since we don't link to any native object files,
// the linker does not know that _GLOBAL_OFFSET_TABLE_ is needed, so it doesn't define it, so linking as a PIE fails.
// See https://bugs.llvm.org/show_bug.cgi?id=39634
FlatPtr missing_got_workaround()
{
    extern volatile FlatPtr _GLOBAL_OFFSET_TABLE_;
    return _GLOBAL_OFFSET_TABLE_;
}
#endif

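// Routines exported with C linkage; memmem() simply forwards to AK::memmem.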
extern "C" {

void const* memmem(void const* haystack, size_t haystack_length, void const* needle, size_t needle_length)
{
    return AK::memmem(haystack, haystack_length, needle, needle_length);
}

// Functions that are automatically called by the C++ compiler.
// Declare them first, to tell the silly compiler that they are indeed being used.
[[noreturn]] void __stack_chk_fail() __attribute__((used));
[[noreturn]] void __stack_chk_fail_local() __attribute__((used));
extern "C" int __cxa_atexit(void (*)(void*), void*, void*);
[[noreturn]] void __cxa_pure_virtual();

[[noreturn]] void __stack_chk_fail()
{
    VERIFY_NOT_REACHED();
}

[[noreturn]] void __stack_chk_fail_local()
{
    VERIFY_NOT_REACHED();
}

extern "C" int __cxa_atexit(void (*)(void*), void*, void*)
{
    VERIFY_NOT_REACHED();
    return 0;
}

[[noreturn]] void __cxa_pure_virtual()
{
    VERIFY_NOT_REACHED();
}
}