2020-01-18 08:38:21 +00:00
|
|
|
/*
|
2021-03-02 12:57:34 +00:00
|
|
|
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
|
2022-02-09 18:33:39 +00:00
|
|
|
* Copyright (c) 2022, the SerenityOS developers.
|
2020-01-18 08:38:21 +00:00
|
|
|
*
|
2021-04-22 08:24:48 +00:00
|
|
|
* SPDX-License-Identifier: BSD-2-Clause
|
2020-01-18 08:38:21 +00:00
|
|
|
*/
|
|
|
|
|
2020-09-18 07:49:51 +00:00
|
|
|
#include <Kernel/API/Syscall.h>
|
2023-01-25 19:30:00 +00:00
|
|
|
#include <Kernel/Arch/RegisterState.h>
|
2022-10-16 14:57:21 +00:00
|
|
|
#include <Kernel/Arch/TrapFrame.h>
|
2021-08-06 08:45:34 +00:00
|
|
|
#include <Kernel/Memory/MemoryManager.h>
|
2021-02-14 09:48:04 +00:00
|
|
|
#include <Kernel/Panic.h>
|
2021-08-10 19:02:59 +00:00
|
|
|
#include <Kernel/PerformanceManager.h>
|
2019-07-19 11:08:26 +00:00
|
|
|
#include <Kernel/Process.h>
|
2022-01-29 12:08:37 +00:00
|
|
|
#include <Kernel/Scheduler.h>
|
2021-06-22 15:40:16 +00:00
|
|
|
#include <Kernel/Sections.h>
|
2020-03-28 08:47:16 +00:00
|
|
|
#include <Kernel/ThreadTracer.h>
|
2018-10-16 09:01:38 +00:00
|
|
|
|
2020-02-16 00:27:42 +00:00
|
|
|
namespace Kernel {
|
|
|
|
|
2018-10-16 09:01:38 +00:00
|
|
|
namespace Syscall {
|
|
|
|
|
2022-04-01 17:58:27 +00:00
|
|
|
// Member-function-pointer type for ordinary syscall handlers: up to four
// raw FlatPtr arguments, returning ErrorOr<FlatPtr>.
using Handler = auto(Process::*)(FlatPtr, FlatPtr, FlatPtr, FlatPtr) -> ErrorOr<FlatPtr>;
// Handler type for syscalls (fork, sigreturn) that need the full RegisterState
// instead of individual arguments; stored in the table via bit_cast.
using HandlerWithRegisterState = auto(Process::*)(RegisterState&) -> ErrorOr<FlatPtr>;

// One table entry per syscall: the handler itself plus whether it must run
// while holding the process's big lock.
struct HandlerMetadata {
    Handler handler;
    NeedsBigProcessLock needs_lock;
};

// Expands each ENUMERATE_SYSCALLS entry into a { handler, lock-requirement }
// initializer. All handlers are stored as the four-argument Handler type and
// bit_cast back when a RegisterState-based handler is invoked.
#define __ENUMERATE_SYSCALL(sys_call, needs_lock) { bit_cast<Handler>(&Process::sys$##sys_call), needs_lock },
// Indexed directly by the syscall function number (see Function in Syscall.h).
static const HandlerMetadata s_syscall_table[] = {
    ENUMERATE_SYSCALLS(__ENUMERATE_SYSCALL)
};
#undef __ENUMERATE_SYSCALL
|
2018-12-19 23:39:29 +00:00
|
|
|
|
2021-11-07 23:51:39 +00:00
|
|
|
// Dispatch a syscall for the current thread.
//
// `function` is the raw syscall number; arg1..arg4 are the raw userspace
// arguments. Returns the handler's result, or ENOSYS for unknown/null
// syscall numbers. For SC_exit / SC_exit_thread this function never
// returns normally.
ErrorOr<FlatPtr> handle(RegisterState& regs, FlatPtr function, FlatPtr arg1, FlatPtr arg2, FlatPtr arg3, FlatPtr arg4)
{
    VERIFY_INTERRUPTS_ENABLED();
    auto* current_thread = Thread::current();
    auto& process = current_thread->process();
    current_thread->did_syscall();

    PerformanceManager::add_syscall_event(*current_thread, regs);

    // Reject out-of-range syscall numbers before indexing the table.
    if (function >= Function::__Count) {
        dbgln("Unknown syscall {} requested ({:p}, {:p}, {:p}, {:p})", function, arg1, arg2, arg3, arg4);
        return ENOSYS;
    }

    auto const syscall_metadata = s_syscall_table[function];
    // A null entry means the syscall number exists but has no handler compiled in.
    if (syscall_metadata.handler == nullptr) {
        dbgln("Null syscall {} requested, you probably need to rebuild this program!", function);
        return ENOSYS;
    }

    // The locker starts detached; it only attaches (and locks) when the
    // syscall is marked as needing the big process lock.
    MutexLocker mutex_locker;
    auto const needs_big_lock = syscall_metadata.needs_lock == NeedsBigProcessLock::Yes;
    if (needs_big_lock) {
        mutex_locker.attach_and_lock(process.big_lock());
    }

    if (function == SC_exit || function == SC_exit_thread) {
        // These syscalls need special handling since they never return to the caller.
        // In these cases the process big lock will get released on the exit of the thread.

        if (auto* tracer = process.tracer(); tracer && tracer->is_tracing_syscalls()) {
            regs.set_return_reg(0);
            tracer->set_trace_syscalls(false);
            process.tracer_trap(*current_thread, regs); // this triggers SIGTRAP and stops the thread!
        }

        // Both exit syscalls never return, so control cannot fall through
        // between the cases; the default is unreachable by construction.
        switch (function) {
        case SC_exit:
            process.sys$exit(arg1);
        case SC_exit_thread:
            process.sys$exit_thread(arg1, arg2, arg3);
        default:
            VERIFY_NOT_REACHED();
        }
    }

    ErrorOr<FlatPtr> result { FlatPtr(nullptr) };
    if (function == SC_fork || function == SC_sigreturn) {
        // These syscalls want the RegisterState& rather than individual parameters.
        auto handler = bit_cast<HandlerWithRegisterState>(syscall_metadata.handler);
        result = (process.*(handler))(regs);
    } else {
        result = (process.*(syscall_metadata.handler))(arg1, arg2, arg3, arg4);
    }

    return result;
}
|
|
|
|
|
2018-10-16 09:01:38 +00:00
|
|
|
}
|
|
|
|
|
2023-01-25 19:30:00 +00:00
|
|
|
extern "C" NEVER_INLINE void syscall_handler(TrapFrame* trap);
// Architecture-level syscall entry point, invoked from the syscall trap.
// Captures the syscall number and arguments from the saved registers,
// dispatches through Syscall::handle(), writes the result back into the
// return register, and services tracing/signals/death before returning
// to userspace.
NEVER_INLINE void syscall_handler(TrapFrame* trap)
{
#if ARCH(X86_64)
    // Make sure SMAP protection is enabled on syscall entry.
    clac();
#elif ARCH(AARCH64)
    // FIXME: Implement the security mechanism for aarch64
#else
# error Unknown architecture
#endif

    auto& regs = *trap->regs;
    auto* current_thread = Thread::current();
    // Syscalls may only arrive from user mode.
    VERIFY(current_thread->previous_mode() == ExecutionMode::User);
    auto& process = current_thread->process();
    if (process.is_dying()) {
        // It's possible this thread is just about to make a syscall while another is
        // killing our process. Bail out without dispatching.
        current_thread->die_if_needed();
        return;
    }

    // Syscall-entry stop for a tracer (e.g. strace). Tracing is re-armed by
    // the tracer before the thread resumes.
    if (auto* tracer = process.tracer(); tracer && tracer->is_tracing_syscalls()) {
        tracer->set_trace_syscalls(false);
        process.tracer_trap(*current_thread, regs); // this triggers SIGTRAP and stops the thread!
    }

    current_thread->yield_if_stopped();

#if ARCH(X86_64)
    // Apply a random offset in the range 0-255 to the stack pointer,
    // to make kernel stacks a bit less deterministic.
    u32 lsw;
    u32 msw;
    read_tsc(lsw, msw);

    // alloca moves the stack pointer; the empty asm with a memory output
    // keeps the compiler from optimizing the allocation away.
    auto* ptr = (char*)__builtin_alloca(lsw & 0xff);
    asm volatile(""
                 : "=m"(*ptr));

    // Bits 12-13 of RFLAGS are the I/O privilege level; userspace must
    // never run with IOPL != 0.
    constexpr FlatPtr iopl_mask = 3u << 12;

    FlatPtr flags = regs.flags();
    if ((flags & (iopl_mask)) != 0) {
        PANIC("Syscall from process with IOPL != 0");
    }
#elif ARCH(AARCH64)
    // FIXME: Implement the security mechanism for aarch64
#else
# error Unknown architecture
#endif

    Memory::MemoryManager::validate_syscall_preconditions(process, regs);

    // Pull the syscall number and its four raw arguments out of the saved
    // register state (arch-specific register assignment).
    FlatPtr function;
    FlatPtr arg1;
    FlatPtr arg2;
    FlatPtr arg3;
    FlatPtr arg4;
    regs.capture_syscall_params(function, arg1, arg2, arg3, arg4);

    auto result = Syscall::handle(regs, function, arg1, arg2, arg3, arg4);

    // Errors are returned to userspace as negative errno values.
    if (result.is_error()) {
        regs.set_return_reg(-result.error().code());
    } else {
        regs.set_return_reg(result.value());
    }

    // Syscall-exit stop for the tracer, mirroring the entry stop above.
    if (auto* tracer = process.tracer(); tracer && tracer->is_tracing_syscalls()) {
        tracer->set_trace_syscalls(false);
        process.tracer_trap(*current_thread, regs); // this triggers SIGTRAP and stops the thread!
    }

    current_thread->yield_if_stopped();

    current_thread->check_dispatch_pending_signal();

    // If the previous mode somehow changed something is seriously messed up...
    VERIFY(current_thread->previous_mode() == ExecutionMode::User);

    // Check if we're supposed to return to userspace or just die.
    current_thread->die_if_needed();

    // Crash any processes which have committed a promise violation during syscall handling.
    if (result.is_error() && result.error().code() == EPROMISEVIOLATION) {
        VERIFY(current_thread->is_promise_violation_pending());
        current_thread->set_promise_violation_pending(false);
        process.crash(SIGABRT, {});
    } else {
        VERIFY(!current_thread->is_promise_violation_pending());
    }
    // We must not return to userspace while holding the scheduler lock.
    VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
}
|
2020-02-16 00:27:42 +00:00
|
|
|
|
|
|
|
}
|