2020-01-18 08:38:21 +00:00
|
|
|
/*
|
2021-03-02 12:57:34 +00:00
|
|
|
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
|
2020-01-18 08:38:21 +00:00
|
|
|
*
|
2021-04-22 08:24:48 +00:00
|
|
|
* SPDX-License-Identifier: BSD-2-Clause
|
2020-01-18 08:38:21 +00:00
|
|
|
*/
|
|
|
|
|
2020-09-18 07:49:51 +00:00
|
|
|
#include <Kernel/API/Syscall.h>
|
2021-06-21 15:34:09 +00:00
|
|
|
#include <Kernel/Arch/x86/Interrupts.h>
|
|
|
|
#include <Kernel/Arch/x86/TrapFrame.h>
|
2021-02-14 09:48:04 +00:00
|
|
|
#include <Kernel/Panic.h>
|
2019-07-19 11:08:26 +00:00
|
|
|
#include <Kernel/Process.h>
|
2021-06-22 15:40:16 +00:00
|
|
|
#include <Kernel/Sections.h>
|
2020-03-28 08:47:16 +00:00
|
|
|
#include <Kernel/ThreadTracer.h>
|
2019-11-17 11:11:43 +00:00
|
|
|
#include <Kernel/VM/MemoryManager.h>
|
2018-10-16 09:01:38 +00:00
|
|
|
|
2020-02-16 00:27:42 +00:00
|
|
|
namespace Kernel {
|
|
|
|
|
2021-04-29 12:54:15 +00:00
|
|
|
extern "C" void syscall_handler(TrapFrame*) __attribute__((used));
|
2019-12-14 15:09:07 +00:00
|
|
|
extern "C" void syscall_asm_entry();
|
2018-10-16 09:01:38 +00:00
|
|
|
|
2021-07-05 13:34:07 +00:00
|
|
|
// Userspace entry point for syscalls (installed on the syscall interrupt
// vector by Syscall::initialize()). NAKED: the compiler emits no prologue or
// epilogue, so the hand-written asm below fully owns the stack layout. It
// builds a RegisterState + TrapFrame on the kernel stack, enters the trap,
// dispatches to syscall_handler(), and leaves through common_trap_exit.
NEVER_INLINE NAKED void syscall_asm_entry()
{
    // clang-format off
#if ARCH(I386)
    asm(
        // Fake "error code" slot so the frame matches exception frames.
        " pushl $0x0\n"
        // Save all general-purpose registers, then the segment registers.
        " pusha\n"
        " pushl %ds\n"
        " pushl %es\n"
        " pushl %fs\n"
        " pushl %gs\n"
        " pushl %ss\n"
        // Switch ds/es to the kernel data segment and gs to the
        // processor-specific segment.
        " mov $" __STRINGIFY(GDT_SELECTOR_DATA0) ", %ax\n"
        " mov %ax, %ds\n"
        " mov %ax, %es\n"
        " mov $" __STRINGIFY(GDT_SELECTOR_PROC) ", %ax\n"
        " mov %ax, %gs\n"
        // Clear the direction flag as the SysV ABI requires, and scrub
        // esi/edi so no userspace values leak into kernel code.
        " cld\n"
        " xor %esi, %esi\n"
        " xor %edi, %edi\n"
        " pushl %esp \n" // set TrapFrame::regs
        // Reserve the rest of the TrapFrame (the regs pointer just pushed
        // accounts for 4 bytes of it).
        " subl $" __STRINGIFY(TRAP_FRAME_SIZE - 4) ", %esp \n"
        " movl %esp, %ebx \n"
        " pushl %ebx \n" // push pointer to TrapFrame
        " call enter_trap_no_irq \n"
        // Re-store the TrapFrame pointer as the argument for each call;
        // the callee may have clobbered the stack slot.
        " movl %ebx, 0(%esp) \n" // push pointer to TrapFrame
        " call syscall_handler \n"
        " movl %ebx, 0(%esp) \n" // push pointer to TrapFrame
        " jmp common_trap_exit \n");
#elif ARCH(X86_64)
    asm(
        // Fake "error code" slot so the frame matches exception frames.
        " pushq $0x0\n"
        // Save all general-purpose registers in RegisterState layout order.
        " pushq %r15\n"
        " pushq %r14\n"
        " pushq %r13\n"
        " pushq %r12\n"
        " pushq %r11\n"
        " pushq %r10\n"
        " pushq %r9\n"
        " pushq %r8\n"
        " pushq %rax\n"
        " pushq %rcx\n"
        " pushq %rdx\n"
        " pushq %rbx\n"
        " pushq %rsp\n"
        " pushq %rbp\n"
        " pushq %rsi\n"
        " pushq %rdi\n"
        " pushq %rsp \n" /* set TrapFrame::regs */
        // Reserve the rest of the TrapFrame (the regs pointer just pushed
        // accounts for 8 bytes of it).
        " subq $" __STRINGIFY(TRAP_FRAME_SIZE - 8) ", %rsp \n"
        " movq %rsp, %rdi \n" // first argument: pointer to TrapFrame
        " cld\n"              // SysV ABI: direction flag must be clear
        " call enter_trap_no_irq \n"
        " movq %rsp, %rdi \n" // refresh the TrapFrame argument
        " call syscall_handler\n"
        " jmp common_trap_exit \n");
#endif
    // clang-format on
}
|
2018-10-16 09:01:38 +00:00
|
|
|
|
|
|
|
namespace Syscall {
|
|
|
|
|
2021-07-24 00:15:07 +00:00
|
|
|
// Dispatches a decoded syscall (function number + up to four args) to the
// matching Process::sys$* handler. Defined below.
static KResultOr<FlatPtr> handle(RegisterState&, FlatPtr function, FlatPtr arg1, FlatPtr arg2, FlatPtr arg3, FlatPtr arg4);
|
2019-11-09 21:18:16 +00:00
|
|
|
|
2021-02-19 23:00:19 +00:00
|
|
|
// Installs syscall_asm_entry on the syscall interrupt vector so userspace
// can reach it; user-callable so ring 3 may trigger the gate. Runs once at
// boot (UNMAP_AFTER_INIT: the code is unmapped after initialization).
UNMAP_AFTER_INIT void initialize()
{
    register_user_callable_interrupt_handler(syscall_vector, syscall_asm_entry);
}
|
2018-10-16 09:01:38 +00:00
|
|
|
|
2019-11-09 21:18:16 +00:00
|
|
|
// The table below stores every handler as a four-FlatPtr-argument member
// function pointer; handlers with other signatures are force-cast in, so
// silence the (intentional) function-type cast warning.
#pragma GCC diagnostic ignored "-Wcast-function-type"
// Common shape of a syscall handler: up to four untyped machine-word args.
typedef KResultOr<FlatPtr> (Process::*Handler)(FlatPtr, FlatPtr, FlatPtr, FlatPtr);
// Shape for syscalls (fork, sigreturn) that need the full RegisterState.
typedef KResultOr<FlatPtr> (Process::*HandlerWithRegisterState)(RegisterState&);
// One table entry: the handler plus whether it must hold the process big lock.
struct HandlerMetadata {
    Handler handler;
    NeedsBigProcessLock needs_lock;
};

// Expands to one HandlerMetadata initializer per syscall.
#define __ENUMERATE_SYSCALL(sys_call, needs_lock) { reinterpret_cast<Handler>(&Process::sys$##sys_call), needs_lock },
// Dispatch table indexed by syscall function number, generated from the
// master syscall list in Kernel/API/Syscall.h.
static const HandlerMetadata s_syscall_table[] = {
    ENUMERATE_SYSCALLS(__ENUMERATE_SYSCALL)
};
#undef __ENUMERATE_SYSCALL
|
2018-12-19 23:39:29 +00:00
|
|
|
|
2021-07-24 00:15:07 +00:00
|
|
|
// Dispatches one syscall for the current thread.
//
// regs:     saved userspace register state (needed by fork/sigreturn, and
//           mutated when a tracer forces an early return value).
// function: syscall number, validated against Function::__Count.
// arg1..4:  raw machine-word arguments, interpreted by the handler.
//
// Returns the handler's result, or ENOSYS for unknown/null syscalls.
// SC_exit and SC_exit_thread never return to the caller.
KResultOr<FlatPtr> handle(RegisterState& regs, FlatPtr function, FlatPtr arg1, FlatPtr arg2, FlatPtr arg3, FlatPtr arg4)
{
    VERIFY_INTERRUPTS_ENABLED();
    auto current_thread = Thread::current();
    auto& process = current_thread->process();
    current_thread->did_syscall();

    // Reject syscall numbers beyond the table before indexing into it.
    if (function >= Function::__Count) {
        dbgln("Unknown syscall {} requested ({:p}, {:p}, {:p}, {:p})", function, arg1, arg2, arg3, arg4);
        return ENOSYS;
    }

    // Bind by reference; copying the metadata entry would be pointless.
    const auto& syscall_metadata = s_syscall_table[function];
    if (syscall_metadata.handler == nullptr) {
        // A null entry means the kernel syscall list is newer than the program.
        dbgln("Null syscall {} requested, you probably need to rebuild this program!", function);
        return ENOSYS;
    }

    // Take the process big lock only for syscalls that declared they need it.
    MutexLocker mutex_locker;
    const auto needs_big_lock = syscall_metadata.needs_lock == NeedsBigProcessLock::Yes;
    if (needs_big_lock) {
        mutex_locker.attach_and_lock(process.big_lock());
    }

    if (function == SC_exit || function == SC_exit_thread) {
        // These syscalls need special handling since they never return to the caller.
        // In these cases the process big lock will get released on the exit of the thread.

        if (auto* tracer = process.tracer(); tracer && tracer->is_tracing_syscalls()) {
            // Report a 0 return value to the tracer, since the thread is gone.
            regs.set_return_reg(0);
            tracer->set_trace_syscalls(false);
            process.tracer_trap(*current_thread, regs); // this triggers SIGTRAP and stops the thread!
        }

        switch (function) {
        case SC_exit:
            process.sys$exit(arg1);
            break;
        case SC_exit_thread:
            process.sys$exit_thread(arg1, arg2, arg3);
            break;
        default:
            VERIFY_NOT_REACHED();
        }
    }

    KResultOr<FlatPtr> result { FlatPtr(nullptr) };
    if (function == SC_fork || function == SC_sigreturn) {
        // These syscalls want the RegisterState& rather than individual parameters.
        auto handler = reinterpret_cast<HandlerWithRegisterState>(syscall_metadata.handler);
        result = (process.*(handler))(regs);
    } else {
        result = (process.*(syscall_metadata.handler))(arg1, arg2, arg3, arg4);
    }

    return result;
}
|
|
|
|
|
2018-10-16 09:01:38 +00:00
|
|
|
}
|
|
|
|
|
2021-04-29 12:54:15 +00:00
|
|
|
// C++ half of the syscall path, called from syscall_asm_entry with the
// TrapFrame built on the kernel stack. Performs pre-dispatch checks
// (dying process, tracer, IOPL, address-space preconditions), dispatches
// via Syscall::handle(), writes the result into the saved registers, and
// handles post-syscall tracing/signals before returning to userspace.
NEVER_INLINE void syscall_handler(TrapFrame* trap)
{
    auto& regs = *trap->regs;
    auto current_thread = Thread::current();
    // Syscalls can only ever arrive from userspace.
    VERIFY(current_thread->previous_mode() == Thread::PreviousMode::UserMode);
    auto& process = current_thread->process();
    if (process.is_dying()) {
        // It's possible this thread is just about to make a syscall while
        // another is killing our process.
        current_thread->die_if_needed();
        return;
    }

    // Syscall-entry stop for an attached tracer (e.g. strace): tracing is
    // re-enabled by the tracer when it resumes us.
    if (auto tracer = process.tracer(); tracer && tracer->is_tracing_syscalls()) {
        tracer->set_trace_syscalls(false);
        process.tracer_trap(*current_thread, regs); // this triggers SIGTRAP and stops the thread!
    }

    current_thread->yield_if_stopped();

    // Make sure SMAP protection is enabled on syscall entry.
    clac();

    // Apply a random offset in the range 0-255 to the stack pointer,
    // to make kernel stacks a bit less deterministic.
    u32 lsw;
    u32 msw;
    read_tsc(lsw, msw);

    auto* ptr = (char*)__builtin_alloca(lsw & 0xff);
    // Empty asm with a memory output: keeps the alloca from being
    // optimized away.
    asm volatile(""
                 : "=m"(*ptr));

    // EFLAGS bits 12-13: I/O privilege level.
    static constexpr FlatPtr iopl_mask = 3u << 12;

    FlatPtr flags = regs.flags();
    // Userspace must never run with elevated I/O privileges; treat it as
    // a kernel bug and panic.
    if ((flags & (iopl_mask)) != 0) {
        PANIC("Syscall from process with IOPL != 0");
    }

    MM.validate_syscall_preconditions(process.space(), regs);

    // Decode the syscall number and arguments from the saved registers
    // (register assignment is architecture-specific).
    FlatPtr function;
    FlatPtr arg1;
    FlatPtr arg2;
    FlatPtr arg3;
    FlatPtr arg4;
    regs.capture_syscall_params(function, arg1, arg2, arg3, arg4);

    auto result = Syscall::handle(regs, function, arg1, arg2, arg3, arg4);

    // Store either the error code or the value in the return register so
    // userspace sees it after iret.
    if (result.is_error()) {
        regs.set_return_reg(result.error());
    } else {
        regs.set_return_reg(result.value());
    }

    // Syscall-exit stop for the tracer, mirroring the entry stop above.
    if (auto tracer = process.tracer(); tracer && tracer->is_tracing_syscalls()) {
        tracer->set_trace_syscalls(false);
        process.tracer_trap(*current_thread, regs); // this triggers SIGTRAP and stops the thread!
    }

    current_thread->yield_if_stopped();

    current_thread->check_dispatch_pending_signal();

    // If the previous mode somehow changed something is seriously messed up...
    VERIFY(current_thread->previous_mode() == Thread::PreviousMode::UserMode);

    // Check if we're supposed to return to userspace or just die.
    current_thread->die_if_needed();

    VERIFY(!g_scheduler_lock.own_lock());
}
|
2020-02-16 00:27:42 +00:00
|
|
|
|
|
|
|
}
|