2020-01-18 08:38:21 +00:00
|
|
|
/*
|
2021-03-02 12:57:34 +00:00
|
|
|
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
|
2020-01-18 08:38:21 +00:00
|
|
|
*
|
2021-04-22 08:24:48 +00:00
|
|
|
* SPDX-License-Identifier: BSD-2-Clause
|
2020-01-18 08:38:21 +00:00
|
|
|
*/
|
|
|
|
|
2020-09-18 07:49:51 +00:00
|
|
|
#include <Kernel/API/Syscall.h>
|
2021-03-07 20:28:28 +00:00
|
|
|
#include <Kernel/Arch/x86/CPU.h>
|
2021-02-14 09:48:04 +00:00
|
|
|
#include <Kernel/Panic.h>
|
2019-07-19 11:08:26 +00:00
|
|
|
#include <Kernel/Process.h>
|
2020-03-28 08:47:16 +00:00
|
|
|
#include <Kernel/ThreadTracer.h>
|
2019-11-17 11:11:43 +00:00
|
|
|
#include <Kernel/VM/MemoryManager.h>
|
2018-10-16 09:01:38 +00:00
|
|
|
|
2020-02-16 00:27:42 +00:00
|
|
|
namespace Kernel {
|
|
|
|
|
2020-06-27 19:42:28 +00:00
|
|
|
extern "C" void syscall_handler(TrapFrame*);
|
2019-12-14 15:09:07 +00:00
|
|
|
extern "C" void syscall_asm_entry();
|
2018-10-16 09:01:38 +00:00
|
|
|
|
2020-09-18 07:49:51 +00:00
|
|
|
// clang-format off
#if ARCH(I386)
// Userspace syscall entry stub (i386). Installed on the syscall interrupt
// vector by Syscall::initialize() below. It builds a trap frame on the kernel
// stack, switches to kernel data segments, then calls into the C++ handler
// and leaves through the common trap exit path.
asm(
    ".globl syscall_asm_entry\n"
    "syscall_asm_entry:\n"
    // Push a zero "error code" so this frame has the same layout as
    // exception frames that do receive a hardware error code.
    " pushl $0x0\n"
    // Save all general-purpose registers, then the userspace segment registers.
    " pusha\n"
    " pushl %ds\n"
    " pushl %es\n"
    " pushl %fs\n"
    " pushl %gs\n"
    " pushl %ss\n"
    // Load the kernel data selector into %ds/%es.
    " mov $" __STRINGIFY(GDT_SELECTOR_DATA0) ", %ax\n"
    " mov %ax, %ds\n"
    " mov %ax, %es\n"
    // %fs gets GDT_SELECTOR_PROC (per-processor data selector, presumably —
    // confirm against the GDT setup code).
    " mov $" __STRINGIFY(GDT_SELECTOR_PROC) ", %ax\n"
    " mov %ax, %fs\n"
    // Clear the direction flag and scrub the userspace-controlled index
    // registers before entering C++ code.
    " cld\n"
    " xor %esi, %esi\n"
    " xor %edi, %edi\n"
    " pushl %esp \n" // set TrapFrame::regs
    // Reserve the rest of the TrapFrame (the regs pointer above already
    // occupies 4 bytes of it) and remember its address in %ebx.
    " subl $" __STRINGIFY(TRAP_FRAME_SIZE - 4) ", %esp \n"
    " movl %esp, %ebx \n"
    " pushl %ebx \n" // push pointer to TrapFrame
    " call enter_trap_no_irq \n"
    " movl %ebx, 0(%esp) \n" // push pointer to TrapFrame
    " call syscall_handler \n"
    " movl %ebx, 0(%esp) \n" // push pointer to TrapFrame
    " jmp common_trap_exit \n");
#elif ARCH(X86_64)
// x86_64 syscall entry is not implemented yet: disable interrupts and halt
// if a syscall ever arrives here.
asm(
    ".globl syscall_asm_entry\n"
    "syscall_asm_entry:\n"
    " cli\n"
    " hlt\n");
#endif
// clang-format on
|
2018-10-16 09:01:38 +00:00
|
|
|
|
|
|
|
namespace Syscall {
|
|
|
|
|
2021-03-01 12:49:16 +00:00
|
|
|
// Central syscall dispatcher (defined below): takes the saved register state
// plus the already-extracted syscall number and its three raw arguments.
static KResultOr<FlatPtr> handle(RegisterState&, FlatPtr function, FlatPtr arg1, FlatPtr arg2, FlatPtr arg3);

// Install the assembly entry stub (syscall_asm_entry) on the syscall
// interrupt vector as a user-callable handler, so userspace can trigger it.
// UNMAP_AFTER_INIT: only needed during early boot.
UNMAP_AFTER_INIT void initialize()
{
    register_user_callable_interrupt_handler(syscall_vector, syscall_asm_entry);
}
|
2018-10-16 09:01:38 +00:00
|
|
|
|
2019-11-09 21:18:16 +00:00
|
|
|
// Every handler is stored in the table below as a uniform `Handler` type,
// even though a few syscalls (fork/sigreturn — see handle() below) really
// have the RegisterState& signature; the cast triggers this warning.
#pragma GCC diagnostic ignored "-Wcast-function-type"
// Common handler shape: three raw FlatPtr arguments, KResultOr<FlatPtr> result.
typedef KResultOr<FlatPtr> (Process::*Handler)(FlatPtr, FlatPtr, FlatPtr);
// Alternate shape for syscalls that want the full saved register state.
typedef KResultOr<FlatPtr> (Process::*HandlerWithRegisterState)(RegisterState&);
#define __ENUMERATE_SYSCALL(x) reinterpret_cast<Handler>(&Process::sys$##x),
// One entry per syscall, indexed by the Syscall::Function enum value.
static const Handler s_syscall_table[] = {
    ENUMERATE_SYSCALLS(__ENUMERATE_SYSCALL)
};
#undef __ENUMERATE_SYSCALL
|
2018-12-19 23:39:29 +00:00
|
|
|
|
2021-03-01 12:49:16 +00:00
|
|
|
// Dispatch a syscall to the matching Process::sys$* member function.
// `function` is the syscall number (a Syscall::Function value), `arg1..arg3`
// the raw userspace arguments. Returns the handler's result, or ENOSYS for
// out-of-range or null table entries. SC_exit/SC_exit_thread are handled
// specially because they never return to the caller.
KResultOr<FlatPtr> handle(RegisterState& regs, FlatPtr function, FlatPtr arg1, FlatPtr arg2, FlatPtr arg3)
{
    VERIFY_INTERRUPTS_ENABLED();
    auto current_thread = Thread::current();
    auto& process = current_thread->process();
    current_thread->did_syscall();

    if (function == SC_exit || function == SC_exit_thread) {
        // These syscalls need special handling since they never return to the caller.

        // Give an attached syscall tracer a last chance to observe this
        // syscall before the thread/process goes away. eax is zeroed first
        // since there will be no real return value.
        if (auto* tracer = process.tracer(); tracer && tracer->is_tracing_syscalls()) {
            regs.eax = 0;
            tracer->set_trace_syscalls(false);
            process.tracer_trap(*current_thread, regs); // this triggers SIGTRAP and stops the thread!
        }

        switch (function) {
        case SC_exit:
            process.sys$exit(arg1);
            break;
        case SC_exit_thread:
            process.sys$exit_thread(arg1);
            break;
        default:
            VERIFY_NOT_REACHED();
        }
    }

    if (function == SC_fork || function == SC_sigreturn) {
        // These syscalls want the RegisterState& rather than individual parameters.
        // The table stores them cast to the common Handler type; cast back
        // to the real signature before calling (see -Wcast-function-type
        // suppression above the table).
        auto handler = (HandlerWithRegisterState)s_syscall_table[function];
        return (process.*(handler))(regs);
    }

    // Reject syscall numbers beyond the end of the table.
    if (function >= Function::__Count) {
        dbgln("Unknown syscall {} requested ({:08x}, {:08x}, {:08x})", function, arg1, arg2, arg3);
        return ENOSYS;
    }

    // A null entry means the syscall exists in the enum but has no handler
    // compiled in (stale userspace binary, presumably).
    if (s_syscall_table[function] == nullptr) {
        dbgln("Null syscall {} requested, you probably need to rebuild this program!", function);
        return ENOSYS;
    }

    // This appears to be a bogus warning, as s_syscall_table is always
    // initialized, and the index (function) is always bounded.
    // TODO: Figure out how to avoid the suppression.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"

    return (process.*(s_syscall_table[function]))(arg1, arg2, arg3);

#pragma GCC diagnostic pop
}
|
|
|
|
|
2018-10-16 09:01:38 +00:00
|
|
|
}
|
|
|
|
|
2020-06-27 19:42:28 +00:00
|
|
|
// C++ half of the syscall entry path, called from syscall_asm_entry with the
// TrapFrame built by the stub. Validates the calling context (IOPL, stack,
// calling region), notifies any attached tracer before and after, dispatches
// via Syscall::handle(), and writes the result back into eax for return to
// userspace.
void syscall_handler(TrapFrame* trap)
{
    auto& regs = *trap->regs;
    auto current_thread = Thread::current();
    VERIFY(current_thread->previous_mode() == Thread::PreviousMode::UserMode);
    auto& process = current_thread->process();

    // Syscall-entry tracer stop (the matching syscall-exit stop is below).
    if (auto tracer = process.tracer(); tracer && tracer->is_tracing_syscalls()) {
        tracer->set_trace_syscalls(false);
        process.tracer_trap(*current_thread, regs); // this triggers SIGTRAP and stops the thread!
    }

    current_thread->yield_if_stopped();

    // Make sure SMAP protection is enabled on syscall entry.
    clac();

    // Apply a random offset in the range 0-255 to the stack pointer,
    // to make kernel stacks a bit less deterministic.
    u32 lsw;
    u32 msw;
    read_tsc(lsw, msw);

    auto* ptr = (char*)__builtin_alloca(lsw & 0xff);
    // Empty asm with a memory output: marks the alloca'd storage as used,
    // presumably so the compiler can't optimize the adjustment away — confirm.
    asm volatile(""
                 : "=m"(*ptr));

    // EFLAGS bits 12-13 are the I/O privilege level; userspace must run with
    // IOPL 0, anything else indicates a serious kernel bug.
    static constexpr FlatPtr iopl_mask = 3u << 12;

    if ((regs.eflags & (iopl_mask)) != 0) {
        PANIC("Syscall from process with IOPL != 0");
    }

    // NOTE: We take the big process lock before inspecting memory regions.
    process.big_lock().lock();

    // The userspace stack pointer must point at valid user stack memory.
    if (!MM.validate_user_stack(process, VirtualAddress(regs.userspace_esp))) {
        dbgln("Invalid stack pointer: {:p}", regs.userspace_esp);
        handle_crash(regs, "Bad stack on syscall entry", SIGSTKFLT);
    }

    // The faulting/calling instruction must come from a mapped user region...
    auto* calling_region = MM.find_user_region_from_vaddr(process.space(), VirtualAddress(regs.eip));
    if (!calling_region) {
        dbgln("Syscall from {:p} which has no associated region", regs.eip);
        handle_crash(regs, "Syscall from unknown region", SIGSEGV);
    }

    // ...that is not writable (blocks syscalls issued from injected code)...
    if (calling_region->is_writable()) {
        dbgln("Syscall from writable memory at {:p}", regs.eip);
        handle_crash(regs, "Syscall from writable memory", SIGSEGV);
    }

    // ...and, if the process opted in to syscall-region enforcement, is an
    // explicitly designated syscall region.
    if (process.space().enforces_syscall_regions() && !calling_region->is_syscall_region()) {
        dbgln("Syscall from non-syscall region");
        handle_crash(regs, "Syscall from non-syscall region", SIGSEGV);
    }

    // Userspace calling convention: syscall number in eax, args in edx/ecx/ebx.
    auto function = regs.eax;
    auto arg1 = regs.edx;
    auto arg2 = regs.ecx;
    auto arg3 = regs.ebx;

    auto result = Syscall::handle(regs, function, arg1, arg2, arg3);
    // The return value (or error code) travels back to userspace in eax.
    if (result.is_error())
        regs.eax = result.error();
    else
        regs.eax = result.value();

    process.big_lock().unlock();

    // Syscall-exit tracer stop, mirroring the entry stop above.
    if (auto tracer = process.tracer(); tracer && tracer->is_tracing_syscalls()) {
        tracer->set_trace_syscalls(false);
        process.tracer_trap(*current_thread, regs); // this triggers SIGTRAP and stops the thread!
    }

    current_thread->yield_if_stopped();

    current_thread->check_dispatch_pending_signal();

    // If the previous mode somehow changed something is seriously messed up...
    VERIFY(current_thread->previous_mode() == Thread::PreviousMode::UserMode);

    // Check if we're supposed to return to userspace or just die.
    current_thread->die_if_needed();

    VERIFY(!g_scheduler_lock.own_lock());
}
|
2020-02-16 00:27:42 +00:00
|
|
|
|
|
|
|
}
|