2020-07-30 21:38:15 +00:00
|
|
|
/*
|
2021-02-08 18:34:41 +00:00
|
|
|
* Copyright (c) 2020, Itamar S. <itamar8910@gmail.com>
|
|
|
|
* Copyright (c) 2020-2021, Andreas Kling <kling@serenityos.org>
|
2020-07-30 21:38:15 +00:00
|
|
|
*
|
2021-04-22 08:24:48 +00:00
|
|
|
* SPDX-License-Identifier: BSD-2-Clause
|
2020-07-30 21:38:15 +00:00
|
|
|
*/
|
|
|
|
|
2020-09-12 03:11:07 +00:00
|
|
|
#include <AK/ScopeGuard.h>
|
2021-08-06 08:45:34 +00:00
|
|
|
#include <Kernel/Memory/PrivateInodeVMObject.h>
|
|
|
|
#include <Kernel/Memory/Region.h>
|
2021-09-06 15:22:36 +00:00
|
|
|
#include <Kernel/Memory/ScopedAddressSpaceSwitcher.h>
|
2021-08-06 08:45:34 +00:00
|
|
|
#include <Kernel/Memory/SharedInodeVMObject.h>
|
2023-02-24 17:45:37 +00:00
|
|
|
#include <Kernel/Tasks/Process.h>
|
|
|
|
#include <Kernel/Tasks/Scheduler.h>
|
|
|
|
#include <Kernel/Tasks/ThreadTracer.h>
|
2020-07-30 21:38:15 +00:00
|
|
|
|
|
|
|
namespace Kernel {
|
|
|
|
|
// Core dispatcher for the ptrace syscall: validates the request against the
// target thread ("peer"), enforces permissions, then performs the requested
// debugger operation. Returns 0 on success or an errno-style error.
//
// Locking: the scheduler lock is held while inspecting/attaching to the peer
// and is explicitly released (see scheduler_lock.unlock() below) before the
// per-request switch, since several requests copy to/from user memory.
static ErrorOr<FlatPtr> handle_ptrace(Kernel::Syscall::SC_ptrace_params const& params, Process& caller)
{
    SpinlockLocker scheduler_lock(g_scheduler_lock);
    // PT_TRACE_ME: the caller asks to be traced by its parent. No peer thread
    // is involved; just arrange to stop and wait for a tracer at next execve.
    if (params.request == PT_TRACE_ME) {
        // Already being traced -> refuse a second tracer.
        if (Process::current().tracer())
            return EBUSY;

        caller.set_wait_for_tracer_at_next_execve(true);
        return 0;
    }

    // FIXME: PID/TID BUG
    // This bug allows to request PT_ATTACH (or anything else) the same process, as
    // long it is not the main thread. Alternatively, if this is desired, then the
    // bug is that this prevents PT_ATTACH to the main thread from another thread.
    if (params.tid == caller.pid().value())
        return EINVAL;

    // Resolve the target thread; jail-aware lookup so callers cannot reach
    // threads outside their jail.
    auto peer = Thread::from_tid_in_same_jail(params.tid);
    if (!peer)
        return ESRCH;

    // Serialize ptrace operations against this peer process.
    MutexLocker ptrace_locker(peer->process().ptrace_lock());

    // Permission check: non-superusers may only trace processes running with
    // their own effective uid, and never setuid processes (uid != euid).
    auto peer_credentials = peer->process().credentials();
    auto caller_credentials = caller.credentials();
    if (!caller_credentials->is_superuser() && ((peer_credentials->uid() != caller_credentials->euid()) || (peer_credentials->uid() != peer_credentials->euid()))) // Disallow tracing setuid processes
        return EACCES;

    // Non-dumpable processes are off-limits to tracers as well.
    if (!peer->process().is_dumpable())
        return EACCES;

    auto& peer_process = peer->process();
    if (params.request == PT_ATTACH) {
        // Only one tracer at a time.
        if (peer_process.tracer()) {
            return EBUSY;
        }
        TRY(peer_process.start_tracing_from(caller.pid()));
        SpinlockLocker lock(peer->get_lock());
        if (peer->state() == Thread::State::Stopped) {
            // Peer is already stopped: capture its register state immediately.
            peer_process.tracer()->set_regs(peer->get_register_dump_from_stack());
        } else {
            // Otherwise stop it; its registers get captured when it stops.
            peer->send_signal(SIGSTOP, &caller);
        }
        return 0;
    }

    // All remaining requests require an established tracer relationship...
    auto* tracer = peer_process.tracer();

    if (!tracer)
        return EPERM;

    // ...and that the caller *is* that tracer.
    if (tracer->tracer_pid() != caller.pid())
        return EBUSY;

    // The peer must be stopped for register/memory access to be coherent.
    if (peer->state() == Thread::State::Running)
        return EBUSY;

    // Release the scheduler lock before request handling: the cases below may
    // fault while copying user memory, which must not happen under this lock.
    scheduler_lock.unlock();

    switch (params.request) {
    case PT_CONTINUE:
        peer->send_signal(SIGCONT, &caller);
        break;

    case PT_DETACH:
        peer_process.stop_tracing();
        peer->send_signal(SIGCONT, &caller);
        break;

    case PT_SYSCALL:
        // Resume the peer, but have it stop at syscall entry/exit.
        tracer->set_trace_syscalls(true);
        peer->send_signal(SIGCONT, &caller);
        break;

    case PT_GETREGS: {
        // Copy the captured register snapshot out to the tracer's buffer
        // (params.addr points into the *caller's* address space here).
        if (!tracer->has_regs())
            return EINVAL;
        auto* regs = reinterpret_cast<PtraceRegisters*>(params.addr);
        TRY(copy_to_user(regs, &tracer->regs()));
        break;
    }

    case PT_SETREGS: {
        if (!tracer->has_regs())
            return EINVAL;

        PtraceRegisters regs {};
        TRY(copy_from_user(&regs, (PtraceRegisters const*)params.addr));

        auto& peer_saved_registers = peer->get_register_dump_from_stack();
        // Verify that the saved registers are in usermode context
        if (peer_saved_registers.previous_mode() != ExecutionMode::User)
            return EFAULT;

        // Update both the tracer's snapshot and the peer's saved kernel-stack
        // registers, so the new values take effect when the peer resumes.
        tracer->set_regs(regs);
        copy_ptrace_registers_into_kernel_registers(peer_saved_registers, regs);
        break;
    }

    case PT_PEEK: {
        // Read one FlatPtr from the peer's memory at params.addr, write the
        // result to the caller's buffer at params.data.
        auto data = TRY(peer->process().peek_user_data(Userspace<FlatPtr const*> { (FlatPtr)params.addr }));
        TRY(copy_to_user((FlatPtr*)params.data, &data));
        break;
    }

    case PT_POKE:
        TRY(peer->process().poke_user_data(Userspace<FlatPtr*> { (FlatPtr)params.addr }, params.data));
        return 0;

    case PT_PEEKBUF: {
        // Bulk read: params.data describes the caller's destination buffer,
        // params.addr is the source address in the peer. Copy page-sized
        // chunks through an on-stack bounce buffer.
        Kernel::Syscall::SC_ptrace_buf_params buf_params {};
        TRY(copy_from_user(&buf_params, reinterpret_cast<Kernel::Syscall::SC_ptrace_buf_params*>(params.data)));
        // This is a comparatively large allocation on the Kernel stack.
        // However, we know that we're close to the root of the call stack, and the following calls shouldn't go too deep.
        Array<u8, PAGE_SIZE> buf;
        FlatPtr tracee_ptr = (FlatPtr)params.addr;
        while (buf_params.buf.size > 0) {
            size_t copy_this_iteration = min(buf.size(), buf_params.buf.size);
            TRY(peer->process().peek_user_data(buf.span().slice(0, copy_this_iteration), Userspace<u8 const*> { tracee_ptr }));
            TRY(copy_to_user((void*)buf_params.buf.data, buf.data(), copy_this_iteration));
            tracee_ptr += copy_this_iteration;
            buf_params.buf.data += copy_this_iteration;
            buf_params.buf.size -= copy_this_iteration;
        }
        break;
    }

    case PT_PEEKDEBUG: {
        // For debug registers, params.addr is the register index (not an
        // address) and params.data receives the value.
        auto data = TRY(peer->peek_debug_register(reinterpret_cast<uintptr_t>(params.addr)));
        TRY(copy_to_user((FlatPtr*)params.data, &data));
        break;
    }
    case PT_POKEDEBUG:
        TRY(peer->poke_debug_register(reinterpret_cast<uintptr_t>(params.addr), params.data));
        return 0;
    default:
        return EINVAL;
    }

    return 0;
}
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> Process::sys$ptrace(Userspace<Syscall::SC_ptrace_params const*> user_params)
|
2020-07-30 21:38:15 +00:00
|
|
|
{
|
2022-08-17 20:03:04 +00:00
|
|
|
VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this);
|
2021-12-29 09:11:45 +00:00
|
|
|
TRY(require_promise(Pledge::ptrace));
|
2021-09-05 15:51:37 +00:00
|
|
|
auto params = TRY(copy_typed_from_user(user_params));
|
|
|
|
|
2021-11-27 10:22:25 +00:00
|
|
|
return handle_ptrace(params, *this);
|
2020-07-30 21:38:15 +00:00
|
|
|
}
|
|
|
|
|
2020-08-08 15:32:34 +00:00
|
|
|
/**
|
|
|
|
* "Does this process have a thread that is currently being traced by the provided process?"
|
|
|
|
*/
|
2020-12-09 04:18:45 +00:00
|
|
|
bool Process::has_tracee_thread(ProcessID tracer_pid)
|
2020-07-30 21:38:15 +00:00
|
|
|
{
|
2021-12-18 17:37:21 +00:00
|
|
|
if (auto const* tracer = this->tracer())
|
2020-12-09 04:18:45 +00:00
|
|
|
return tracer->tracer_pid() == tracer_pid;
|
|
|
|
return false;
|
2020-07-30 21:38:15 +00:00
|
|
|
}
|
|
|
|
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> Process::peek_user_data(Userspace<FlatPtr const*> address)
|
2020-07-30 21:38:15 +00:00
|
|
|
{
|
|
|
|
// This function can be called from the context of another
|
|
|
|
// process that called PT_PEEK
|
2021-09-06 15:22:36 +00:00
|
|
|
ScopedAddressSpaceSwitcher switcher(*this);
|
2021-12-17 08:12:20 +00:00
|
|
|
return TRY(copy_typed_from_user(address));
|
2020-07-30 21:38:15 +00:00
|
|
|
}
|
|
|
|
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<void> Process::peek_user_data(Span<u8> destination, Userspace<u8 const*> address)
|
2021-11-25 21:55:12 +00:00
|
|
|
{
|
|
|
|
// This function can be called from the context of another
|
|
|
|
// process that called PT_PEEKBUF
|
|
|
|
ScopedAddressSpaceSwitcher switcher(*this);
|
|
|
|
TRY(copy_from_user(destination.data(), address, destination.size()));
|
|
|
|
return {};
|
|
|
|
}
|
|
|
|
|
// Write a single FlatPtr into this process's address space on behalf of a
// tracer (PT_POKE). Handles two tricky cases: writes into shared inode
// mappings (converted to private so the inode is not modified), and writes
// into read-only regions (made writable temporarily and restored afterwards).
ErrorOr<void> Process::poke_user_data(Userspace<FlatPtr*> address, FlatPtr data)
{
    Memory::VirtualRange range = { address.vaddr(), sizeof(FlatPtr) };

    return address_space().with([&](auto& space) -> ErrorOr<void> {
        // The whole write must land inside one mapped region.
        auto* region = space->find_region_containing(range);
        if (!region)
            return EFAULT;
        // We may be running in the tracer's context; switch to the tracee's
        // address space before touching its memory.
        ScopedAddressSpaceSwitcher switcher(*this);
        if (region->is_shared()) {
            // If the region is shared, we change its vmobject to a PrivateInodeVMObject
            // to prevent the write operation from changing any shared inode data
            VERIFY(region->vmobject().is_shared_inode());
            auto vmobject = TRY(Memory::PrivateInodeVMObject::try_create_with_inode(static_cast<Memory::SharedInodeVMObject&>(region->vmobject()).inode()));
            region->set_vmobject(move(vmobject));
            region->set_shared(false);
        }
        bool const was_writable = region->is_writable();
        // Temporarily grant write access for regions mapped read-only
        // (e.g. text pages when inserting breakpoints).
        if (!was_writable) {
            region->set_writable(true);
            region->remap();
        }
        // Restore the original protection no matter how we exit below.
        ScopeGuard rollback([&]() {
            if (!was_writable) {
                region->set_writable(false);
                region->remap();
            }
        });

        return copy_to_user(address, &data);
    });
}
|
2021-11-19 14:13:07 +00:00
|
|
|
ErrorOr<FlatPtr> Thread::peek_debug_register(u32 register_index)
|
2021-04-15 16:34:51 +00:00
|
|
|
{
|
2023-01-25 15:12:08 +00:00
|
|
|
#if ARCH(X86_64)
|
2021-11-19 14:13:07 +00:00
|
|
|
FlatPtr data;
|
2021-04-15 16:34:51 +00:00
|
|
|
switch (register_index) {
|
|
|
|
case 0:
|
|
|
|
data = m_debug_register_state.dr0;
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
data = m_debug_register_state.dr1;
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
data = m_debug_register_state.dr2;
|
|
|
|
break;
|
|
|
|
case 3:
|
|
|
|
data = m_debug_register_state.dr3;
|
|
|
|
break;
|
|
|
|
case 6:
|
|
|
|
data = m_debug_register_state.dr6;
|
|
|
|
break;
|
|
|
|
case 7:
|
|
|
|
data = m_debug_register_state.dr7;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return EINVAL;
|
|
|
|
}
|
|
|
|
return data;
|
2023-01-25 15:12:08 +00:00
|
|
|
#elif ARCH(AARCH64)
|
|
|
|
(void)register_index;
|
|
|
|
TODO_AARCH64();
|
|
|
|
#else
|
|
|
|
# error "Unknown architecture"
|
|
|
|
#endif
|
2021-04-15 16:34:51 +00:00
|
|
|
}
|
|
|
|
|
2021-11-19 14:13:07 +00:00
|
|
|
ErrorOr<void> Thread::poke_debug_register(u32 register_index, FlatPtr data)
|
2021-04-15 16:34:51 +00:00
|
|
|
{
|
2023-01-25 15:12:08 +00:00
|
|
|
#if ARCH(X86_64)
|
2021-04-15 16:34:51 +00:00
|
|
|
switch (register_index) {
|
|
|
|
case 0:
|
|
|
|
m_debug_register_state.dr0 = data;
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
m_debug_register_state.dr1 = data;
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
m_debug_register_state.dr2 = data;
|
|
|
|
break;
|
|
|
|
case 3:
|
|
|
|
m_debug_register_state.dr3 = data;
|
|
|
|
break;
|
|
|
|
case 7:
|
|
|
|
m_debug_register_state.dr7 = data;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return EINVAL;
|
|
|
|
}
|
2021-11-07 23:51:39 +00:00
|
|
|
return {};
|
2023-01-25 15:12:08 +00:00
|
|
|
#elif ARCH(AARCH64)
|
|
|
|
(void)register_index;
|
|
|
|
(void)data;
|
|
|
|
TODO_AARCH64();
|
|
|
|
#else
|
|
|
|
# error "Unknown architecture"
|
|
|
|
#endif
|
2021-04-15 16:34:51 +00:00
|
|
|
}
|
|
|
|
|
2020-07-30 21:38:15 +00:00
|
|
|
}
|