mirror of https://github.com/LadybirdBrowser/ladybird.git
38ddf301f6: This makes the types used in the PT_PEEK and PT_POKE actions suitable for 64-bit platforms as well.
/*
 * Copyright (c) 2020, Itamar S. <itamar8910@gmail.com>
 * Copyright (c) 2020-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/ScopeGuard.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/PrivateInodeVMObject.h>
#include <Kernel/Memory/Region.h>
#include <Kernel/Memory/ScopedAddressSpaceSwitcher.h>
#include <Kernel/Memory/SharedInodeVMObject.h>
#include <Kernel/Process.h>
#include <Kernel/ThreadTracer.h>

namespace Kernel {

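// Shared handler for sys$ptrace(): validates the caller/peer relationship and
// dispatches the individual PT_* requests. PT_TRACE_ME and PT_ATTACH set up
// tracing; every other request requires that the caller is already the peer's
// tracer and that the peer thread is not currently running.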
static ErrorOr<FlatPtr> handle_ptrace(const Kernel::Syscall::SC_ptrace_params& params, Process& caller)
{
    SpinlockLocker scheduler_lock(g_scheduler_lock);
    if (params.request == PT_TRACE_ME) {
        if (Process::current().tracer())
            return EBUSY;

        caller.set_wait_for_tracer_at_next_execve(true);
        return 0;
    }

    // FIXME: PID/TID BUG
    // This bug allows requesting PT_ATTACH (or anything else) on the same process, as
    // long as it is not the main thread. Alternatively, if this is desired, then the
    // bug is that this prevents PT_ATTACH to the main thread from another thread.
    if (params.tid == caller.pid().value())
        return EINVAL;

    auto peer = Thread::from_tid(params.tid);
    if (!peer)
        return ESRCH;

    MutexLocker ptrace_locker(peer->process().ptrace_lock());

    if ((peer->process().uid() != caller.euid())
        || (peer->process().uid() != peer->process().euid())) // Disallow tracing setuid processes
        return EACCES;

    if (!peer->process().is_dumpable())
        return EACCES;

    auto& peer_process = peer->process();
    if (params.request == PT_ATTACH) {
        if (peer_process.tracer()) {
            return EBUSY;
        }
        TRY(peer_process.start_tracing_from(caller.pid()));
        SpinlockLocker lock(peer->get_lock());
        if (peer->state() != Thread::State::Stopped) {
            peer->send_signal(SIGSTOP, &caller);
        }
        return 0;
    }

    auto* tracer = peer_process.tracer();

    if (!tracer)
        return EPERM;

    if (tracer->tracer_pid() != caller.pid())
        return EBUSY;

    if (peer->state() == Thread::State::Running)
        return EBUSY;

    scheduler_lock.unlock();

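    // The remaining requests copy data to and from user memory, so they run
    // without the scheduler lock held (released above); at this point the peer
    // is known to be attached to the caller and not currently running.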
    switch (params.request) {
    case PT_CONTINUE:
        peer->send_signal(SIGCONT, &caller);
        break;

    case PT_DETACH:
        peer_process.stop_tracing();
        peer->send_signal(SIGCONT, &caller);
        break;

    case PT_SYSCALL:
        tracer->set_trace_syscalls(true);
        peer->send_signal(SIGCONT, &caller);
        break;

    case PT_GETREGS: {
        if (!tracer->has_regs())
            return EINVAL;
        auto* regs = reinterpret_cast<PtraceRegisters*>(params.addr);
        TRY(copy_to_user(regs, &tracer->regs()));
        break;
    }

    case PT_SETREGS: {
        if (!tracer->has_regs())
            return EINVAL;

        PtraceRegisters regs {};
        TRY(copy_from_user(&regs, (const PtraceRegisters*)params.addr));

        auto& peer_saved_registers = peer->get_register_dump_from_stack();
        // Verify that the saved registers are in usermode context
        if ((peer_saved_registers.cs & 0x03) != 3)
            return EFAULT;

        tracer->set_regs(regs);
        copy_ptrace_registers_into_kernel_registers(peer_saved_registers, regs);
        break;
    }

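    // PT_PEEK and PT_POKE transfer a single FlatPtr-sized word, so the same
    // code paths work for both 32-bit and 64-bit targets.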
    case PT_PEEK: {
        Kernel::Syscall::SC_ptrace_peek_params peek_params {};
        TRY(copy_from_user(&peek_params, reinterpret_cast<Kernel::Syscall::SC_ptrace_peek_params*>(params.addr)));
        if (!Memory::is_user_address(VirtualAddress { peek_params.address }))
            return EFAULT;
        auto data = TRY(peer->process().peek_user_data(Userspace<const FlatPtr*> { (FlatPtr)peek_params.address }));
        TRY(copy_to_user(peek_params.out_data, &data));
        break;
    }

    case PT_POKE:
        if (!Memory::is_user_address(VirtualAddress { params.addr }))
            return EFAULT;
        TRY(peer->process().poke_user_data(Userspace<FlatPtr*> { (FlatPtr)params.addr }, params.data));
        return 0;

    case PT_PEEKDEBUG: {
        Kernel::Syscall::SC_ptrace_peek_params peek_params {};
        TRY(copy_from_user(&peek_params, reinterpret_cast<Kernel::Syscall::SC_ptrace_peek_params*>(params.addr)));
        auto data = TRY(peer->peek_debug_register(reinterpret_cast<uintptr_t>(peek_params.address)));
        TRY(copy_to_user(peek_params.out_data, &data));
        break;
    }
    case PT_POKEDEBUG:
        TRY(peer->poke_debug_register(reinterpret_cast<uintptr_t>(params.addr), params.data));
        return 0;
    default:
        return EINVAL;
    }

    return 0;
}

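// Syscall entry point: copies the parameter block from userspace, enforces the
// "ptrace" pledge promise, and flattens any error from handle_ptrace() into the
// returned FlatPtr.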
ErrorOr<FlatPtr> Process::sys$ptrace(Userspace<const Syscall::SC_ptrace_params*> user_params)
{
    VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this)
    REQUIRE_PROMISE(ptrace);
    auto params = TRY(copy_typed_from_user(user_params));

    auto result = handle_ptrace(params, *this);
    return result.is_error() ? result.error().code() : result.value();
}

/**
 * "Does this process have a thread that is currently being traced by the provided process?"
 */
bool Process::has_tracee_thread(ProcessID tracer_pid)
{
    if (auto tracer = this->tracer())
        return tracer->tracer_pid() == tracer_pid;
    return false;
}

ErrorOr<FlatPtr> Process::peek_user_data(Userspace<const FlatPtr*> address)
{
    // This function can be called from the context of another
    // process that called PT_PEEK
    ScopedAddressSpaceSwitcher switcher(*this);
    FlatPtr data;
    TRY(copy_from_user(&data, address));
    return data;
}

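// Writes one word into this process's memory on behalf of a tracer. Shared
// file-backed regions are first converted to a private copy, and read-only
// regions are made writable only for the duration of the write (the ScopeGuard
// below restores the original protection).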
ErrorOr<void> Process::poke_user_data(Userspace<FlatPtr*> address, FlatPtr data)
{
    Memory::VirtualRange range = { address.vaddr(), sizeof(FlatPtr) };
    auto* region = address_space().find_region_containing(range);
    if (!region)
        return EFAULT;
    ScopedAddressSpaceSwitcher switcher(*this);
    if (region->is_shared()) {
        // If the region is shared, we change its vmobject to a PrivateInodeVMObject
        // to prevent the write operation from changing any shared inode data
        VERIFY(region->vmobject().is_shared_inode());
        auto vmobject = TRY(Memory::PrivateInodeVMObject::try_create_with_inode(static_cast<Memory::SharedInodeVMObject&>(region->vmobject()).inode()));
        region->set_vmobject(move(vmobject));
        region->set_shared(false);
    }
    const bool was_writable = region->is_writable();
    if (!was_writable) {
        region->set_writable(true);
        region->remap();
    }
    ScopeGuard rollback([&]() {
        if (!was_writable) {
            region->set_writable(false);
            region->remap();
        }
    });

    return copy_to_user(address, &data);
}

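// The register_index values map onto the x86 debug registers: 0-3 are the
// breakpoint address registers DR0-DR3, 6 is the status register DR6, and 7 is
// the control register DR7. Note that poke_debug_register() below accepts
// DR0-DR3 and DR7 but not DR6.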
ErrorOr<FlatPtr> Thread::peek_debug_register(u32 register_index)
{
    FlatPtr data;
    switch (register_index) {
    case 0:
        data = m_debug_register_state.dr0;
        break;
    case 1:
        data = m_debug_register_state.dr1;
        break;
    case 2:
        data = m_debug_register_state.dr2;
        break;
    case 3:
        data = m_debug_register_state.dr3;
        break;
    case 6:
        data = m_debug_register_state.dr6;
        break;
    case 7:
        data = m_debug_register_state.dr7;
        break;
    default:
        return EINVAL;
    }
    return data;
}

ErrorOr<void> Thread::poke_debug_register(u32 register_index, FlatPtr data)
{
    switch (register_index) {
    case 0:
        m_debug_register_state.dr0 = data;
        break;
    case 1:
        m_debug_register_state.dr1 = data;
        break;
    case 2:
        m_debug_register_state.dr2 = data;
        break;
    case 3:
        m_debug_register_state.dr3 = data;
        break;
    case 7:
        m_debug_register_state.dr7 = data;
        break;
    default:
        return EINVAL;
    }
    return {};
}

}
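// Illustrative sketch (not part of this file's build): how a userspace tracer
// might drive these requests. The struct and field names come from the code
// above; the syscall invocation, `target_tid`, and `some_user_address` are
// placeholders, since real tracers normally go through a higher-level ptrace
// wrapper rather than filling in SC_ptrace_params by hand.
//
//     Syscall::SC_ptrace_params params {};
//     params.request = PT_ATTACH;   // sends SIGSTOP if the target thread is running
//     params.tid = target_tid;      // a thread id; must not be the caller's own pid (see the FIXME above)
//     // ...invoke the ptrace syscall with &params, then wait for the target to stop...
//
//     FlatPtr word = 0;
//     Syscall::SC_ptrace_peek_params peek {};
//     peek.address = some_user_address; // address inside the tracee to read
//     peek.out_data = &word;            // where the kernel writes the peeked value
//     params.request = PT_PEEK;
//     params.addr = &peek;              // PT_PEEK reads its own parameter block from addr
//     // ...invoke the ptrace syscall with &params; `word` now holds the peeked value...
//
//     params.request = PT_DETACH;       // stops tracing and resumes the target with SIGCONT
//     // ...invoke the ptrace syscall with &params...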