/*
 * Copyright (c) 2018-2022, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Concepts.h>
#include <AK/HashMap.h>
#include <AK/IntrusiveList.h>
#include <AK/IntrusiveListRelaxedConst.h>
#include <AK/OwnPtr.h>
#include <AK/RefPtr.h>
#include <AK/Userspace.h>
#include <AK/Variant.h>
#include <Kernel/API/POSIX/sys/resource.h>
#include <Kernel/API/Syscall.h>
#include <Kernel/Assertions.h>
#include <Kernel/AtomicEdgeAction.h>
#include <Kernel/Credentials.h>
#include <Kernel/FileSystem/InodeMetadata.h>
#include <Kernel/FileSystem/OpenFileDescription.h>
#include <Kernel/FileSystem/UnveilNode.h>
#include <Kernel/Forward.h>
#include <Kernel/FutexQueue.h>
#include <Kernel/Jail.h>
#include <Kernel/Library/LockWeakPtr.h>
#include <Kernel/Library/LockWeakable.h>
#include <Kernel/Library/NonnullLockRefPtrVector.h>
#include <Kernel/Locking/Mutex.h>
#include <Kernel/Locking/MutexProtected.h>
#include <Kernel/Memory/AddressSpace.h>
#include <Kernel/PerformanceEventBuffer.h>
#include <Kernel/ProcessExposed.h>
#include <Kernel/ProcessGroup.h>
#include <Kernel/StdLib.h>
#include <Kernel/Thread.h>
#include <Kernel/UnixTypes.h>
#include <LibC/elf.h>

namespace Kernel {

MutexProtected<OwnPtr<KString>>& hostname();
Time kgettimeofday();

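// X-macro list of all pledge promises; expanded below to generate the Pledge enum.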
#define ENUMERATE_PLEDGE_PROMISES         \
    __ENUMERATE_PLEDGE_PROMISE(stdio)     \
    __ENUMERATE_PLEDGE_PROMISE(rpath)     \
    __ENUMERATE_PLEDGE_PROMISE(wpath)     \
    __ENUMERATE_PLEDGE_PROMISE(cpath)     \
    __ENUMERATE_PLEDGE_PROMISE(dpath)     \
    __ENUMERATE_PLEDGE_PROMISE(inet)      \
    __ENUMERATE_PLEDGE_PROMISE(id)        \
    __ENUMERATE_PLEDGE_PROMISE(proc)      \
    __ENUMERATE_PLEDGE_PROMISE(ptrace)    \
    __ENUMERATE_PLEDGE_PROMISE(exec)      \
    __ENUMERATE_PLEDGE_PROMISE(unix)      \
    __ENUMERATE_PLEDGE_PROMISE(recvfd)    \
    __ENUMERATE_PLEDGE_PROMISE(sendfd)    \
    __ENUMERATE_PLEDGE_PROMISE(fattr)     \
    __ENUMERATE_PLEDGE_PROMISE(tty)       \
    __ENUMERATE_PLEDGE_PROMISE(chown)     \
    __ENUMERATE_PLEDGE_PROMISE(thread)    \
    __ENUMERATE_PLEDGE_PROMISE(video)     \
    __ENUMERATE_PLEDGE_PROMISE(accept)    \
    __ENUMERATE_PLEDGE_PROMISE(settime)   \
    __ENUMERATE_PLEDGE_PROMISE(sigaction) \
    __ENUMERATE_PLEDGE_PROMISE(setkeymap) \
    __ENUMERATE_PLEDGE_PROMISE(prot_exec) \
    __ENUMERATE_PLEDGE_PROMISE(map_fixed) \
    __ENUMERATE_PLEDGE_PROMISE(getkeymap) \
    __ENUMERATE_PLEDGE_PROMISE(jail)      \
    __ENUMERATE_PLEDGE_PROMISE(no_error)

enum class Pledge : u32 {
#define __ENUMERATE_PLEDGE_PROMISE(x) x,
    ENUMERATE_PLEDGE_PROMISES
#undef __ENUMERATE_PLEDGE_PROMISE
};

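// State of the process's veil (see sys$unveil()): None until the first unveil() call drops the
// veil, Dropped while paths may still be unveiled (permissions can only ever be reduced), and
// Locked once unveil(nullptr, nullptr) has been called and no further paths may be unveiled.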
enum class VeilState {
    None,
    Dropped,
    Locked,
    LockedInherited,
};

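// Identifies a futex globally: shared futexes are keyed by (VMObject, offset), process-private
// futexes by (AddressSpace, userspace address); `raw` is an untyped view of the two words, and
// futex_key_private_flag is used to tag private keys.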
static constexpr FlatPtr futex_key_private_flag = 0b1;
union GlobalFutexKey {
    struct {
        Memory::VMObject const* vmobject;
        FlatPtr offset;
    } shared;
    struct {
        Memory::AddressSpace const* address_space;
        FlatPtr user_address;
    } private_;
    struct {
        FlatPtr parent;
        FlatPtr offset;
    } raw;
};
static_assert(sizeof(GlobalFutexKey) == (sizeof(FlatPtr) * 2));

struct LoadResult;

class Process final
    : public ListedRefCounted<Process, LockType::Spinlock>
    , public LockWeakable<Process> {

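    // Frequently-checked per-process state, grouped so it can be kept write-protected
    // (see protect_data()/unprotect_data()); access it only through with_protected_data()
    // and with_mutable_protected_data() below.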
    class ProtectedValues {
    public:
        ProcessID pid { 0 };
        ProcessID ppid { 0 };
        SessionID sid { 0 };
        // FIXME: This should be a NonnullRefPtr
        RefPtr<Credentials> credentials;
        bool dumpable { false };
        bool executable_is_setid { false };
        Atomic<bool> has_promises { false };
        Atomic<u32> promises { 0 };
        Atomic<bool> has_execpromises { false };
        Atomic<u32> execpromises { 0 };
        mode_t umask { 022 };
        VirtualAddress signal_trampoline;
        Atomic<u32> thread_count { 0 };
        u8 termination_status { 0 };
        u8 termination_signal { 0 };
    };

public:
    AK_MAKE_NONCOPYABLE(Process);
    AK_MAKE_NONMOVABLE(Process);

    MAKE_ALIGNED_ALLOCATED(Process, PAGE_SIZE);

    friend class Thread;
    friend class Coredump;

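    // Runs `callback` with the protected values while holding m_protected_data_lock.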
    auto with_protected_data(auto&& callback) const
    {
        SpinlockLocker locker(m_protected_data_lock);
        return callback(m_protected_values_do_not_access_directly);
    }

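    // Like with_protected_data(), but unprotects the values for the duration of the callback
    // and re-protects them on return (via the ScopeGuard).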
    auto with_mutable_protected_data(auto&& callback)
    {
        SpinlockLocker locker(m_protected_data_lock);
        unprotect_data();
        auto guard = ScopeGuard([&] { protect_data(); });
        return callback(m_protected_values_do_not_access_directly);
    }

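    // Coarse lifecycle state, as reported by is_dying()/is_dead(): Running -> Dying -> Dead.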
    enum class State : u8 {
        Running = 0,
        Dying,
        Dead
    };

public:
    class ProcessProcFSTraits;

    static Process& current()
    {
        auto* current_thread = Processor::current_thread();
        VERIFY(current_thread);
        return current_thread->process();
    }

    static bool has_current()
    {
        return Processor::current_thread() != nullptr;
    }

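    // Trampoline used by the templated create_kernel_process() below: the heap-allocated
    // entry functor is invoked exactly once and then deleted.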
    template<typename EntryFunction>
    static void kernel_process_trampoline(void* data)
    {
        EntryFunction* func = reinterpret_cast<EntryFunction*>(data);
        (*func)();
        delete func;
    }

    enum class RegisterProcess {
        No,
        Yes
    };

    template<typename EntryFunction>
    static LockRefPtr<Process> create_kernel_process(LockRefPtr<Thread>& first_thread, NonnullOwnPtr<KString> name, EntryFunction entry, u32 affinity = THREAD_AFFINITY_DEFAULT, RegisterProcess do_register = RegisterProcess::Yes)
    {
        auto* entry_func = new EntryFunction(move(entry));
        return create_kernel_process(first_thread, move(name), &Process::kernel_process_trampoline<EntryFunction>, entry_func, affinity, do_register);
    }

    static LockRefPtr<Process> create_kernel_process(LockRefPtr<Thread>& first_thread, NonnullOwnPtr<KString> name, void (*entry)(void*), void* entry_data = nullptr, u32 affinity = THREAD_AFFINITY_DEFAULT, RegisterProcess do_register = RegisterProcess::Yes);
    static ErrorOr<NonnullLockRefPtr<Process>> try_create_user_process(LockRefPtr<Thread>& first_thread, StringView path, UserID, GroupID, NonnullOwnPtrVector<KString> arguments, NonnullOwnPtrVector<KString> environment, TTY*);
    static void register_new(Process&);

    ~Process();

    LockRefPtr<Thread> create_kernel_thread(void (*entry)(void*), void* entry_data, u32 priority, NonnullOwnPtr<KString> name, u32 affinity = THREAD_AFFINITY_DEFAULT, bool joinable = true);

    bool is_profiling() const { return m_profiling; }
    void set_profiling(bool profiling) { m_profiling = profiling; }

    bool should_generate_coredump() const { return m_should_generate_coredump; }
    void set_should_generate_coredump(bool b) { m_should_generate_coredump = b; }

    bool is_dying() const { return m_state.load(AK::MemoryOrder::memory_order_acquire) != State::Running; }
    bool is_dead() const { return m_state.load(AK::MemoryOrder::memory_order_acquire) == State::Dead; }

    bool is_stopped() const { return m_is_stopped; }
    bool set_stopped(bool stopped) { return m_is_stopped.exchange(stopped); }

    bool is_kernel_process() const { return m_is_kernel_process; }
    bool is_user_process() const { return !m_is_kernel_process; }

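    // Process lookup; the "in_same_jail" variant only returns processes visible from the caller's jail.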
    static LockRefPtr<Process> from_pid_in_same_jail(ProcessID);
    static LockRefPtr<Process> from_pid_ignoring_jails(ProcessID);
    static SessionID get_sid_from_pgid(ProcessGroupID pgid);

    StringView name() const { return m_name->view(); }
    ProcessID pid() const
    {
        return with_protected_data([](auto& protected_data) { return protected_data.pid; });
    }
    SessionID sid() const
    {
        return with_protected_data([](auto& protected_data) { return protected_data.sid; });
    }
    bool is_session_leader() const { return sid().value() == pid().value(); }
    ProcessGroupID pgid() const { return m_pg ? m_pg->pgid() : 0; }
    bool is_group_leader() const { return pgid().value() == pid().value(); }
    ProcessID ppid() const
    {
        return with_protected_data([](auto& protected_data) { return protected_data.ppid; });
    }

    SpinlockProtected<RefPtr<Jail>, LockRank::Process> const& jail() { return m_attached_jail; }

    bool is_currently_in_jail() const
    {
        return m_attached_jail.with([&](auto& jail) -> bool { return !jail.is_null(); });
    }

    NonnullRefPtr<Credentials> credentials() const;

    bool is_dumpable() const
    {
        return with_protected_data([](auto& protected_data) { return protected_data.dumpable; });
    }
    void set_dumpable(bool);

    mode_t umask() const
    {
        return with_protected_data([](auto& protected_data) { return protected_data.umask; });
    }

    // Breakable iteration functions
    template<IteratorFunction<Process&> Callback>
    static void for_each_ignoring_jails(Callback);

    static ErrorOr<void> for_each_in_same_jail(Function<ErrorOr<void>(Process&)>);
    ErrorOr<void> for_each_in_pgrp_in_same_jail(ProcessGroupID, Function<ErrorOr<void>(Process&)>);
    ErrorOr<void> for_each_child_in_same_jail(Function<ErrorOr<void>(Process&)>);

    template<IteratorFunction<Thread&> Callback>
    IterationDecision for_each_thread(Callback);
    template<IteratorFunction<Thread&> Callback>
    IterationDecision for_each_thread(Callback callback) const;
    ErrorOr<void> try_for_each_thread(Function<ErrorOr<void>(Thread const&)>) const;

    // Non-breakable iteration functions
    template<VoidFunction<Process&> Callback>
    static void for_each_ignoring_jails(Callback);

    template<VoidFunction<Thread&> Callback>
    IterationDecision for_each_thread(Callback);
    template<VoidFunction<Thread&> Callback>
    IterationDecision for_each_thread(Callback callback) const;

    void die();
    void finalize();

    ThreadTracer* tracer() { return m_tracer.ptr(); }
    bool is_traced() const { return !!m_tracer; }
    ErrorOr<void> start_tracing_from(ProcessID tracer);
    void stop_tracing();
    void tracer_trap(Thread&, RegisterState const&);

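    // Syscall handlers. Each sys$...() entry point implements one system call; most return
    // ErrorOr<FlatPtr>, whose error becomes the errno value reported to userspace.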
    ErrorOr<FlatPtr> sys$emuctl();
    ErrorOr<FlatPtr> sys$yield();
    ErrorOr<FlatPtr> sys$sync();
    ErrorOr<FlatPtr> sys$beep();
    ErrorOr<FlatPtr> sys$get_process_name(Userspace<char*> buffer, size_t buffer_size);
    ErrorOr<FlatPtr> sys$set_process_name(Userspace<char const*> user_name, size_t user_name_length);
    ErrorOr<FlatPtr> sys$create_inode_watcher(u32 flags);
    ErrorOr<FlatPtr> sys$inode_watcher_add_watch(Userspace<Syscall::SC_inode_watcher_add_watch_params const*> user_params);
    ErrorOr<FlatPtr> sys$inode_watcher_remove_watch(int fd, int wd);
    ErrorOr<FlatPtr> sys$dbgputstr(Userspace<char const*>, size_t);
    ErrorOr<FlatPtr> sys$dump_backtrace();
    ErrorOr<FlatPtr> sys$gettid();
    ErrorOr<FlatPtr> sys$setsid();
    ErrorOr<FlatPtr> sys$getsid(pid_t);
    ErrorOr<FlatPtr> sys$setpgid(pid_t pid, pid_t pgid);
    ErrorOr<FlatPtr> sys$getpgrp();
    ErrorOr<FlatPtr> sys$getpgid(pid_t);
    ErrorOr<FlatPtr> sys$getuid();
    ErrorOr<FlatPtr> sys$getgid();
    ErrorOr<FlatPtr> sys$geteuid();
    ErrorOr<FlatPtr> sys$getegid();
    ErrorOr<FlatPtr> sys$getpid();
    ErrorOr<FlatPtr> sys$getppid();
    ErrorOr<FlatPtr> sys$getresuid(Userspace<UserID*>, Userspace<UserID*>, Userspace<UserID*>);
    ErrorOr<FlatPtr> sys$getresgid(Userspace<GroupID*>, Userspace<GroupID*>, Userspace<GroupID*>);
    ErrorOr<FlatPtr> sys$getrusage(int, Userspace<rusage*>);
    ErrorOr<FlatPtr> sys$umask(mode_t);
    ErrorOr<FlatPtr> sys$open(Userspace<Syscall::SC_open_params const*>);
    ErrorOr<FlatPtr> sys$close(int fd);
    ErrorOr<FlatPtr> sys$read(int fd, Userspace<u8*>, size_t);
    ErrorOr<FlatPtr> sys$pread(int fd, Userspace<u8*>, size_t, Userspace<off_t const*>);
    ErrorOr<FlatPtr> sys$readv(int fd, Userspace<const struct iovec*> iov, int iov_count);
    ErrorOr<FlatPtr> sys$write(int fd, Userspace<u8 const*>, size_t);
    ErrorOr<FlatPtr> sys$pwritev(int fd, Userspace<const struct iovec*> iov, int iov_count, Userspace<off_t const*>);
    ErrorOr<FlatPtr> sys$fstat(int fd, Userspace<stat*>);
    ErrorOr<FlatPtr> sys$stat(Userspace<Syscall::SC_stat_params const*>);
    ErrorOr<FlatPtr> sys$annotate_mapping(Userspace<void*>, int flags);
    ErrorOr<FlatPtr> sys$lseek(int fd, Userspace<off_t*>, int whence);
    ErrorOr<FlatPtr> sys$ftruncate(int fd, Userspace<off_t const*>);
    ErrorOr<FlatPtr> sys$posix_fallocate(int fd, Userspace<off_t const*>, Userspace<off_t const*>);
    ErrorOr<FlatPtr> sys$kill(pid_t pid_or_pgid, int sig);
    [[noreturn]] void sys$exit(int status);
    ErrorOr<FlatPtr> sys$sigreturn(RegisterState& registers);
    ErrorOr<FlatPtr> sys$waitid(Userspace<Syscall::SC_waitid_params const*>);
    ErrorOr<FlatPtr> sys$mmap(Userspace<Syscall::SC_mmap_params const*>);
    ErrorOr<FlatPtr> sys$mremap(Userspace<Syscall::SC_mremap_params const*>);
    ErrorOr<FlatPtr> sys$munmap(Userspace<void*>, size_t);
    ErrorOr<FlatPtr> sys$set_mmap_name(Userspace<Syscall::SC_set_mmap_name_params const*>);
    ErrorOr<FlatPtr> sys$mprotect(Userspace<void*>, size_t, int prot);
    ErrorOr<FlatPtr> sys$madvise(Userspace<void*>, size_t, int advice);
    ErrorOr<FlatPtr> sys$msync(Userspace<void*>, size_t, int flags);
    ErrorOr<FlatPtr> sys$purge(int mode);
    ErrorOr<FlatPtr> sys$poll(Userspace<Syscall::SC_poll_params const*>);
    ErrorOr<FlatPtr> sys$get_dir_entries(int fd, Userspace<void*>, size_t);
    ErrorOr<FlatPtr> sys$getcwd(Userspace<char*>, size_t);
    ErrorOr<FlatPtr> sys$chdir(Userspace<char const*>, size_t);
    ErrorOr<FlatPtr> sys$fchdir(int fd);
    ErrorOr<FlatPtr> sys$adjtime(Userspace<timeval const*>, Userspace<timeval*>);
    ErrorOr<FlatPtr> sys$clock_gettime(clockid_t, Userspace<timespec*>);
    ErrorOr<FlatPtr> sys$clock_settime(clockid_t, Userspace<timespec const*>);
    ErrorOr<FlatPtr> sys$clock_nanosleep(Userspace<Syscall::SC_clock_nanosleep_params const*>);
    ErrorOr<FlatPtr> sys$clock_getres(Userspace<Syscall::SC_clock_getres_params const*>);
    ErrorOr<FlatPtr> sys$gethostname(Userspace<char*>, size_t);
    ErrorOr<FlatPtr> sys$sethostname(Userspace<char const*>, size_t);
    ErrorOr<FlatPtr> sys$uname(Userspace<utsname*>);
    ErrorOr<FlatPtr> sys$readlink(Userspace<Syscall::SC_readlink_params const*>);
    ErrorOr<FlatPtr> sys$fork(RegisterState&);
    ErrorOr<FlatPtr> sys$execve(Userspace<Syscall::SC_execve_params const*>);
    ErrorOr<FlatPtr> sys$dup2(int old_fd, int new_fd);
    ErrorOr<FlatPtr> sys$sigaction(int signum, Userspace<sigaction const*> act, Userspace<sigaction*> old_act);
    ErrorOr<FlatPtr> sys$sigaltstack(Userspace<stack_t const*> ss, Userspace<stack_t*> old_ss);
    ErrorOr<FlatPtr> sys$sigprocmask(int how, Userspace<sigset_t const*> set, Userspace<sigset_t*> old_set);
    ErrorOr<FlatPtr> sys$sigpending(Userspace<sigset_t*>);
    ErrorOr<FlatPtr> sys$sigsuspend(Userspace<sigset_t const*>);
    ErrorOr<FlatPtr> sys$sigtimedwait(Userspace<sigset_t const*>, Userspace<siginfo_t*>, Userspace<timespec const*>);
    ErrorOr<FlatPtr> sys$getgroups(size_t, Userspace<GroupID*>);
    ErrorOr<FlatPtr> sys$setgroups(size_t, Userspace<GroupID const*>);
    ErrorOr<FlatPtr> sys$pipe(Userspace<int*>, int flags);
    ErrorOr<FlatPtr> sys$killpg(pid_t pgrp, int sig);
    ErrorOr<FlatPtr> sys$seteuid(UserID);
    ErrorOr<FlatPtr> sys$setegid(GroupID);
    ErrorOr<FlatPtr> sys$setuid(UserID);
    ErrorOr<FlatPtr> sys$setgid(GroupID);
    ErrorOr<FlatPtr> sys$setreuid(UserID, UserID);
    ErrorOr<FlatPtr> sys$setresuid(UserID, UserID, UserID);
    ErrorOr<FlatPtr> sys$setregid(GroupID, GroupID);
    ErrorOr<FlatPtr> sys$setresgid(GroupID, GroupID, GroupID);
    ErrorOr<FlatPtr> sys$alarm(unsigned seconds);
    ErrorOr<FlatPtr> sys$faccessat(Userspace<Syscall::SC_faccessat_params const*>);
    ErrorOr<FlatPtr> sys$fcntl(int fd, int cmd, uintptr_t extra_arg);
    ErrorOr<FlatPtr> sys$ioctl(int fd, unsigned request, FlatPtr arg);
    ErrorOr<FlatPtr> sys$mkdir(int dirfd, Userspace<char const*> pathname, size_t path_length, mode_t mode);
    ErrorOr<FlatPtr> sys$times(Userspace<tms*>);
    ErrorOr<FlatPtr> sys$utime(Userspace<char const*> pathname, size_t path_length, Userspace<const struct utimbuf*>);
    ErrorOr<FlatPtr> sys$utimensat(Userspace<Syscall::SC_utimensat_params const*>);
    ErrorOr<FlatPtr> sys$link(Userspace<Syscall::SC_link_params const*>);
    ErrorOr<FlatPtr> sys$unlink(int dirfd, Userspace<char const*> pathname, size_t path_length, int flags);
    ErrorOr<FlatPtr> sys$symlink(Userspace<Syscall::SC_symlink_params const*>);
    ErrorOr<FlatPtr> sys$rmdir(Userspace<char const*> pathname, size_t path_length);
    ErrorOr<FlatPtr> sys$mount(Userspace<Syscall::SC_mount_params const*>);
    ErrorOr<FlatPtr> sys$umount(Userspace<char const*> mountpoint, size_t mountpoint_length);
    ErrorOr<FlatPtr> sys$chmod(Userspace<Syscall::SC_chmod_params const*>);
    ErrorOr<FlatPtr> sys$fchmod(int fd, mode_t);
    ErrorOr<FlatPtr> sys$chown(Userspace<Syscall::SC_chown_params const*>);
    ErrorOr<FlatPtr> sys$fchown(int fd, UserID, GroupID);
    ErrorOr<FlatPtr> sys$fsync(int fd);
    ErrorOr<FlatPtr> sys$socket(int domain, int type, int protocol);
    ErrorOr<FlatPtr> sys$bind(int sockfd, Userspace<sockaddr const*> addr, socklen_t);
    ErrorOr<FlatPtr> sys$listen(int sockfd, int backlog);
    ErrorOr<FlatPtr> sys$accept4(Userspace<Syscall::SC_accept4_params const*>);
    ErrorOr<FlatPtr> sys$connect(int sockfd, Userspace<sockaddr const*>, socklen_t);
    ErrorOr<FlatPtr> sys$shutdown(int sockfd, int how);
    ErrorOr<FlatPtr> sys$sendmsg(int sockfd, Userspace<const struct msghdr*>, int flags);
    ErrorOr<FlatPtr> sys$recvmsg(int sockfd, Userspace<struct msghdr*>, int flags);
    ErrorOr<FlatPtr> sys$getsockopt(Userspace<Syscall::SC_getsockopt_params const*>);
    ErrorOr<FlatPtr> sys$setsockopt(Userspace<Syscall::SC_setsockopt_params const*>);
    ErrorOr<FlatPtr> sys$getsockname(Userspace<Syscall::SC_getsockname_params const*>);
    ErrorOr<FlatPtr> sys$getpeername(Userspace<Syscall::SC_getpeername_params const*>);
    ErrorOr<FlatPtr> sys$socketpair(Userspace<Syscall::SC_socketpair_params const*>);
    ErrorOr<FlatPtr> sys$scheduler_set_parameters(Userspace<Syscall::SC_scheduler_parameters_params const*>);
    ErrorOr<FlatPtr> sys$scheduler_get_parameters(Userspace<Syscall::SC_scheduler_parameters_params*>);
    ErrorOr<FlatPtr> sys$create_thread(void* (*)(void*), Userspace<Syscall::SC_create_thread_params const*>);
    [[noreturn]] void sys$exit_thread(Userspace<void*>, Userspace<void*>, size_t);
    ErrorOr<FlatPtr> sys$join_thread(pid_t tid, Userspace<void**> exit_value);
    ErrorOr<FlatPtr> sys$detach_thread(pid_t tid);
    ErrorOr<FlatPtr> sys$set_thread_name(pid_t tid, Userspace<char const*> buffer, size_t buffer_size);
    ErrorOr<FlatPtr> sys$get_thread_name(pid_t tid, Userspace<char*> buffer, size_t buffer_size);
    ErrorOr<FlatPtr> sys$kill_thread(pid_t tid, int signal);
    ErrorOr<FlatPtr> sys$rename(Userspace<Syscall::SC_rename_params const*>);
    ErrorOr<FlatPtr> sys$mknod(Userspace<Syscall::SC_mknod_params const*>);
    ErrorOr<FlatPtr> sys$realpath(Userspace<Syscall::SC_realpath_params const*>);
    ErrorOr<FlatPtr> sys$getrandom(Userspace<void*>, size_t, unsigned int);
    ErrorOr<FlatPtr> sys$getkeymap(Userspace<Syscall::SC_getkeymap_params const*>);
    ErrorOr<FlatPtr> sys$setkeymap(Userspace<Syscall::SC_setkeymap_params const*>);
    ErrorOr<FlatPtr> sys$profiling_enable(pid_t, Userspace<u64 const*>);
    ErrorOr<FlatPtr> profiling_enable(pid_t, u64 event_mask);
    ErrorOr<FlatPtr> sys$profiling_disable(pid_t);
    ErrorOr<FlatPtr> sys$profiling_free_buffer(pid_t);
    ErrorOr<FlatPtr> sys$futex(Userspace<Syscall::SC_futex_params const*>);
    ErrorOr<FlatPtr> sys$pledge(Userspace<Syscall::SC_pledge_params const*>);
    ErrorOr<FlatPtr> sys$unveil(Userspace<Syscall::SC_unveil_params const*>);
    ErrorOr<FlatPtr> sys$perf_event(int type, FlatPtr arg1, FlatPtr arg2);
    ErrorOr<FlatPtr> sys$perf_register_string(Userspace<char const*>, size_t);
    ErrorOr<FlatPtr> sys$get_stack_bounds(Userspace<FlatPtr*> stack_base, Userspace<size_t*> stack_size);
    ErrorOr<FlatPtr> sys$ptrace(Userspace<Syscall::SC_ptrace_params const*>);
    ErrorOr<FlatPtr> sys$sendfd(int sockfd, int fd);
    ErrorOr<FlatPtr> sys$recvfd(int sockfd, int options);
    ErrorOr<FlatPtr> sys$sysconf(int name);
    ErrorOr<FlatPtr> sys$disown(ProcessID);
    ErrorOr<FlatPtr> sys$allocate_tls(Userspace<char const*> initial_data, size_t);
    ErrorOr<FlatPtr> sys$prctl(int option, FlatPtr arg1, FlatPtr arg2);
    ErrorOr<FlatPtr> sys$set_coredump_metadata(Userspace<Syscall::SC_set_coredump_metadata_params const*>);
    ErrorOr<FlatPtr> sys$anon_create(size_t, int options);
    ErrorOr<FlatPtr> sys$statvfs(Userspace<Syscall::SC_statvfs_params const*> user_params);
    ErrorOr<FlatPtr> sys$fstatvfs(int fd, statvfs* buf);
    ErrorOr<FlatPtr> sys$map_time_page();
    ErrorOr<FlatPtr> sys$jail_create(Userspace<Syscall::SC_jail_create_params*> user_params);
    ErrorOr<FlatPtr> sys$jail_attach(Userspace<Syscall::SC_jail_attach_params const*> user_params);
    ErrorOr<FlatPtr> sys$get_root_session_id(pid_t force_sid);

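    // Shared implementation behind sys$getsockname() and sys$getpeername().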
    template<bool sockname, typename Params>
    ErrorOr<void> get_sock_or_peer_name(Params const&);

    static void initialize();

    [[noreturn]] void crash(int signal, FlatPtr ip, bool out_of_memory = false);
    [[nodiscard]] siginfo_t wait_info() const;

    const TTY* tty() const { return m_tty; }
    void set_tty(TTY*);

    u32 m_ticks_in_user { 0 };
    u32 m_ticks_in_kernel { 0 };

    u32 m_ticks_in_user_for_dead_children { 0 };
    u32 m_ticks_in_kernel_for_dead_children { 0 };

    NonnullRefPtr<Custody> current_directory();
    RefPtr<Custody> executable();
    RefPtr<Custody const> executable() const;

    static constexpr size_t max_arguments_size = Thread::default_userspace_stack_size / 8;
    static constexpr size_t max_environment_size = Thread::default_userspace_stack_size / 8;
    static constexpr size_t max_auxiliary_size = Thread::default_userspace_stack_size / 8;
    NonnullOwnPtrVector<KString> const& arguments() const { return m_arguments; };
    NonnullOwnPtrVector<KString> const& environment() const { return m_environment; };

    ErrorOr<void> exec(NonnullOwnPtr<KString> path, NonnullOwnPtrVector<KString> arguments, NonnullOwnPtrVector<KString> environment, Thread*& new_main_thread, InterruptsState& previous_interrupts_state, int recursion_depth = 0);

    ErrorOr<LoadResult> load(NonnullLockRefPtr<OpenFileDescription> main_program_description, LockRefPtr<OpenFileDescription> interpreter_description, const ElfW(Ehdr) & main_program_header);

    void terminate_due_to_signal(u8 signal);
    ErrorOr<void> send_signal(u8 signal, Process* sender);

    u8 termination_signal() const
    {
        return with_protected_data([](auto& protected_data) -> u8 {
            return protected_data.termination_signal;
        });
    }
    u8 termination_status() const
    {
        return with_protected_data([](auto& protected_data) { return protected_data.termination_status; });
    }

    u16 thread_count() const
    {
        return with_protected_data([](auto& protected_data) {
            return protected_data.thread_count.load(AK::MemoryOrder::memory_order_relaxed);
        });
    }

    Mutex& big_lock() { return m_big_lock; }
    Mutex& ptrace_lock() { return m_ptrace_lock; }

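    // Pledge bookkeeping lives in the protected data; has_promised() tests the bit for one promise.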
    bool has_promises() const
    {
        return with_protected_data([](auto& protected_data) { return protected_data.has_promises.load(); });
    }
    bool has_promised(Pledge pledge) const
    {
        return with_protected_data([&](auto& protected_data) {
            return (protected_data.promises & (1U << (u32)pledge)) != 0;
        });
    }

    VeilState veil_state() const
    {
        return m_unveil_data.with([&](auto const& unveil_data) { return unveil_data.state; });
    }

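    // The veil state together with the root of this process's unveiled path tree.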
    struct UnveilData {
        explicit UnveilData(UnveilNode&& p)
            : paths(move(p))
        {
        }
        VeilState state { VeilState::None };
        UnveilNode paths;
    };

    auto& unveil_data() { return m_unveil_data; }
    auto const& unveil_data() const { return m_unveil_data; }

    auto& exec_unveil_data() { return m_exec_unveil_data; }
    auto const& exec_unveil_data() const { return m_exec_unveil_data; }

    bool wait_for_tracer_at_next_execve() const
    {
        return m_wait_for_tracer_at_next_execve;
    }
    void set_wait_for_tracer_at_next_execve(bool val)
    {
        m_wait_for_tracer_at_next_execve = val;
    }

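    // Copy data out of (peek) or into (poke) this process's userspace memory, e.g. on behalf of a tracer.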
    ErrorOr<void> peek_user_data(Span<u8> destination, Userspace<u8 const*> address);
    ErrorOr<FlatPtr> peek_user_data(Userspace<FlatPtr const*> address);
    ErrorOr<void> poke_user_data(Userspace<FlatPtr*> address, FlatPtr data);

    void disowned_by_waiter(Process& process);
    void unblock_waiters(Thread::WaitBlocker::UnblockFlags, u8 signal = 0);
    Thread::WaitBlockerSet& wait_blocker_set() { return m_wait_blocker_set; }

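    // Invokes `callback` for every coredump property that has both a key and a value,
    // stopping early (via TRY) if the callback returns an error.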
    template<typename Callback>
    ErrorOr<void> for_each_coredump_property(Callback callback) const
    {
        return m_coredump_properties.with([&](auto const& coredump_properties) -> ErrorOr<void> {
            for (auto const& property : coredump_properties) {
                if (property.key && property.value)
                    TRY(callback(*property.key, *property.value));
            }

            return {};
        });
    }

    ErrorOr<void> set_coredump_property(NonnullOwnPtr<KString> key, NonnullOwnPtr<KString> value);
    ErrorOr<void> try_set_coredump_property(StringView key, StringView value);

    NonnullLockRefPtrVector<Thread> const& threads_for_coredump(Badge<Coredump>) const { return m_threads_for_coredump; }

    PerformanceEventBuffer* perf_events() { return m_perf_event_buffer; }
    PerformanceEventBuffer const* perf_events() const { return m_perf_event_buffer; }

    SpinlockProtected<OwnPtr<Memory::AddressSpace>, LockRank::None>& address_space() { return m_space; }
    SpinlockProtected<OwnPtr<Memory::AddressSpace>, LockRank::None> const& address_space() const { return m_space; }

    VirtualAddress signal_trampoline() const
    {
        return with_protected_data([](auto& protected_data) { return protected_data.signal_trampoline; });
    }

    ErrorOr<void> require_promise(Pledge);
    ErrorOr<void> require_no_promises() const;

    ErrorOr<void> validate_mmap_prot(int prot, bool map_stack, bool map_anonymous, Memory::Region const* region = nullptr) const;
    ErrorOr<void> validate_inode_mmap_prot(int prot, bool description_readable, bool description_writable, bool map_shared) const;

private:
    friend class MemoryManager;
    friend class Scheduler;
    friend class Region;
    friend class PerformanceManager;

    bool add_thread(Thread&);
    bool remove_thread(Thread&);

    Process(NonnullOwnPtr<KString> name, NonnullRefPtr<Credentials>, ProcessID ppid, bool is_kernel_process, RefPtr<Custody> current_directory, RefPtr<Custody> executable, TTY* tty, UnveilNode unveil_tree, UnveilNode exec_unveil_tree);
    static ErrorOr<NonnullLockRefPtr<Process>> try_create(LockRefPtr<Thread>& first_thread, NonnullOwnPtr<KString> name, UserID, GroupID, ProcessID ppid, bool is_kernel_process, RefPtr<Custody> current_directory = nullptr, RefPtr<Custody> executable = nullptr, TTY* = nullptr, Process* fork_parent = nullptr);
    ErrorOr<void> attach_resources(NonnullOwnPtr<Memory::AddressSpace>&&, LockRefPtr<Thread>& first_thread, Process* fork_parent);
    static ProcessID allocate_pid();

    void kill_threads_except_self();
    void kill_all_threads();
    ErrorOr<void> dump_core();
    ErrorOr<void> dump_perfcore();
    bool create_perf_events_buffer_if_needed();
    void delete_perf_events_buffer();

    ErrorOr<void> do_exec(NonnullLockRefPtr<OpenFileDescription> main_program_description, NonnullOwnPtrVector<KString> arguments, NonnullOwnPtrVector<KString> environment, LockRefPtr<OpenFileDescription> interpreter_description, Thread*& new_main_thread, InterruptsState& previous_interrupts_state, const ElfW(Ehdr) & main_program_header);
    ErrorOr<FlatPtr> do_write(OpenFileDescription&, UserOrKernelBuffer const&, size_t, Optional<off_t> = {});

    ErrorOr<FlatPtr> do_statvfs(FileSystem const& path, Custody const*, statvfs* buf);

    ErrorOr<LockRefPtr<OpenFileDescription>> find_elf_interpreter_for_executable(StringView path, ElfW(Ehdr) const& main_executable_header, size_t main_executable_header_size, size_t file_size);

    ErrorOr<void> do_kill(Process&, int signal);
    ErrorOr<void> do_killpg(ProcessGroupID pgrp, int signal);
    ErrorOr<void> do_killall(int signal);
    ErrorOr<void> do_killself(int signal);

    ErrorOr<siginfo_t> do_waitid(Variant<Empty, NonnullLockRefPtr<Process>, NonnullLockRefPtr<ProcessGroup>> waitee, int options);

    static ErrorOr<NonnullOwnPtr<KString>> get_syscall_path_argument(Userspace<char const*> user_path, size_t path_length);
    static ErrorOr<NonnullOwnPtr<KString>> get_syscall_path_argument(Syscall::StringArgument const&);

    bool has_tracee_thread(ProcessID tracer_pid);

    void clear_signal_handlers_for_exec();
    void clear_futex_queues_on_exec();

    ErrorOr<GlobalFutexKey> get_futex_key(FlatPtr user_address, bool shared);

    ErrorOr<void> remap_range_as_stack(FlatPtr address, size_t size);

    ErrorOr<FlatPtr> read_impl(int fd, Userspace<u8*> buffer, size_t size);

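// Helpers that back this process's entries in /proc (ProcFS).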
public:
    NonnullLockRefPtr<ProcessProcFSTraits> procfs_traits() const { return *m_procfs_traits; }
    ErrorOr<void> procfs_get_fds_stats(KBufferBuilder& builder) const;
    ErrorOr<void> procfs_get_perf_events(KBufferBuilder& builder) const;
    ErrorOr<void> procfs_get_unveil_stats(KBufferBuilder& builder) const;
    ErrorOr<void> procfs_get_pledge_stats(KBufferBuilder& builder) const;
    ErrorOr<void> procfs_get_virtual_memory_stats(KBufferBuilder& builder) const;
    ErrorOr<void> procfs_get_binary_link(KBufferBuilder& builder) const;
    ErrorOr<void> procfs_get_current_work_directory_link(KBufferBuilder& builder) const;
    ErrorOr<void> procfs_get_command_line(KBufferBuilder& builder) const;
    mode_t binary_link_required_mode() const;
    ErrorOr<void> procfs_get_thread_stack(ThreadID thread_id, KBufferBuilder& builder) const;
    ErrorOr<void> traverse_stacks_directory(FileSystemID, Function<ErrorOr<void>(FileSystem::DirectoryEntryView const&)> callback) const;
    ErrorOr<NonnullLockRefPtr<Inode>> lookup_stacks_directory(ProcFS const&, StringView name) const;
    ErrorOr<size_t> procfs_get_file_description_link(unsigned fd, KBufferBuilder& builder) const;
    ErrorOr<void> traverse_file_descriptions_directory(FileSystemID, Function<ErrorOr<void>(FileSystem::DirectoryEntryView const&)> callback) const;
    ErrorOr<NonnullLockRefPtr<Inode>> lookup_file_descriptions_directory(ProcFS const&, StringView name) const;
    ErrorOr<NonnullLockRefPtr<Inode>> lookup_children_directory(ProcFS const&, StringView name) const;
    ErrorOr<void> traverse_children_directory(FileSystemID, Function<ErrorOr<void>(FileSystem::DirectoryEntryView const&)> callback) const;
    ErrorOr<size_t> procfs_get_child_proccess_link(ProcessID child_pid, KBufferBuilder& builder) const;

private:
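    // Chooses the buffer that performance events for this process should be written to:
    // the global buffer when all threads are being profiled, this process's own buffer while
    // it is being profiled, otherwise none.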
    inline PerformanceEventBuffer* current_perf_events_buffer()
    {
        if (g_profiling_all_threads)
            return g_global_perf_events;
        if (m_profiling)
            return m_perf_event_buffer.ptr();
        return nullptr;
    }

    IntrusiveListNode<Process> m_list_node;

    NonnullOwnPtr<KString> m_name;

    SpinlockProtected<OwnPtr<Memory::AddressSpace>, LockRank::None> m_space;

    LockRefPtr<ProcessGroup> m_pg;

    RecursiveSpinlock<LockRank::None> mutable m_protected_data_lock;
    AtomicEdgeAction<u32> m_protected_data_refs;
    void protect_data();
    void unprotect_data();

    OwnPtr<ThreadTracer> m_tracer;

public:
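    // One slot in the process's file descriptor table: an OpenFileDescription plus its flags.
    // A slot is first allocate()d (reserved) and only later gets a description installed;
    // allocate() and deallocate() both expect the slot to be empty (!is_valid()).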
class OpenFileDescriptionAndFlags {
|
2020-07-30 21:50:31 +00:00
|
|
|
public:
|
2021-06-12 01:23:58 +00:00
|
|
|
bool is_valid() const { return !m_description.is_null(); }
|
2021-07-28 06:59:24 +00:00
|
|
|
bool is_allocated() const { return m_is_allocated; }
|
|
|
|
void allocate()
|
|
|
|
{
|
|
|
|
VERIFY(!m_is_allocated);
|
|
|
|
VERIFY(!is_valid());
|
|
|
|
m_is_allocated = true;
|
|
|
|
}
|
|
|
|
void deallocate()
|
|
|
|
{
|
|
|
|
VERIFY(m_is_allocated);
|
|
|
|
VERIFY(!is_valid());
|
|
|
|
m_is_allocated = false;
|
|
|
|
}
|
2021-06-12 01:23:58 +00:00
|
|
|
|
2021-09-07 11:39:11 +00:00
|
|
|
OpenFileDescription* description() { return m_description; }
|
2022-04-01 17:58:27 +00:00
|
|
|
OpenFileDescription const* description() const { return m_description; }
|
2020-07-30 21:50:31 +00:00
|
|
|
u32 flags() const { return m_flags; }
|
|
|
|
void set_flags(u32 flags) { m_flags = flags; }
|
|
|
|
|
2019-04-29 02:55:54 +00:00
|
|
|
void clear();
|
2022-08-19 18:53:40 +00:00
|
|
|
void set(NonnullLockRefPtr<OpenFileDescription>&&, u32 flags = 0);
|
2020-07-30 21:50:31 +00:00
|
|
|
|
|
|
|
private:
|
2022-08-19 18:53:40 +00:00
|
|
|
LockRefPtr<OpenFileDescription> m_description;
|
2021-07-28 06:59:24 +00:00
|
|
|
bool m_is_allocated { false };
|
2020-07-30 21:50:31 +00:00
|
|
|
u32 m_flags { 0 };
|
2018-11-13 00:36:31 +00:00
|
|
|
};
|
2021-06-12 01:23:58 +00:00
|
|
|
|
2021-07-28 06:59:24 +00:00
|
|
|
class ScopedDescriptionAllocation;
|
2021-09-07 11:39:11 +00:00
|
|
|
class OpenFileDescriptions {
|
|
|
|
AK_MAKE_NONCOPYABLE(OpenFileDescriptions);
|
2022-01-29 00:22:28 +00:00
|
|
|
AK_MAKE_NONMOVABLE(OpenFileDescriptions);
|
2021-06-22 18:22:17 +00:00
|
|
|
friend class Process;
|
|
|
|
|
|
|
|
public:
|
2022-01-29 00:22:28 +00:00
|
|
|
OpenFileDescriptions() { }
|
2022-04-01 17:58:27 +00:00
|
|
|
ALWAYS_INLINE OpenFileDescriptionAndFlags const& operator[](size_t i) const { return at(i); }
|
2021-09-07 11:39:11 +00:00
|
|
|
ALWAYS_INLINE OpenFileDescriptionAndFlags& operator[](size_t i) { return at(i); }
|
2021-06-22 18:22:17 +00:00
|
|
|
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<void> try_clone(Kernel::Process::OpenFileDescriptions const& other)
|
2021-06-22 18:22:17 +00:00
|
|
|
{
|
2021-11-10 10:55:37 +00:00
|
|
|
TRY(try_resize(other.m_fds_metadatas.size()));
|
2021-08-13 08:37:07 +00:00
|
|
|
|
|
|
|
for (size_t i = 0; i < other.m_fds_metadatas.size(); ++i) {
|
|
|
|
m_fds_metadatas[i] = other.m_fds_metadatas[i];
|
|
|
|
}
|
2021-11-07 23:51:39 +00:00
|
|
|
return {};
|
2021-06-22 18:22:17 +00:00
|
|
|
}
|
|
|
|
|
2022-04-01 17:58:27 +00:00
|
|
|
OpenFileDescriptionAndFlags const& at(size_t i) const;
|
2021-09-07 11:39:11 +00:00
|
|
|
OpenFileDescriptionAndFlags& at(size_t i);
|
2021-06-22 18:22:17 +00:00
|
|
|
|
2021-09-07 11:39:11 +00:00
|
|
|
OpenFileDescriptionAndFlags const* get_if_valid(size_t i) const;
|
|
|
|
OpenFileDescriptionAndFlags* get_if_valid(size_t i);
|
2021-08-16 02:02:48 +00:00
|
|
|
|
2022-04-01 17:58:27 +00:00
|
|
|
void enumerate(Function<void(OpenFileDescriptionAndFlags const&)>) const;
|
|
|
|
ErrorOr<void> try_enumerate(Function<ErrorOr<void>(OpenFileDescriptionAndFlags const&)>) const;
|
2021-09-07 11:39:11 +00:00
|
|
|
void change_each(Function<void(OpenFileDescriptionAndFlags&)>);
|
2021-06-22 18:22:17 +00:00
|
|
|
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<ScopedDescriptionAllocation> allocate(int first_candidate_fd = 0);
|
2021-06-22 18:22:17 +00:00
|
|
|
size_t open_count() const;
|
|
|
|
|
2021-11-10 10:55:37 +00:00
|
|
|
ErrorOr<void> try_resize(size_t size) { return m_fds_metadatas.try_resize(size); }
|
2021-06-22 18:22:17 +00:00
|
|
|
|
2021-10-31 22:45:01 +00:00
|
|
|
static constexpr size_t max_open()
|
2021-06-22 18:22:17 +00:00
|
|
|
{
|
2021-10-31 22:45:01 +00:00
|
|
|
return s_max_open_file_descriptors;
|
2021-06-22 18:22:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void clear()
|
|
|
|
{
|
|
|
|
m_fds_metadatas.clear();
|
|
|
|
}
|
|
|
|
|
2022-08-19 18:53:40 +00:00
|
|
|
ErrorOr<NonnullLockRefPtr<OpenFileDescription>> open_file_description(int fd) const;
|
2021-06-22 18:22:17 +00:00
|
|
|
|
|
|
|
private:
|
2021-10-31 22:45:01 +00:00
|
|
|
static constexpr size_t s_max_open_file_descriptors { FD_SETSIZE };
|
2021-09-07 11:39:11 +00:00
|
|
|
Vector<OpenFileDescriptionAndFlags> m_fds_metadatas;
|
2021-06-22 18:22:17 +00:00
|
|
|
};
|
|
|
|
|
2021-07-28 06:59:24 +00:00
|
|
|
class ScopedDescriptionAllocation {
|
|
|
|
AK_MAKE_NONCOPYABLE(ScopedDescriptionAllocation);
|
|
|
|
|
|
|
|
public:
|
|
|
|
ScopedDescriptionAllocation() = default;
|
2021-09-07 11:39:11 +00:00
|
|
|
ScopedDescriptionAllocation(int tracked_fd, OpenFileDescriptionAndFlags* description)
|
2021-07-28 06:59:24 +00:00
|
|
|
: fd(tracked_fd)
|
|
|
|
, m_description(description)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
ScopedDescriptionAllocation(ScopedDescriptionAllocation&& other)
|
|
|
|
: fd(other.fd)
|
|
|
|
{
|
|
|
|
// Take over the responsibility of tracking the deallocation.
|
|
|
|
swap(m_description, other.m_description);
|
|
|
|
}
|
|
|
|
|
2022-01-29 00:22:28 +00:00
|
|
|
ScopedDescriptionAllocation& operator=(ScopedDescriptionAllocation&& other)
|
|
|
|
{
|
|
|
|
if (this != &other) {
|
|
|
|
m_description = exchange(other.m_description, nullptr);
|
|
|
|
fd = exchange(other.fd, -1);
|
|
|
|
}
|
|
|
|
return *this;
|
|
|
|
}
|
|
|
|
|
2021-07-28 06:59:24 +00:00
|
|
|
~ScopedDescriptionAllocation()
|
|
|
|
{
|
|
|
|
if (m_description && m_description->is_allocated() && !m_description->is_valid()) {
|
|
|
|
m_description->deallocate();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-01-29 00:22:28 +00:00
|
|
|
int fd { -1 };
|
2021-07-28 06:59:24 +00:00
|
|
|
|
|
|
|
private:
|
2021-09-07 11:39:11 +00:00
|
|
|
OpenFileDescriptionAndFlags* m_description { nullptr };
|
2021-07-28 06:59:24 +00:00
|
|
|
};
|
|
|
|
|
2021-08-14 12:43:34 +00:00
|
|
|
class ProcessProcFSTraits : public ProcFSExposedComponent {
|
|
|
|
public:
|
2022-08-19 18:53:40 +00:00
|
|
|
static ErrorOr<NonnullLockRefPtr<ProcessProcFSTraits>> try_create(Badge<Process>, LockWeakPtr<Process> process)
|
2021-08-14 12:43:34 +00:00
|
|
|
{
|
2022-08-19 18:53:40 +00:00
|
|
|
return adopt_nonnull_lock_ref_or_enomem(new (nothrow) ProcessProcFSTraits(move(process)));
|
2021-08-14 12:43:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
virtual InodeIndex component_index() const override;
|
2022-11-25 20:29:27 +00:00
|
|
|
virtual ErrorOr<NonnullLockRefPtr<ProcFSInode>> to_inode(ProcFS const& procfs_instance) const override;
|
2021-11-18 14:11:31 +00:00
|
|
|
virtual ErrorOr<void> traverse_as_directory(FileSystemID, Function<ErrorOr<void>(FileSystem::DirectoryEntryView const&)>) const override;
|
2021-08-14 12:43:34 +00:00
|
|
|
virtual mode_t required_mode() const override { return 0555; }
|
|
|
|
|
2021-08-28 20:11:16 +00:00
|
|
|
virtual UserID owner_user() const override;
|
|
|
|
virtual GroupID owner_group() const override;
|
2021-08-14 12:43:34 +00:00
|
|
|
|
|
|
|
private:
|
2022-08-19 18:53:40 +00:00
|
|
|
explicit ProcessProcFSTraits(LockWeakPtr<Process> process)
|
2022-02-13 19:21:14 +00:00
|
|
|
: m_process(move(process))
|
2021-08-14 12:43:34 +00:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
// NOTE: We need to weakly hold on to the process, because otherwise
|
|
|
|
// we would be creating a reference cycle.
|
2022-08-19 18:53:40 +00:00
|
|
|
LockWeakPtr<Process> m_process;
|
2021-08-14 12:43:34 +00:00
|
|
|
};
|
|
|
|
|
2022-01-29 00:29:07 +00:00
|
|
|
MutexProtected<OpenFileDescriptions>& fds() { return m_fds; }
|
|
|
|
MutexProtected<OpenFileDescriptions> const& fds() const { return m_fds; }
|
2022-01-29 00:22:28 +00:00
|
|
|
|
2022-08-19 18:53:40 +00:00
|
|
|
ErrorOr<NonnullLockRefPtr<OpenFileDescription>> open_file_description(int fd)
|
2022-01-29 00:22:28 +00:00
|
|
|
{
|
2022-01-29 00:29:07 +00:00
|
|
|
return m_fds.with_shared([fd](auto& fds) { return fds.open_file_description(fd); });
|
2022-01-29 00:22:28 +00:00
|
|
|
}
|
|
|
|
|
2022-08-19 18:53:40 +00:00
|
|
|
ErrorOr<NonnullLockRefPtr<OpenFileDescription>> open_file_description(int fd) const
|
2022-01-29 00:22:28 +00:00
|
|
|
{
|
2022-01-29 00:29:07 +00:00
|
|
|
return m_fds.with_shared([fd](auto& fds) { return fds.open_file_description(fd); });
|
2022-01-29 00:22:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ErrorOr<ScopedDescriptionAllocation> allocate_fd()
|
|
|
|
{
|
2022-01-29 00:29:07 +00:00
|
|
|
return m_fds.with_exclusive([](auto& fds) { return fds.allocate(); });
|
2022-01-29 00:22:28 +00:00
|
|
|
}
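// A hedged sketch of how these helpers might be combined when installing a new
// file descriptor (illustrative only; `source_fd`, `new_description` and the
// enclosing syscall are assumptions, not code from this header):
//
//     auto new_description = TRY(open_file_description(source_fd));
//     auto fd_allocation = TRY(allocate_fd());
//     m_fds.with_exclusive([&](auto& fds) {
//         fds[fd_allocation.fd].set(move(new_description));
//     });
//     // Had an error been returned before set(), ~ScopedDescriptionAllocation
//     // would have released the still-unused slot automatically.
//     return fd_allocation.fd;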
|
2021-06-12 01:23:58 +00:00
|
|
|
|
|
|
|
private:
|
2022-10-01 11:28:27 +00:00
|
|
|
ErrorOr<NonnullRefPtr<Custody>> custody_for_dirfd(int dirfd);
|
|
|
|
|
2022-11-09 10:39:58 +00:00
|
|
|
SpinlockProtected<Thread::ListInProcess, LockRank::None>& thread_list() { return m_thread_list; }
|
|
|
|
SpinlockProtected<Thread::ListInProcess, LockRank::None> const& thread_list() const { return m_thread_list; }
|
2021-08-07 11:28:18 +00:00
|
|
|
|
2022-07-24 14:00:51 +00:00
|
|
|
ErrorOr<NonnullRefPtr<Thread>> get_thread_from_pid_or_tid(pid_t pid_or_tid, Syscall::SchedulerParametersMode mode);
|
|
|
|
|
2022-11-09 10:39:58 +00:00
|
|
|
SpinlockProtected<Thread::ListInProcess, LockRank::None> m_thread_list {};
|
2018-11-07 17:30:59 +00:00
|
|
|
|
2022-01-29 00:29:07 +00:00
|
|
|
MutexProtected<OpenFileDescriptions> m_fds;
|
2018-10-18 11:05:00 +00:00
|
|
|
|
2022-04-01 17:58:27 +00:00
|
|
|
bool const m_is_kernel_process;
|
2021-07-13 23:15:24 +00:00
|
|
|
Atomic<State> m_state { State::Running };
|
2019-12-11 19:36:56 +00:00
|
|
|
bool m_profiling { false };
|
2021-01-03 23:58:50 +00:00
|
|
|
Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_stopped { false };
|
2021-08-22 12:51:04 +00:00
|
|
|
bool m_should_generate_coredump { false };
|
2019-08-08 12:56:50 +00:00
|
|
|
|
2022-11-09 10:39:58 +00:00
|
|
|
SpinlockProtected<RefPtr<Custody>, LockRank::None> m_executable;
|
2022-03-07 16:56:25 +00:00
|
|
|
|
2022-11-09 10:39:58 +00:00
|
|
|
SpinlockProtected<RefPtr<Custody>, LockRank::None> m_current_directory;
|
2018-10-24 12:28:22 +00:00
|
|
|
|
2021-09-09 09:36:40 +00:00
|
|
|
NonnullOwnPtrVector<KString> m_arguments;
|
|
|
|
NonnullOwnPtrVector<KString> m_environment;
|
2021-01-15 19:21:03 +00:00
|
|
|
|
2022-08-19 18:53:40 +00:00
|
|
|
LockRefPtr<TTY> m_tty;
|
2018-10-30 12:59:29 +00:00
|
|
|
|
2022-08-19 18:53:40 +00:00
|
|
|
LockWeakPtr<Memory::Region> m_master_tls_region;
|
2022-11-02 20:26:02 +00:00
|
|
|
|
|
|
|
IntrusiveListNode<Process> m_jail_list_node;
|
2022-11-09 10:39:58 +00:00
|
|
|
SpinlockProtected<RefPtr<Jail>, LockRank::Process> m_attached_jail {};
|
2022-11-02 20:26:02 +00:00
|
|
|
|
2019-09-07 13:50:44 +00:00
|
|
|
size_t m_master_tls_size { 0 };
|
|
|
|
size_t m_master_tls_alignment { 0 };
|
|
|
|
|
2022-07-11 17:32:29 +00:00
|
|
|
Mutex m_big_lock { "Process"sv, Mutex::MutexBehavior::BigLock };
|
|
|
|
Mutex m_ptrace_lock { "ptrace"sv };
|
2019-06-07 09:30:07 +00:00
|
|
|
|
2022-08-19 18:53:40 +00:00
|
|
|
LockRefPtr<Timer> m_alarm_timer;
|
2019-07-29 05:26:01 +00:00
|
|
|
|
2022-11-09 10:39:58 +00:00
|
|
|
SpinlockProtected<UnveilData, LockRank::None> m_unveil_data;
|
|
|
|
SpinlockProtected<UnveilData, LockRank::None> m_exec_unveil_data;
|
Kernel: Add a basic implementation of unveil()
This syscall is a complement to pledge() and adds the same sort of
incremental relinquishing of capabilities for filesystem access.
The first call to unveil() will "drop a veil" on the process, and from
now on, only unveiled parts of the filesystem are visible to it.
Each call to unveil() specifies a path to either a directory or a file
along with permissions for that path. The permissions are a combination
of the following:
- r: Read access (like the "rpath" promise)
- w: Write access (like the "wpath" promise)
- x: Execute access
- c: Create/remove access (like the "cpath" promise)
Attempts to open a path that has not been unveiled will fail with
ENOENT. If the unveiled path lacks sufficient permissions, it will fail
with EACCES.
Like pledge(), subsequent calls to unveil() with the same path can only
remove permissions, not add them.
Once you call unveil(nullptr, nullptr), the veil is locked, and it's no
longer possible to unveil any more paths for the process, ever.
This concept comes from OpenBSD, and their implementation does various
things differently, I'm sure. This is just a first implementation for
SerenityOS, and we'll keep improving on it as we go. :^)
2020-01-20 21:12:04 +00:00
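// An illustrative userspace sketch of the unveil() API described above (a hedged
// example; the paths and error handling are arbitrary, not taken from this header):
//
//     if (unveil("/home/anon", "r") < 0)        // read-only visibility
//         perror("unveil");
//     if (unveil("/tmp", "rwc") < 0)            // read, write, create/remove
//         perror("unveil");
//     if (unveil(nullptr, nullptr) < 0)         // lock the veil; no further unveil() calls
//         perror("unveil");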
|
|
|
|
2020-02-02 19:26:27 +00:00
|
|
|
OwnPtr<PerformanceEventBuffer> m_perf_event_buffer;
|
2020-02-17 12:29:49 +00:00
|
|
|
|
2020-03-28 08:47:16 +00:00
|
|
|
// This member is used in the implementation of ptrace's PT_TRACEME flag.
|
|
|
|
// If it is set to true, the process will stop at the next execve syscall
|
|
|
|
// and wait for a tracer to attach.
|
|
|
|
bool m_wait_for_tracer_at_next_execve { false };
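// A minimal tracee-side sketch of the PT_TRACEME flow described above (an
// illustrative assumption; the traced program and error handling are arbitrary):
//
//     if (fork() == 0) {
//         if (ptrace(PT_TRACEME, 0, nullptr, 0) < 0)
//             perror("ptrace");
//         execl("/bin/ls", "ls", nullptr);      // the child stops here until a tracer attaches
//     }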
|
2020-11-29 23:05:27 +00:00
|
|
|
|
2021-08-22 23:22:38 +00:00
|
|
|
Thread::WaitBlockerSet m_wait_blocker_set;
|
2020-12-30 14:19:57 +00:00
|
|
|
|
2021-08-05 21:43:10 +00:00
|
|
|
struct CoredumpProperty {
|
|
|
|
OwnPtr<KString> key;
|
|
|
|
OwnPtr<KString> value;
|
|
|
|
};
|
|
|
|
|
2022-11-09 10:39:58 +00:00
|
|
|
SpinlockProtected<Array<CoredumpProperty, 4>, LockRank::None> m_coredump_properties {};
|
2022-08-19 18:53:40 +00:00
|
|
|
NonnullLockRefPtrVector<Thread> m_threads_for_coredump;
|
2021-06-06 21:40:03 +00:00
|
|
|
|
2022-08-19 18:53:40 +00:00
|
|
|
mutable LockRefPtr<ProcessProcFSTraits> m_procfs_traits;
|
2022-02-24 18:55:49 +00:00
|
|
|
struct SignalActionData {
|
|
|
|
VirtualAddress handler_or_sigaction;
|
|
|
|
int flags { 0 };
|
|
|
|
u32 mask { 0 };
|
|
|
|
};
|
|
|
|
Array<SignalActionData, NSIG> m_signal_action_data;
|
2021-08-14 12:43:34 +00:00
|
|
|
|
2021-08-07 19:30:06 +00:00
|
|
|
static_assert(sizeof(ProtectedValues) < (PAGE_SIZE));
|
2022-08-21 10:18:26 +00:00
|
|
|
alignas(4096) ProtectedValues m_protected_values_do_not_access_directly;
|
2021-08-07 19:30:06 +00:00
|
|
|
u8 m_protected_values_padding[PAGE_SIZE - sizeof(ProtectedValues)];
|
|
|
|
|
2021-06-06 21:40:03 +00:00
|
|
|
public:
|
2021-09-09 12:00:59 +00:00
|
|
|
using List = IntrusiveListRelaxedConst<&Process::m_list_node>;
|
2022-11-09 10:39:58 +00:00
|
|
|
static SpinlockProtected<Process::List, LockRank::None>& all_instances();
|
2018-10-16 09:01:38 +00:00
|
|
|
};
|
|
|
|
|
2021-08-07 19:30:06 +00:00
|
|
|
// Note: The Process object should span 2 pages of 4096 bytes each.
|
|
|
|
// It's not expected that the Process object will expand further because the first
|
|
|
|
// page is used for all unprotected values (which should be plenty of space for them).
|
|
|
|
// The second page is used exclusively for write-protected values.
|
2021-09-05 07:57:53 +00:00
|
|
|
static_assert(AssertSize<Process, (PAGE_SIZE * 2)>());
|
2021-08-07 19:30:06 +00:00
|
|
|
|
2022-11-09 10:39:58 +00:00
|
|
|
extern RecursiveSpinlock<LockRank::None> g_profiling_lock;
|
2021-07-24 16:43:29 +00:00
|
|
|
|
2021-05-16 09:36:52 +00:00
|
|
|
template<IteratorFunction<Thread&> Callback>
|
2021-03-11 13:12:55 +00:00
|
|
|
inline IterationDecision Process::for_each_thread(Callback callback)
|
|
|
|
{
|
2021-08-07 11:28:18 +00:00
|
|
|
return thread_list().with([&](auto& thread_list) -> IterationDecision {
|
|
|
|
for (auto& thread : thread_list) {
|
|
|
|
IterationDecision decision = callback(thread);
|
|
|
|
if (decision != IterationDecision::Continue)
|
|
|
|
return decision;
|
|
|
|
}
|
|
|
|
return IterationDecision::Continue;
|
|
|
|
});
|
2021-03-11 13:12:55 +00:00
|
|
|
}
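// A hedged usage sketch of the iterator-style overload above (the callback body is
// illustrative; it assumes Thread::tid() and dbgln(), as used elsewhere in the kernel):
//
//     process.for_each_thread([](Thread& thread) {
//         dbgln("thread {} belongs to this process", thread.tid());
//         return IterationDecision::Continue;
//     });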
|
|
|
|
|
2021-05-16 09:36:52 +00:00
|
|
|
template<IteratorFunction<Process&> Callback>
|
2022-11-02 20:26:02 +00:00
|
|
|
inline void Process::for_each_ignoring_jails(Callback callback)
|
2018-11-08 15:09:05 +00:00
|
|
|
{
|
2022-04-01 17:58:27 +00:00
|
|
|
Process::all_instances().with([&](auto const& list) {
|
2021-07-24 16:43:29 +00:00
|
|
|
for (auto it = list.begin(); it != list.end();) {
|
|
|
|
auto& process = *it;
|
|
|
|
++it;
|
2022-11-02 20:26:02 +00:00
|
|
|
if (callback(process) == IterationDecision::Break)
|
|
|
|
break;
|
2019-03-23 21:03:17 +00:00
|
|
|
}
|
2021-07-24 16:43:29 +00:00
|
|
|
});
|
2018-11-08 15:09:05 +00:00
|
|
|
}
|
2019-02-21 14:45:31 +00:00
|
|
|
|
2022-11-02 20:26:02 +00:00
|
|
|
template<IteratorFunction<Thread&> Callback>
|
|
|
|
inline IterationDecision Process::for_each_thread(Callback callback) const
|
2021-05-16 09:36:52 +00:00
|
|
|
{
|
2022-11-02 20:26:02 +00:00
|
|
|
return thread_list().with([&](auto& thread_list) -> IterationDecision {
|
|
|
|
for (auto& thread : thread_list) {
|
|
|
|
IterationDecision decision = callback(thread);
|
|
|
|
if (decision != IterationDecision::Continue)
|
|
|
|
return decision;
|
|
|
|
}
|
2021-05-16 09:36:52 +00:00
|
|
|
return IterationDecision::Continue;
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
template<VoidFunction<Thread&> Callback>
|
|
|
|
inline IterationDecision Process::for_each_thread(Callback callback) const
|
|
|
|
{
|
2021-08-07 11:28:18 +00:00
|
|
|
thread_list().with([&](auto& thread_list) {
|
|
|
|
for (auto& thread : thread_list)
|
|
|
|
callback(thread);
|
|
|
|
});
|
2021-05-16 09:36:52 +00:00
|
|
|
return IterationDecision::Continue;
|
|
|
|
}
|
|
|
|
|
2022-02-24 18:01:33 +00:00
|
|
|
inline ErrorOr<void> Process::try_for_each_thread(Function<ErrorOr<void>(Thread const&)> callback) const
|
|
|
|
{
|
|
|
|
return thread_list().with([&](auto& thread_list) -> ErrorOr<void> {
|
|
|
|
for (auto& thread : thread_list)
|
|
|
|
TRY(callback(thread));
|
|
|
|
return {};
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
2021-05-16 09:36:52 +00:00
|
|
|
template<VoidFunction<Thread&> Callback>
|
|
|
|
inline IterationDecision Process::for_each_thread(Callback callback)
|
|
|
|
{
|
2021-08-07 11:28:18 +00:00
|
|
|
thread_list().with([&](auto& thread_list) {
|
|
|
|
for (auto& thread : thread_list)
|
|
|
|
callback(thread);
|
|
|
|
});
|
2021-05-16 09:36:52 +00:00
|
|
|
return IterationDecision::Continue;
|
|
|
|
}
|
|
|
|
|
2020-08-08 15:32:34 +00:00
|
|
|
inline ProcessID Thread::pid() const
|
2019-03-23 21:03:17 +00:00
|
|
|
{
|
2020-08-02 02:04:56 +00:00
|
|
|
return m_process->pid();
|
2019-03-23 21:03:17 +00:00
|
|
|
}
|
2019-07-08 16:58:19 +00:00
|
|
|
|
2020-02-16 00:27:42 +00:00
|
|
|
}
|
2020-09-12 03:11:07 +00:00
|
|
|
|
2021-07-18 16:18:35 +00:00
|
|
|
#define VERIFY_PROCESS_BIG_LOCK_ACQUIRED(process) \
|
2022-08-17 20:03:04 +00:00
|
|
|
VERIFY(process->big_lock().is_exclusively_locked_by_current_thread())
|
2021-07-18 16:18:35 +00:00
|
|
|
|
|
|
|
#define VERIFY_NO_PROCESS_BIG_LOCK(process) \
|
2022-08-17 20:03:04 +00:00
|
|
|
VERIFY(!process->big_lock().is_exclusively_locked_by_current_thread())
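// A hedged sketch of how these macros are typically placed at the top of a syscall
// handler (the handler name and body are illustrative assumptions):
//
//     ErrorOr<FlatPtr> Process::sys$example(int fd)
//     {
//         VERIFY_NO_PROCESS_BIG_LOCK(this);
//         ...
//     }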
|
2021-07-18 16:18:35 +00:00
|
|
|
|
2023-01-04 17:25:47 +00:00
|
|
|
inline ErrorOr<NonnullOwnPtr<KString>> try_copy_kstring_from_user(Kernel::Syscall::StringArgument const& string)
|
2021-05-28 07:29:16 +00:00
|
|
|
{
|
2021-08-13 05:04:31 +00:00
|
|
|
Userspace<char const*> characters((FlatPtr)string.characters);
|
|
|
|
return try_copy_kstring_from_user(characters, string.length);
|
2021-05-28 07:29:16 +00:00
|
|
|
}
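// A hedged usage sketch for the overload above (it assumes a syscall params struct
// whose `path` member is a Syscall::StringArgument; the names are illustrative):
//
//     auto path = TRY(try_copy_kstring_from_user(params.path));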
|
|
|
|
|
2021-01-08 23:11:15 +00:00
|
|
|
template<>
|
2021-10-21 17:26:36 +00:00
|
|
|
struct AK::Formatter<Kernel::Process> : AK::Formatter<FormatString> {
|
2021-11-16 00:15:21 +00:00
|
|
|
ErrorOr<void> format(FormatBuilder& builder, Kernel::Process const& value)
|
2021-01-08 23:11:15 +00:00
|
|
|
{
|
2022-07-11 17:32:29 +00:00
|
|
|
return AK::Formatter<FormatString>::format(builder, "{}({})"sv, value.name(), value.pid().value());
|
2021-01-08 23:11:15 +00:00
|
|
|
}
|
|
|
|
};
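// Usage sketch for the formatter above (assuming dbgln() and Process::current(),
// as used elsewhere in the kernel; the message text is illustrative):
//
//     dbgln("Current process: {}", Process::current());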
|
2022-07-13 22:25:35 +00:00
|
|
|
|
|
|
|
namespace AK {
|
|
|
|
template<>
|
|
|
|
struct Traits<Kernel::GlobalFutexKey> : public GenericTraits<Kernel::GlobalFutexKey> {
|
|
|
|
static unsigned hash(Kernel::GlobalFutexKey const& futex_key) { return pair_int_hash(ptr_hash(futex_key.raw.parent), ptr_hash(futex_key.raw.offset)); }
|
|
|
|
static bool equals(Kernel::GlobalFutexKey const& a, Kernel::GlobalFutexKey const& b) { return a.raw.parent == b.raw.parent && a.raw.offset == b.raw.offset; }
|
|
|
|
};
|
|
|
|
}
|