2020-01-18 08:38:21 +00:00
|
|
|
/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */
|
|
|
|
|
2018-10-16 09:01:38 +00:00
|
|
|
#pragma once
|
|
|
|
|
2021-05-16 09:36:52 +00:00
|
|
|
#include <AK/Concepts.h>
|
2020-02-16 01:01:42 +00:00
|
|
|
#include <AK/HashMap.h>
|
2021-06-06 21:40:03 +00:00
|
|
|
#include <AK/IntrusiveList.h>
|
2021-07-24 16:43:29 +00:00
|
|
|
#include <AK/IntrusiveListRelaxedConst.h>
|
2020-08-02 02:04:56 +00:00
|
|
|
#include <AK/NonnullRefPtrVector.h>
|
2021-08-07 19:30:06 +00:00
|
|
|
#include <AK/OwnPtr.h>
|
2020-07-31 14:28:37 +00:00
|
|
|
#include <AK/Userspace.h>
|
2021-08-23 04:01:04 +00:00
|
|
|
#include <AK/Variant.h>
|
2020-02-24 12:24:30 +00:00
|
|
|
#include <AK/WeakPtr.h>
|
2020-08-06 09:17:53 +00:00
|
|
|
#include <AK/Weakable.h>
|
2022-01-31 21:09:30 +00:00
|
|
|
#include <Kernel/API/POSIX/sys/resource.h>
|
2020-07-04 23:37:36 +00:00
|
|
|
#include <Kernel/API/Syscall.h>
|
2021-11-30 22:59:05 +00:00
|
|
|
#include <Kernel/Assertions.h>
|
2021-07-07 16:29:19 +00:00
|
|
|
#include <Kernel/AtomicEdgeAction.h>
|
2020-02-16 01:01:42 +00:00
|
|
|
#include <Kernel/FileSystem/InodeMetadata.h>
|
2021-09-07 11:39:11 +00:00
|
|
|
#include <Kernel/FileSystem/OpenFileDescription.h>
|
2021-08-06 12:11:45 +00:00
|
|
|
#include <Kernel/FileSystem/UnveilNode.h>
|
2020-02-16 00:50:16 +00:00
|
|
|
#include <Kernel/Forward.h>
|
2020-12-22 06:21:58 +00:00
|
|
|
#include <Kernel/FutexQueue.h>
|
2021-07-18 07:10:27 +00:00
|
|
|
#include <Kernel/Locking/Mutex.h>
|
2021-08-21 21:31:15 +00:00
|
|
|
#include <Kernel/Locking/MutexProtected.h>
|
2021-08-06 11:57:39 +00:00
|
|
|
#include <Kernel/Memory/AddressSpace.h>
|
2021-04-25 21:42:36 +00:00
|
|
|
#include <Kernel/PerformanceEventBuffer.h>
|
2021-08-10 17:51:28 +00:00
|
|
|
#include <Kernel/ProcessExposed.h>
|
2020-08-15 19:13:19 +00:00
|
|
|
#include <Kernel/ProcessGroup.h>
|
2020-06-18 20:18:44 +00:00
|
|
|
#include <Kernel/StdLib.h>
|
2019-03-23 21:03:17 +00:00
|
|
|
#include <Kernel/Thread.h>
|
2019-05-28 09:53:16 +00:00
|
|
|
#include <Kernel/UnixTypes.h>
|
2021-04-16 19:53:43 +00:00
|
|
|
#include <LibC/elf.h>
|
2019-05-26 00:08:51 +00:00
|
|
|
#include <LibC/signal_numbers.h>
|
2018-10-16 09:01:38 +00:00
|
|
|
|
2020-02-16 00:27:42 +00:00
|
|
|
namespace Kernel {
|
|
|
|
|
2022-01-12 22:27:33 +00:00
|
|
|
MutexProtected<OwnPtr<KString>>& hostname();
|
2021-02-28 01:18:48 +00:00
|
|
|
Time kgettimeofday();
|
2019-03-13 12:13:23 +00:00
|
|
|
|
2020-06-18 20:18:44 +00:00
|
|
|
// X-macro listing every pledge promise known to the kernel.
// Define __ENUMERATE_PLEDGE_PROMISE(x) before expanding this macro to
// generate code (enumerators, strings, ...) for each promise.
#define ENUMERATE_PLEDGE_PROMISES         \
    __ENUMERATE_PLEDGE_PROMISE(stdio)     \
    __ENUMERATE_PLEDGE_PROMISE(rpath)     \
    __ENUMERATE_PLEDGE_PROMISE(wpath)     \
    __ENUMERATE_PLEDGE_PROMISE(cpath)     \
    __ENUMERATE_PLEDGE_PROMISE(dpath)     \
    __ENUMERATE_PLEDGE_PROMISE(inet)      \
    __ENUMERATE_PLEDGE_PROMISE(id)        \
    __ENUMERATE_PLEDGE_PROMISE(proc)      \
    __ENUMERATE_PLEDGE_PROMISE(ptrace)    \
    __ENUMERATE_PLEDGE_PROMISE(exec)      \
    __ENUMERATE_PLEDGE_PROMISE(unix)      \
    __ENUMERATE_PLEDGE_PROMISE(recvfd)    \
    __ENUMERATE_PLEDGE_PROMISE(sendfd)    \
    __ENUMERATE_PLEDGE_PROMISE(fattr)     \
    __ENUMERATE_PLEDGE_PROMISE(tty)       \
    __ENUMERATE_PLEDGE_PROMISE(chown)     \
    __ENUMERATE_PLEDGE_PROMISE(thread)    \
    __ENUMERATE_PLEDGE_PROMISE(video)     \
    __ENUMERATE_PLEDGE_PROMISE(accept)    \
    __ENUMERATE_PLEDGE_PROMISE(settime)   \
    __ENUMERATE_PLEDGE_PROMISE(sigaction) \
    __ENUMERATE_PLEDGE_PROMISE(setkeymap) \
    __ENUMERATE_PLEDGE_PROMISE(prot_exec) \
    __ENUMERATE_PLEDGE_PROMISE(map_fixed) \
    __ENUMERATE_PLEDGE_PROMISE(getkeymap) \
    __ENUMERATE_PLEDGE_PROMISE(no_error)
|
2020-01-11 19:48:43 +00:00
|
|
|
|
|
|
|
// One enumerator per pledge promise, generated from ENUMERATE_PLEDGE_PROMISES
// so the enum and any promise-name tables stay in sync automatically.
enum class Pledge : u32 {
#define __ENUMERATE_PLEDGE_PROMISE(x) x,
ENUMERATE_PLEDGE_PROMISES
#undef __ENUMERATE_PLEDGE_PROMISE
};
|
|
|
|
|
2020-01-21 18:28:29 +00:00
|
|
|
// State of a process's unveil() veil:
// - None: unveil() has never been called; the entire filesystem is visible.
// - Dropped: a veil has been dropped; only unveiled paths are visible.
// - Locked: unveil(nullptr, nullptr) was called; no further paths may be
//   unveiled for this process, ever.
enum class VeilState {
    None,
    Dropped,
    Locked,
};
|
|
|
|
|
2022-07-13 06:29:51 +00:00
|
|
|
using FutexQueues = HashMap<FlatPtr, NonnullRefPtr<FutexQueue>>;
|
2020-12-22 06:21:58 +00:00
|
|
|
|
2021-02-08 14:45:40 +00:00
|
|
|
struct LoadResult;
|
|
|
|
|
2021-08-14 13:20:13 +00:00
|
|
|
class Process final
|
2021-12-28 22:46:21 +00:00
|
|
|
: public ListedRefCounted<Process, LockType::Spinlock>
|
2020-08-06 09:17:53 +00:00
|
|
|
, public Weakable<Process> {
|
|
|
|
|
2021-08-07 19:30:06 +00:00
|
|
|
// Security-sensitive per-process data. Writable only while the data is
// unprotected via a ProtectedDataMutationScope (see unprotect_data() /
// protect_data()); do not reorder or rename members without auditing users
// of m_protected_values.
class ProtectedValues {
public:
// Process identity and session/parent linkage.
ProcessID pid { 0 };
ProcessID ppid { 0 };
SessionID sid { 0 };
// Credentials: effective, real, and saved user/group IDs plus
// supplementary groups.
UserID euid { 0 };
GroupID egid { 0 };
UserID uid { 0 };
GroupID gid { 0 };
UserID suid { 0 };
GroupID sgid { 0 };
Vector<GroupID> extra_gids;
// Whether this process may be inspected/coredumped (see is_dumpable()).
bool dumpable { false };
// pledge() state: current promises and the promises applied after exec.
Atomic<bool> has_promises { false };
Atomic<u32> promises { 0 };
Atomic<bool> has_execpromises { false };
Atomic<u32> execpromises { 0 };
// File-mode creation mask; default 022 (see sys$umask()).
mode_t umask { 022 };
// Address of the userspace signal trampoline for this process.
VirtualAddress signal_trampoline;
Atomic<u32> thread_count { 0 };
// Exit status / fatal signal recorded at termination.
u8 termination_status { 0 };
u8 termination_signal { 0 };
};
|
|
|
|
|
|
|
|
public:
|
2020-04-22 09:54:58 +00:00
|
|
|
AK_MAKE_NONCOPYABLE(Process);
|
|
|
|
AK_MAKE_NONMOVABLE(Process);
|
|
|
|
|
2021-03-11 12:13:05 +00:00
|
|
|
MAKE_ALIGNED_ALLOCATED(Process, PAGE_SIZE);
|
|
|
|
|
2019-03-23 21:03:17 +00:00
|
|
|
friend class Thread;
|
2021-08-22 12:51:04 +00:00
|
|
|
friend class Coredump;
|
2019-05-28 09:53:16 +00:00
|
|
|
|
2021-03-10 18:59:46 +00:00
|
|
|
// Helper class to temporarily unprotect a process's protected data so you can write to it.
|
2021-03-11 12:13:05 +00:00
|
|
|
class ProtectedDataMutationScope {
|
2021-03-10 18:59:46 +00:00
|
|
|
public:
|
2021-03-11 12:13:05 +00:00
|
|
|
explicit ProtectedDataMutationScope(Process& process)
|
2021-03-10 18:59:46 +00:00
|
|
|
: m_process(process)
|
|
|
|
{
|
|
|
|
m_process.unprotect_data();
|
|
|
|
}
|
|
|
|
|
2021-03-11 12:13:05 +00:00
|
|
|
~ProtectedDataMutationScope() { m_process.protect_data(); }
|
2021-03-10 18:59:46 +00:00
|
|
|
|
|
|
|
private:
|
|
|
|
Process& m_process;
|
|
|
|
};
|
|
|
|
|
2021-07-13 23:15:24 +00:00
|
|
|
// Coarse process lifecycle state, consulted by is_dying()/is_dead().
enum class State : u8 {
    Running = 0, // Normal operation.
    Dying,       // Tear-down has begun.
    Dead,        // Fully terminated.
};
|
|
|
|
|
2018-10-16 09:01:38 +00:00
|
|
|
public:
|
2021-08-14 12:43:34 +00:00
|
|
|
class ProcessProcFSTraits;
|
|
|
|
|
2021-08-19 19:45:07 +00:00
|
|
|
// Returns the Process of the thread currently running on this processor.
// VERIFYs that a current thread exists; use has_current() first in contexts
// where that may not hold.
inline static Process& current()
{
    auto* thread = Processor::current_thread();
    VERIFY(thread);
    return thread->process();
}
|
|
|
|
|
|
|
|
inline static bool has_current()
|
|
|
|
{
|
2021-11-06 21:06:08 +00:00
|
|
|
return Processor::current_thread() != nullptr;
|
2020-06-28 21:34:31 +00:00
|
|
|
}
|
2020-02-17 14:04:27 +00:00
|
|
|
|
2021-05-23 19:45:58 +00:00
|
|
|
// Adapter that lets an arbitrary callable serve as a kernel process entry
// point: unpacks the heap-allocated EntryFunction smuggled through the
// opaque data pointer, invokes it, and frees it afterwards.
// @param data Heap-allocated EntryFunction, allocated by the templated
//             create_kernel_process() overload.
template<typename EntryFunction>
static void kernel_process_trampoline(void* data)
{
    // static_cast is the correct named cast for void* -> T*;
    // reinterpret_cast was unnecessarily strong here.
    auto* func = static_cast<EntryFunction*>(data);
    (*func)();
    delete func;
}
|
|
|
|
|
2021-07-09 01:37:36 +00:00
|
|
|
// Controls whether a newly created kernel process is registered
// (via register_new()) at creation time.
enum class RegisterProcess {
    No,
    Yes,
};
|
|
|
|
|
2020-11-17 03:51:34 +00:00
|
|
|
template<typename EntryFunction>
|
2021-09-07 10:53:28 +00:00
|
|
|
static RefPtr<Process> create_kernel_process(RefPtr<Thread>& first_thread, NonnullOwnPtr<KString> name, EntryFunction entry, u32 affinity = THREAD_AFFINITY_DEFAULT, RegisterProcess do_register = RegisterProcess::Yes)
|
2020-11-17 03:51:34 +00:00
|
|
|
{
|
|
|
|
auto* entry_func = new EntryFunction(move(entry));
|
2021-07-09 01:37:36 +00:00
|
|
|
return create_kernel_process(first_thread, move(name), &Process::kernel_process_trampoline<EntryFunction>, entry_func, affinity, do_register);
|
2020-11-17 03:51:34 +00:00
|
|
|
}
|
|
|
|
|
2021-09-07 10:53:28 +00:00
|
|
|
static RefPtr<Process> create_kernel_process(RefPtr<Thread>& first_thread, NonnullOwnPtr<KString> name, void (*entry)(void*), void* entry_data = nullptr, u32 affinity = THREAD_AFFINITY_DEFAULT, RegisterProcess do_register = RegisterProcess::Yes);
|
2021-11-07 23:51:39 +00:00
|
|
|
static ErrorOr<NonnullRefPtr<Process>> try_create_user_process(RefPtr<Thread>& first_thread, StringView path, UserID, GroupID, NonnullOwnPtrVector<KString> arguments, NonnullOwnPtrVector<KString> environment, TTY*);
|
Kernel: Introduce the new ProcFS design
The new ProcFS design consists of two main parts:
1. The representative ProcFS class, which is derived from the FS class.
The ProcFS and its inodes are much more lean - merely 3 classes to
represent the common type of inodes - regular files, symbolic links and
directories. They're backed by a ProcFSExposedComponent object, which
is responsible for the functional operation behind the scenes.
2. The backend of the ProcFS - the ProcFSComponentsRegistrar class
and all derived classes from the ProcFSExposedComponent class. These
together form the entire backend and handle all the functions you can
expect from the ProcFS.
The ProcFSExposedComponent derived classes split to 3 types in the
manner of lifetime in the kernel:
1. Persistent objects - this category includes all basic objects, like
the root folder, /proc/bus folder, main blob files in the root folders,
etc. These objects are persistent and cannot die ever.
2. Semi-persistent objects - this category includes all PID folders,
and subdirectories to the PID folders. It also includes exposed objects
like the unveil JSON'ed blob. These object are persistent as long as the
the responsible process they represent is still alive.
3. Dynamic objects - this category includes files in the subdirectories
of a PID folder, like /proc/PID/fd/* or /proc/PID/stacks/*. Essentially,
these objects are always created dynamically and when no longer in need
after being used, they're deallocated.
Nevertheless, the new allocated backend objects and inodes try to use
the same InodeIndex if possible - this might change only when a thread
dies and a new thread is born with a new thread stack, or when a file
descriptor is closed and a new one within the same file descriptor
number is opened. This is needed to actually be able to do something
useful with these objects.
The new design assures that many ProcFS instances can be used at once,
with one backend for usage for all instances.
2021-06-12 01:23:58 +00:00
|
|
|
static void register_new(Process&);
|
2021-08-14 13:11:40 +00:00
|
|
|
|
2018-11-01 12:15:46 +00:00
|
|
|
~Process();
|
2018-10-23 10:44:46 +00:00
|
|
|
|
2021-09-06 10:44:27 +00:00
|
|
|
RefPtr<Thread> create_kernel_thread(void (*entry)(void*), void* entry_data, u32 priority, NonnullOwnPtr<KString> name, u32 affinity = THREAD_AFFINITY_DEFAULT, bool joinable = true);
|
2020-06-28 21:34:31 +00:00
|
|
|
|
2019-12-11 19:36:56 +00:00
|
|
|
// Whether this process is currently being profiled.
bool is_profiling() const { return m_profiling; }
void set_profiling(bool profiling) { m_profiling = profiling; }

// Whether a coredump should be generated for this process on abnormal
// termination.
bool should_generate_coredump() const { return m_should_generate_coredump; }
void set_should_generate_coredump(bool b) { m_should_generate_coredump = b; }
|
2019-12-11 19:36:56 +00:00
|
|
|
|
2021-07-13 23:15:24 +00:00
|
|
|
// "Dying" covers both State::Dying and State::Dead. The acquire load is
// presumably paired with a release store where m_state is advanced — the
// store site is not visible in this header.
bool is_dying() const { return m_state.load(AK::MemoryOrder::memory_order_acquire) != State::Running; }
bool is_dead() const { return m_state.load(AK::MemoryOrder::memory_order_acquire) == State::Dead; }
|
2019-03-23 21:03:17 +00:00
|
|
|
|
2021-01-03 23:58:50 +00:00
|
|
|
// Stopped flag; set_stopped() atomically swaps the flag and returns the
// previous value.
bool is_stopped() const { return m_is_stopped; }
bool set_stopped(bool stopped) { return m_is_stopped.exchange(stopped); }

// Every process is exactly one of kernel or user; these predicates are
// complementary.
bool is_kernel_process() const { return m_is_kernel_process; }
bool is_user_process() const { return !m_is_kernel_process; }
|
2018-11-07 20:19:47 +00:00
|
|
|
|
2020-08-08 15:32:34 +00:00
|
|
|
static RefPtr<Process> from_pid(ProcessID);
|
2020-08-08 20:04:20 +00:00
|
|
|
static SessionID get_sid_from_pgid(ProcessGroupID pgid);
|
2018-10-16 09:01:38 +00:00
|
|
|
|
2021-09-07 10:53:28 +00:00
|
|
|
StringView name() const { return m_name->view(); }
|
2021-08-07 19:30:06 +00:00
|
|
|
ProcessID pid() const { return m_protected_values.pid; }
|
|
|
|
SessionID sid() const { return m_protected_values.sid; }
|
|
|
|
bool is_session_leader() const { return sid().value() == pid().value(); }
|
2020-08-15 19:13:19 +00:00
|
|
|
ProcessGroupID pgid() const { return m_pg ? m_pg->pgid() : 0; }
|
2021-08-07 19:30:06 +00:00
|
|
|
bool is_group_leader() const { return pgid().value() == pid().value(); }
|
2021-08-28 20:11:16 +00:00
|
|
|
Vector<GroupID> const& extra_gids() const { return m_protected_values.extra_gids; }
|
|
|
|
UserID euid() const { return m_protected_values.euid; }
|
|
|
|
GroupID egid() const { return m_protected_values.egid; }
|
|
|
|
UserID uid() const { return m_protected_values.uid; }
|
|
|
|
GroupID gid() const { return m_protected_values.gid; }
|
|
|
|
UserID suid() const { return m_protected_values.suid; }
|
|
|
|
GroupID sgid() const { return m_protected_values.sgid; }
|
2021-08-07 19:30:06 +00:00
|
|
|
ProcessID ppid() const { return m_protected_values.ppid; }
|
|
|
|
|
|
|
|
bool is_dumpable() const { return m_protected_values.dumpable; }
|
2021-03-10 21:42:07 +00:00
|
|
|
void set_dumpable(bool);
|
2020-12-25 17:27:42 +00:00
|
|
|
|
2021-08-07 19:30:06 +00:00
|
|
|
mode_t umask() const { return m_protected_values.umask; }
|
2019-02-22 01:39:13 +00:00
|
|
|
|
2021-08-28 20:11:16 +00:00
|
|
|
bool in_group(GroupID) const;
|
2019-02-27 11:32:53 +00:00
|
|
|
|
2021-05-16 09:36:52 +00:00
|
|
|
// Breakable iteration functions
|
|
|
|
template<IteratorFunction<Process&> Callback>
|
2019-05-28 09:53:16 +00:00
|
|
|
static void for_each(Callback);
|
2021-05-16 09:36:52 +00:00
|
|
|
template<IteratorFunction<Process&> Callback>
|
2020-08-08 20:04:20 +00:00
|
|
|
static void for_each_in_pgrp(ProcessGroupID, Callback);
|
2021-05-16 09:36:52 +00:00
|
|
|
template<IteratorFunction<Process&> Callback>
|
2019-05-28 09:53:16 +00:00
|
|
|
void for_each_child(Callback);
|
2021-03-11 13:12:55 +00:00
|
|
|
|
2021-05-16 09:36:52 +00:00
|
|
|
template<IteratorFunction<Thread&> Callback>
|
2021-03-11 13:12:55 +00:00
|
|
|
IterationDecision for_each_thread(Callback);
|
2021-05-16 09:36:52 +00:00
|
|
|
template<IteratorFunction<Thread&> Callback>
|
|
|
|
IterationDecision for_each_thread(Callback callback) const;
|
2022-02-24 18:01:33 +00:00
|
|
|
ErrorOr<void> try_for_each_thread(Function<ErrorOr<void>(Thread const&)>) const;
|
2021-05-16 09:36:52 +00:00
|
|
|
|
|
|
|
// Non-breakable iteration functions
|
|
|
|
template<VoidFunction<Process&> Callback>
|
|
|
|
static void for_each(Callback);
|
|
|
|
template<VoidFunction<Process&> Callback>
|
|
|
|
static void for_each_in_pgrp(ProcessGroupID, Callback);
|
|
|
|
template<VoidFunction<Process&> Callback>
|
|
|
|
void for_each_child(Callback);
|
|
|
|
|
|
|
|
template<VoidFunction<Thread&> Callback>
|
|
|
|
IterationDecision for_each_thread(Callback);
|
|
|
|
template<VoidFunction<Thread&> Callback>
|
2021-03-11 13:12:55 +00:00
|
|
|
IterationDecision for_each_thread(Callback callback) const;
|
2018-11-02 13:06:48 +00:00
|
|
|
|
2019-01-30 17:26:19 +00:00
|
|
|
void die();
|
2020-12-09 04:18:45 +00:00
|
|
|
void finalize();
|
2018-10-16 09:01:38 +00:00
|
|
|
|
2020-12-09 04:18:45 +00:00
|
|
|
ThreadTracer* tracer() { return m_tracer.ptr(); }
|
|
|
|
bool is_traced() const { return !!m_tracer; }
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<void> start_tracing_from(ProcessID tracer);
|
2020-12-09 04:18:45 +00:00
|
|
|
void stop_tracing();
|
2022-04-01 17:58:27 +00:00
|
|
|
void tracer_trap(Thread&, RegisterState const&);
|
2020-12-09 04:18:45 +00:00
|
|
|
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$emuctl();
|
|
|
|
ErrorOr<FlatPtr> sys$yield();
|
|
|
|
ErrorOr<FlatPtr> sys$sync();
|
|
|
|
ErrorOr<FlatPtr> sys$beep();
|
|
|
|
ErrorOr<FlatPtr> sys$get_process_name(Userspace<char*> buffer, size_t buffer_size);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$set_process_name(Userspace<char const*> user_name, size_t user_name_length);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$create_inode_watcher(u32 flags);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$inode_watcher_add_watch(Userspace<Syscall::SC_inode_watcher_add_watch_params const*> user_params);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$inode_watcher_remove_watch(int fd, int wd);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$dbgputstr(Userspace<char const*>, size_t);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$dump_backtrace();
|
|
|
|
ErrorOr<FlatPtr> sys$gettid();
|
|
|
|
ErrorOr<FlatPtr> sys$setsid();
|
|
|
|
ErrorOr<FlatPtr> sys$getsid(pid_t);
|
|
|
|
ErrorOr<FlatPtr> sys$setpgid(pid_t pid, pid_t pgid);
|
|
|
|
ErrorOr<FlatPtr> sys$getpgrp();
|
|
|
|
ErrorOr<FlatPtr> sys$getpgid(pid_t);
|
|
|
|
ErrorOr<FlatPtr> sys$getuid();
|
|
|
|
ErrorOr<FlatPtr> sys$getgid();
|
|
|
|
ErrorOr<FlatPtr> sys$geteuid();
|
|
|
|
ErrorOr<FlatPtr> sys$getegid();
|
|
|
|
ErrorOr<FlatPtr> sys$getpid();
|
|
|
|
ErrorOr<FlatPtr> sys$getppid();
|
|
|
|
ErrorOr<FlatPtr> sys$getresuid(Userspace<UserID*>, Userspace<UserID*>, Userspace<UserID*>);
|
|
|
|
ErrorOr<FlatPtr> sys$getresgid(Userspace<GroupID*>, Userspace<GroupID*>, Userspace<GroupID*>);
|
2022-01-31 21:09:30 +00:00
|
|
|
ErrorOr<FlatPtr> sys$getrusage(int, Userspace<rusage*>);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$umask(mode_t);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$open(Userspace<Syscall::SC_open_params const*>);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$close(int fd);
|
|
|
|
ErrorOr<FlatPtr> sys$read(int fd, Userspace<u8*>, size_t);
|
2021-12-17 07:34:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$pread(int fd, Userspace<u8*>, size_t, Userspace<off_t const*>);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$readv(int fd, Userspace<const struct iovec*> iov, int iov_count);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$write(int fd, Userspace<u8 const*>, size_t);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$writev(int fd, Userspace<const struct iovec*> iov, int iov_count);
|
|
|
|
ErrorOr<FlatPtr> sys$fstat(int fd, Userspace<stat*>);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$stat(Userspace<Syscall::SC_stat_params const*>);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$lseek(int fd, Userspace<off_t*>, int whence);
|
2021-12-17 07:34:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$ftruncate(int fd, Userspace<off_t const*>);
|
2022-06-18 16:37:54 +00:00
|
|
|
ErrorOr<FlatPtr> sys$posix_fallocate(int fd, Userspace<off_t const*>, Userspace<off_t const*>);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$kill(pid_t pid_or_pgid, int sig);
|
2019-02-15 11:30:48 +00:00
|
|
|
[[noreturn]] void sys$exit(int status);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$sigreturn(RegisterState& registers);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$waitid(Userspace<Syscall::SC_waitid_params const*>);
|
|
|
|
ErrorOr<FlatPtr> sys$mmap(Userspace<Syscall::SC_mmap_params const*>);
|
|
|
|
ErrorOr<FlatPtr> sys$mremap(Userspace<Syscall::SC_mremap_params const*>);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$munmap(Userspace<void*>, size_t);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$set_mmap_name(Userspace<Syscall::SC_set_mmap_name_params const*>);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$mprotect(Userspace<void*>, size_t, int prot);
|
|
|
|
ErrorOr<FlatPtr> sys$madvise(Userspace<void*>, size_t, int advice);
|
|
|
|
ErrorOr<FlatPtr> sys$msyscall(Userspace<void*>);
|
2021-11-17 18:33:00 +00:00
|
|
|
ErrorOr<FlatPtr> sys$msync(Userspace<void*>, size_t, int flags);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$purge(int mode);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$poll(Userspace<Syscall::SC_poll_params const*>);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$get_dir_entries(int fd, Userspace<void*>, size_t);
|
|
|
|
ErrorOr<FlatPtr> sys$getcwd(Userspace<char*>, size_t);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$chdir(Userspace<char const*>, size_t);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$fchdir(int fd);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$adjtime(Userspace<timeval const*>, Userspace<timeval*>);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$clock_gettime(clockid_t, Userspace<timespec*>);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$clock_settime(clockid_t, Userspace<timespec const*>);
|
|
|
|
ErrorOr<FlatPtr> sys$clock_nanosleep(Userspace<Syscall::SC_clock_nanosleep_params const*>);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$gethostname(Userspace<char*>, size_t);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$sethostname(Userspace<char const*>, size_t);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$uname(Userspace<utsname*>);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$readlink(Userspace<Syscall::SC_readlink_params const*>);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$fork(RegisterState&);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$execve(Userspace<Syscall::SC_execve_params const*>);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$dup2(int old_fd, int new_fd);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$sigaction(int signum, Userspace<sigaction const*> act, Userspace<sigaction*> old_act);
|
|
|
|
ErrorOr<FlatPtr> sys$sigaltstack(Userspace<stack_t const*> ss, Userspace<stack_t*> old_ss);
|
|
|
|
ErrorOr<FlatPtr> sys$sigprocmask(int how, Userspace<sigset_t const*> set, Userspace<sigset_t*> old_set);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$sigpending(Userspace<sigset_t*>);
|
2022-05-13 11:15:45 +00:00
|
|
|
ErrorOr<FlatPtr> sys$sigsuspend(Userspace<sigset_t const*>);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$sigtimedwait(Userspace<sigset_t const*>, Userspace<siginfo_t*>, Userspace<timespec const*>);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$getgroups(size_t, Userspace<gid_t*>);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$setgroups(size_t, Userspace<gid_t const*>);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$pipe(int pipefd[2], int flags);
|
|
|
|
ErrorOr<FlatPtr> sys$killpg(pid_t pgrp, int sig);
|
|
|
|
ErrorOr<FlatPtr> sys$seteuid(UserID);
|
|
|
|
ErrorOr<FlatPtr> sys$setegid(GroupID);
|
|
|
|
ErrorOr<FlatPtr> sys$setuid(UserID);
|
|
|
|
ErrorOr<FlatPtr> sys$setgid(GroupID);
|
|
|
|
ErrorOr<FlatPtr> sys$setreuid(UserID, UserID);
|
|
|
|
ErrorOr<FlatPtr> sys$setresuid(UserID, UserID, UserID);
|
|
|
|
ErrorOr<FlatPtr> sys$setresgid(GroupID, GroupID, GroupID);
|
|
|
|
ErrorOr<FlatPtr> sys$alarm(unsigned seconds);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$access(Userspace<char const*> pathname, size_t path_length, int mode);
|
2022-07-02 23:02:45 +00:00
|
|
|
ErrorOr<FlatPtr> sys$fcntl(int fd, int cmd, uintptr_t extra_arg);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$ioctl(int fd, unsigned request, FlatPtr arg);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$mkdir(Userspace<char const*> pathname, size_t path_length, mode_t mode);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$times(Userspace<tms*>);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$utime(Userspace<char const*> pathname, size_t path_length, Userspace<const struct utimbuf*>);
|
2022-05-02 20:26:10 +00:00
|
|
|
ErrorOr<FlatPtr> sys$utimensat(Userspace<Syscall::SC_utimensat_params const*>);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$link(Userspace<Syscall::SC_link_params const*>);
|
2022-02-10 11:30:33 +00:00
|
|
|
ErrorOr<FlatPtr> sys$unlink(int dirfd, Userspace<char const*> pathname, size_t path_length, int flags);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$symlink(Userspace<Syscall::SC_symlink_params const*>);
|
|
|
|
ErrorOr<FlatPtr> sys$rmdir(Userspace<char const*> pathname, size_t path_length);
|
|
|
|
ErrorOr<FlatPtr> sys$mount(Userspace<Syscall::SC_mount_params const*>);
|
|
|
|
ErrorOr<FlatPtr> sys$umount(Userspace<char const*> mountpoint, size_t mountpoint_length);
|
2022-01-11 15:51:34 +00:00
|
|
|
ErrorOr<FlatPtr> sys$chmod(Userspace<Syscall::SC_chmod_params const*>);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$fchmod(int fd, mode_t);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$chown(Userspace<Syscall::SC_chown_params const*>);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$fchown(int fd, UserID, GroupID);
|
|
|
|
ErrorOr<FlatPtr> sys$fsync(int fd);
|
|
|
|
ErrorOr<FlatPtr> sys$socket(int domain, int type, int protocol);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$bind(int sockfd, Userspace<sockaddr const*> addr, socklen_t);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$listen(int sockfd, int backlog);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$accept4(Userspace<Syscall::SC_accept4_params const*>);
|
|
|
|
ErrorOr<FlatPtr> sys$connect(int sockfd, Userspace<sockaddr const*>, socklen_t);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$shutdown(int sockfd, int how);
|
|
|
|
ErrorOr<FlatPtr> sys$sendmsg(int sockfd, Userspace<const struct msghdr*>, int flags);
|
|
|
|
ErrorOr<FlatPtr> sys$recvmsg(int sockfd, Userspace<struct msghdr*>, int flags);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$getsockopt(Userspace<Syscall::SC_getsockopt_params const*>);
|
|
|
|
ErrorOr<FlatPtr> sys$setsockopt(Userspace<Syscall::SC_setsockopt_params const*>);
|
|
|
|
ErrorOr<FlatPtr> sys$getsockname(Userspace<Syscall::SC_getsockname_params const*>);
|
|
|
|
ErrorOr<FlatPtr> sys$getpeername(Userspace<Syscall::SC_getpeername_params const*>);
|
|
|
|
ErrorOr<FlatPtr> sys$socketpair(Userspace<Syscall::SC_socketpair_params const*>);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$sched_setparam(pid_t pid, Userspace<const struct sched_param*>);
|
|
|
|
ErrorOr<FlatPtr> sys$sched_getparam(pid_t pid, Userspace<struct sched_param*>);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$create_thread(void* (*)(void*), Userspace<Syscall::SC_create_thread_params const*>);
|
2021-05-28 09:20:22 +00:00
|
|
|
[[noreturn]] void sys$exit_thread(Userspace<void*>, Userspace<void*>, size_t);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$join_thread(pid_t tid, Userspace<void**> exit_value);
|
|
|
|
ErrorOr<FlatPtr> sys$detach_thread(pid_t tid);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$set_thread_name(pid_t tid, Userspace<char const*> buffer, size_t buffer_size);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$get_thread_name(pid_t tid, Userspace<char*> buffer, size_t buffer_size);
|
|
|
|
ErrorOr<FlatPtr> sys$kill_thread(pid_t tid, int signal);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$rename(Userspace<Syscall::SC_rename_params const*>);
|
|
|
|
ErrorOr<FlatPtr> sys$mknod(Userspace<Syscall::SC_mknod_params const*>);
|
|
|
|
ErrorOr<FlatPtr> sys$realpath(Userspace<Syscall::SC_realpath_params const*>);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$getrandom(Userspace<void*>, size_t, unsigned int);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$getkeymap(Userspace<Syscall::SC_getkeymap_params const*>);
|
|
|
|
ErrorOr<FlatPtr> sys$setkeymap(Userspace<Syscall::SC_setkeymap_params const*>);
|
2022-02-18 21:12:35 +00:00
|
|
|
ErrorOr<FlatPtr> sys$profiling_enable(pid_t, Userspace<u64 const*>);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$profiling_disable(pid_t);
|
|
|
|
ErrorOr<FlatPtr> sys$profiling_free_buffer(pid_t);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$futex(Userspace<Syscall::SC_futex_params const*>);
|
|
|
|
ErrorOr<FlatPtr> sys$pledge(Userspace<Syscall::SC_pledge_params const*>);
|
|
|
|
ErrorOr<FlatPtr> sys$unveil(Userspace<Syscall::SC_unveil_params const*>);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$perf_event(int type, FlatPtr arg1, FlatPtr arg2);
|
|
|
|
ErrorOr<FlatPtr> sys$perf_register_string(Userspace<char const*>, size_t);
|
|
|
|
ErrorOr<FlatPtr> sys$get_stack_bounds(Userspace<FlatPtr*> stack_base, Userspace<size_t*> stack_size);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$ptrace(Userspace<Syscall::SC_ptrace_params const*>);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$sendfd(int sockfd, int fd);
|
|
|
|
ErrorOr<FlatPtr> sys$recvfd(int sockfd, int options);
|
|
|
|
ErrorOr<FlatPtr> sys$sysconf(int name);
|
|
|
|
ErrorOr<FlatPtr> sys$disown(ProcessID);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$allocate_tls(Userspace<char const*> initial_data, size_t);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$prctl(int option, FlatPtr arg1, FlatPtr arg2);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$set_coredump_metadata(Userspace<Syscall::SC_set_coredump_metadata_params const*>);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$anon_create(size_t, int options);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> sys$statvfs(Userspace<Syscall::SC_statvfs_params const*> user_params);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<FlatPtr> sys$fstatvfs(int fd, statvfs* buf);
|
|
|
|
ErrorOr<FlatPtr> sys$map_time_page();
|
2019-02-16 11:13:43 +00:00
|
|
|
|
2020-01-27 20:11:36 +00:00
|
|
|
template<bool sockname, typename Params>
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<void> get_sock_or_peer_name(Params const&);
|
2020-01-27 20:11:36 +00:00
|
|
|
|
2018-10-16 09:01:38 +00:00
|
|
|
static void initialize();
|
|
|
|
|
2021-06-28 16:32:25 +00:00
|
|
|
[[noreturn]] void crash(int signal, FlatPtr ip, bool out_of_memory = false);
|
2021-10-31 21:54:39 +00:00
|
|
|
[[nodiscard]] siginfo_t wait_info() const;
|
2018-10-17 22:26:30 +00:00
|
|
|
|
2018-10-30 14:33:37 +00:00
|
|
|
const TTY* tty() const { return m_tty; }
|
2020-02-16 01:01:42 +00:00
|
|
|
void set_tty(TTY*);
|
2018-10-30 14:33:37 +00:00
|
|
|
|
2019-07-03 19:17:35 +00:00
|
|
|
u32 m_ticks_in_user { 0 };
|
|
|
|
u32 m_ticks_in_kernel { 0 };
|
2018-12-03 00:12:26 +00:00
|
|
|
|
2019-07-03 19:17:35 +00:00
|
|
|
u32 m_ticks_in_user_for_dead_children { 0 };
|
|
|
|
u32 m_ticks_in_kernel_for_dead_children { 0 };
|
2018-12-03 00:12:26 +00:00
|
|
|
|
2022-03-07 16:56:25 +00:00
|
|
|
NonnullRefPtr<Custody> current_directory();
|
2020-12-26 23:54:13 +00:00
|
|
|
Custody* executable() { return m_executable.ptr(); }
|
2022-04-01 17:58:27 +00:00
|
|
|
Custody const* executable() const { return m_executable.ptr(); }
|
2018-10-28 11:20:25 +00:00
|
|
|
|
2022-02-13 19:07:51 +00:00
|
|
|
static constexpr size_t max_arguments_size = Thread::default_userspace_stack_size / 8;
|
|
|
|
static constexpr size_t max_environment_size = Thread::default_userspace_stack_size / 8;
|
2021-09-09 09:36:40 +00:00
|
|
|
NonnullOwnPtrVector<KString> const& arguments() const { return m_arguments; };
|
|
|
|
NonnullOwnPtrVector<KString> const& environment() const { return m_environment; };
|
2021-01-15 19:21:03 +00:00
|
|
|
|
2022-01-13 22:12:51 +00:00
|
|
|
ErrorOr<void> exec(NonnullOwnPtr<KString> path, NonnullOwnPtrVector<KString> arguments, NonnullOwnPtrVector<KString> environment, Thread*& new_main_thread, u32& prev_flags, int recursion_depth = 0);
|
2018-11-02 19:41:58 +00:00
|
|
|
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<LoadResult> load(NonnullRefPtr<OpenFileDescription> main_program_description, RefPtr<OpenFileDescription> interpreter_description, const ElfW(Ehdr) & main_program_header);
|
2020-10-10 09:13:21 +00:00
|
|
|
|
2021-03-10 18:59:46 +00:00
|
|
|
bool is_superuser() const { return euid() == 0; }
|
2018-11-07 00:38:51 +00:00
|
|
|
|
2019-07-03 19:17:35 +00:00
|
|
|
void terminate_due_to_signal(u8 signal);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<void> send_signal(u8 signal, Process* sender);
|
2019-03-23 21:03:17 +00:00
|
|
|
|
2021-08-07 19:30:06 +00:00
|
|
|
u8 termination_signal() const { return m_protected_values.termination_signal; }
|
2022-02-26 11:59:31 +00:00
|
|
|
u8 termination_status() const { return m_protected_values.termination_status; }
|
2021-01-03 20:46:38 +00:00
|
|
|
|
2020-07-30 21:38:15 +00:00
|
|
|
// Returns the current number of threads in this process.
// A relaxed atomic load is sufficient: callers only need an
// approximate snapshot, not a synchronized value.
u16 thread_count() const
{
    return m_protected_values.thread_count.load(AK::MemoryOrder::memory_order_relaxed);
}
|
2019-12-22 10:35:02 +00:00
|
|
|
|
2021-07-17 19:09:51 +00:00
|
|
|
Mutex& big_lock() { return m_big_lock; }
|
|
|
|
Mutex& ptrace_lock() { return m_ptrace_lock; }
|
2019-04-01 18:02:05 +00:00
|
|
|
|
2021-08-07 19:30:06 +00:00
|
|
|
bool has_promises() const { return m_protected_values.has_promises; }
|
2021-11-06 21:06:08 +00:00
|
|
|
bool has_promised(Pledge pledge) const { return (m_protected_values.promises & (1U << (u32)pledge)) != 0; }
|
2020-01-11 19:48:43 +00:00
|
|
|
|
2020-07-30 21:38:15 +00:00
|
|
|
// Returns the process's current unveil() state (VeilState::None until
// the first unveil() call). Reads the state under the unveil data's
// lock via with().
VeilState veil_state() const
{
    return m_unveil_data.with([&](auto const& unveil_data) { return unveil_data.state; });
}
|
Kernel: Add a basic implementation of unveil()
This syscall is a complement to pledge() and adds the same sort of
incremental relinquishing of capabilities for filesystem access.
The first call to unveil() will "drop a veil" on the process, and from
now on, only unveiled parts of the filesystem are visible to it.
Each call to unveil() specifies a path to either a directory or a file
along with permissions for that path. The permissions are a combination
of the following:
- r: Read access (like the "rpath" promise)
- w: Write access (like the "wpath" promise)
- x: Execute access
- c: Create/remove access (like the "cpath" promise)
Attempts to open a path that has not been unveiled with fail with
ENOENT. If the unveiled path lacks sufficient permissions, it will fail
with EACCES.
Like pledge(), subsequent calls to unveil() with the same path can only
remove permissions, not add them.
Once you call unveil(nullptr, nullptr), the veil is locked, and it's no
longer possible to unveil any more paths for the process, ever.
This concept comes from OpenBSD, and their implementation does various
things differently, I'm sure. This is just a first implementation for
SerenityOS, and we'll keep improving on it as we go. :^)
2020-01-20 21:12:04 +00:00
|
|
|
|
2022-03-07 20:23:08 +00:00
|
|
|
struct UnveilData {
|
|
|
|
explicit UnveilData(UnveilNode&& p)
|
|
|
|
: paths(move(p))
|
|
|
|
{
|
|
|
|
}
|
|
|
|
VeilState state { VeilState::None };
|
|
|
|
UnveilNode paths;
|
|
|
|
};
|
|
|
|
|
|
|
|
auto& unveil_data() { return m_unveil_data; }
|
|
|
|
auto const& unveil_data() const { return m_unveil_data; }
|
|
|
|
|
2020-07-30 21:38:15 +00:00
|
|
|
bool wait_for_tracer_at_next_execve() const
|
|
|
|
{
|
|
|
|
return m_wait_for_tracer_at_next_execve;
|
|
|
|
}
|
|
|
|
void set_wait_for_tracer_at_next_execve(bool val)
|
|
|
|
{
|
|
|
|
m_wait_for_tracer_at_next_execve = val;
|
|
|
|
}
|
2020-04-07 15:23:37 +00:00
|
|
|
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<void> peek_user_data(Span<u8> destination, Userspace<u8 const*> address);
|
|
|
|
ErrorOr<FlatPtr> peek_user_data(Userspace<FlatPtr const*> address);
|
2021-11-19 14:13:07 +00:00
|
|
|
ErrorOr<void> poke_user_data(Userspace<FlatPtr*> address, FlatPtr data);
|
2020-04-07 15:23:37 +00:00
|
|
|
|
2020-12-09 02:04:05 +00:00
|
|
|
void disowned_by_waiter(Process& process);
|
2020-12-09 04:18:45 +00:00
|
|
|
void unblock_waiters(Thread::WaitBlocker::UnblockFlags, u8 signal = 0);
|
2021-08-22 23:22:38 +00:00
|
|
|
Thread::WaitBlockerSet& wait_blocker_set() { return m_wait_blocker_set; }
|
2020-11-29 23:05:27 +00:00
|
|
|
|
2021-08-05 21:43:10 +00:00
|
|
|
template<typename Callback>
|
2022-02-24 18:08:48 +00:00
|
|
|
ErrorOr<void> for_each_coredump_property(Callback callback) const
|
2021-08-05 21:43:10 +00:00
|
|
|
{
|
2022-04-09 17:30:20 +00:00
|
|
|
return m_coredump_properties.with([&](auto const& coredump_properties) -> ErrorOr<void> {
|
|
|
|
for (auto const& property : coredump_properties) {
|
|
|
|
if (property.key && property.value)
|
|
|
|
TRY(callback(*property.key, *property.value));
|
|
|
|
}
|
|
|
|
|
|
|
|
return {};
|
|
|
|
});
|
2021-08-05 21:43:10 +00:00
|
|
|
}
|
|
|
|
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<void> set_coredump_property(NonnullOwnPtr<KString> key, NonnullOwnPtr<KString> value);
|
|
|
|
ErrorOr<void> try_set_coredump_property(StringView key, StringView value);
|
2020-12-30 14:19:57 +00:00
|
|
|
|
2022-04-01 17:58:27 +00:00
|
|
|
NonnullRefPtrVector<Thread> const& threads_for_coredump(Badge<Coredump>) const { return m_threads_for_coredump; }
|
2021-01-28 07:41:18 +00:00
|
|
|
|
2021-01-11 08:52:18 +00:00
|
|
|
PerformanceEventBuffer* perf_events() { return m_perf_event_buffer; }
|
2021-09-07 15:35:56 +00:00
|
|
|
PerformanceEventBuffer const* perf_events() const { return m_perf_event_buffer; }
|
2021-01-11 08:52:18 +00:00
|
|
|
|
2021-08-06 11:59:22 +00:00
|
|
|
Memory::AddressSpace& address_space() { return *m_space; }
|
|
|
|
Memory::AddressSpace const& address_space() const { return *m_space; }
|
2021-02-02 18:56:11 +00:00
|
|
|
|
2021-08-07 19:30:06 +00:00
|
|
|
VirtualAddress signal_trampoline() const { return m_protected_values.signal_trampoline; }
|
2021-02-13 23:53:53 +00:00
|
|
|
|
2021-12-29 09:11:45 +00:00
|
|
|
ErrorOr<void> require_promise(Pledge);
|
|
|
|
ErrorOr<void> require_no_promises() const;
|
2021-09-04 20:26:06 +00:00
|
|
|
|
2022-02-25 01:24:57 +00:00
|
|
|
ErrorOr<void> validate_mmap_prot(int prot, bool map_stack, bool map_anonymous, Memory::Region const* region = nullptr) const;
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<void> validate_inode_mmap_prot(int prot, Inode const& inode, bool map_shared) const;
|
2022-03-10 00:53:40 +00:00
|
|
|
|
2018-10-16 09:01:38 +00:00
|
|
|
private:
|
2018-10-18 11:05:00 +00:00
|
|
|
friend class MemoryManager;
|
2018-11-07 21:15:02 +00:00
|
|
|
friend class Scheduler;
|
2018-11-08 13:35:30 +00:00
|
|
|
friend class Region;
|
2021-05-07 05:29:19 +00:00
|
|
|
friend class PerformanceManager;
|
2018-10-18 11:05:00 +00:00
|
|
|
|
2021-01-23 06:24:33 +00:00
|
|
|
bool add_thread(Thread&);
|
|
|
|
bool remove_thread(Thread&);
|
|
|
|
|
2022-03-07 16:56:25 +00:00
|
|
|
Process(NonnullOwnPtr<KString> name, UserID, GroupID, ProcessID ppid, bool is_kernel_process, RefPtr<Custody> current_directory, RefPtr<Custody> executable, TTY* tty, UnveilNode unveil_tree);
|
|
|
|
static ErrorOr<NonnullRefPtr<Process>> try_create(RefPtr<Thread>& first_thread, NonnullOwnPtr<KString> name, UserID, GroupID, ProcessID ppid, bool is_kernel_process, RefPtr<Custody> current_directory = nullptr, RefPtr<Custody> executable = nullptr, TTY* = nullptr, Process* fork_parent = nullptr);
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<void> attach_resources(NonnullOwnPtr<Memory::AddressSpace>&&, RefPtr<Thread>& first_thread, Process* fork_parent);
|
2020-08-08 15:32:34 +00:00
|
|
|
static ProcessID allocate_pid();
|
2018-10-25 09:15:17 +00:00
|
|
|
|
Kernel: Allow process with multiple threads to call exec and exit
This allows a process wich has more than 1 thread to call exec, even
from a thread. This kills all the other threads, but it won't wait for
them to finish, just makes sure that they are not in a running/runable
state.
In the case where a thread does exec, the new program PID will be the
thread TID, to keep the PID == TID in the new process.
This introduces a new function inside the Process class,
kill_threads_except_self which is called on exit() too (exit with
multiple threads wasn't properly working either).
Inside the Lock class, there is the need for a new function,
clear_waiters, which removes all the waiters from the
Process::big_lock. This is needed since after a exit/exec, there should
be no other threads waiting for this lock, the threads should be simply
killed. Only queued threads should wait for this lock at this point,
since blocked threads are handled in set_should_die.
2020-02-18 12:28:28 +00:00
|
|
|
void kill_threads_except_self();
|
|
|
|
void kill_all_threads();
|
2021-11-29 11:06:33 +00:00
|
|
|
ErrorOr<void> dump_core();
|
2022-01-12 21:21:08 +00:00
|
|
|
ErrorOr<void> dump_perfcore();
|
2021-03-02 15:55:54 +00:00
|
|
|
bool create_perf_events_buffer_if_needed();
|
2021-04-19 04:10:05 +00:00
|
|
|
void delete_perf_events_buffer();
|
Kernel: Allow process with multiple threads to call exec and exit
This allows a process wich has more than 1 thread to call exec, even
from a thread. This kills all the other threads, but it won't wait for
them to finish, just makes sure that they are not in a running/runable
state.
In the case where a thread does exec, the new program PID will be the
thread TID, to keep the PID == TID in the new process.
This introduces a new function inside the Process class,
kill_threads_except_self which is called on exit() too (exit with
multiple threads wasn't properly working either).
Inside the Lock class, there is the need for a new function,
clear_waiters, which removes all the waiters from the
Process::big_lock. This is needed since after a exit/exec, there should
be no other threads waiting for this lock, the threads should be simply
killed. Only queued threads should wait for this lock at this point,
since blocked threads are handled in set_should_die.
2020-02-18 12:28:28 +00:00
|
|
|
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<void> do_exec(NonnullRefPtr<OpenFileDescription> main_program_description, NonnullOwnPtrVector<KString> arguments, NonnullOwnPtrVector<KString> environment, RefPtr<OpenFileDescription> interpreter_description, Thread*& new_main_thread, u32& prev_flags, const ElfW(Ehdr) & main_program_header);
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<FlatPtr> do_write(OpenFileDescription&, UserOrKernelBuffer const&, size_t);
|
2018-10-16 09:01:38 +00:00
|
|
|
|
2021-11-06 23:09:48 +00:00
|
|
|
ErrorOr<FlatPtr> do_statvfs(FileSystem const& path, Custody const*, statvfs* buf);
|
2021-05-19 09:31:43 +00:00
|
|
|
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<RefPtr<OpenFileDescription>> find_elf_interpreter_for_executable(StringView path, ElfW(Ehdr) const& main_executable_header, size_t main_executable_header_size, size_t file_size);
|
Kernel: Tighten up exec/do_exec and allow for PT_INTERP iterpreters
This patch changes how exec() figures out which program image to
actually load. Previously, we opened the path to our main executable in
find_shebang_interpreter_for_executable, read the first page (or less,
if the file was smaller) and then decided whether to recurse with the
interpreter instead. We then then re-opened the main executable in
do_exec.
However, since we now want to parse the ELF header and Program Headers
of an elf image before even doing any memory region work, we can change
the way this whole process works. We open the file and read (up to) the
first page in exec() itself, then pass just the page and the amount read
to find_shebang_interpreter_for_executable. Since we now have that page
and the FileDescription for the main executable handy, we can do a few
things. First, validate the ELF header and ELF program headers for any
shenanigans. ELF32 Little Endian i386 only, please. Second, we can grab
the PT_INTERP interpreter from any ET_DYN files, and open that guy right
away if it exists. Finally, we can pass the main executable's and
optionally the PT_INTERP interpreter's file descriptions down to do_exec
and not have to feel guilty about opening the file twice.
In do_exec, we now have a choice. Are we going to load the main
executable, or the interpreter? We could load both, but it'll be way
easier for the inital pass on the RTLD if we only load the interpreter.
Then it can load the main executable itself like any old shared object,
just, the one with main in it :). Later on we can load both of them
into memory and the RTLD can relocate itself before trying to do
anything. The way it's written now the RTLD will get dibs on its
requested virtual addresses being the actual virtual addresses.
2020-01-11 01:28:02 +00:00
|
|
|
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<void> do_kill(Process&, int signal);
|
|
|
|
ErrorOr<void> do_killpg(ProcessGroupID pgrp, int signal);
|
|
|
|
ErrorOr<void> do_killall(int signal);
|
|
|
|
ErrorOr<void> do_killself(int signal);
|
2019-11-14 16:16:30 +00:00
|
|
|
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<siginfo_t> do_waitid(Variant<Empty, NonnullRefPtr<Process>, NonnullRefPtr<ProcessGroup>> waitee, int options);
|
2020-02-05 16:42:43 +00:00
|
|
|
|
2022-04-01 17:58:27 +00:00
|
|
|
static ErrorOr<NonnullOwnPtr<KString>> get_syscall_path_argument(Userspace<char const*> user_path, size_t path_length);
|
|
|
|
static ErrorOr<NonnullOwnPtr<KString>> get_syscall_path_argument(Syscall::StringArgument const&);
|
2020-01-06 10:05:59 +00:00
|
|
|
|
2020-12-09 04:18:45 +00:00
|
|
|
bool has_tracee_thread(ProcessID tracer_pid);
|
2020-03-28 08:47:16 +00:00
|
|
|
|
2022-07-02 09:42:17 +00:00
|
|
|
void clear_signal_handlers_for_exec();
|
2020-12-22 06:21:58 +00:00
|
|
|
void clear_futex_queues_on_exec();
|
|
|
|
|
2021-10-28 20:33:41 +00:00
|
|
|
ErrorOr<void> remap_range_as_stack(FlatPtr address, size_t size);
|
|
|
|
|
2021-12-04 12:26:13 +00:00
|
|
|
ErrorOr<FlatPtr> read_impl(int fd, Userspace<u8*> buffer, size_t size);
|
|
|
|
|
2021-08-10 17:51:28 +00:00
|
|
|
public:
|
2021-08-14 12:43:34 +00:00
|
|
|
NonnullRefPtr<ProcessProcFSTraits> procfs_traits() const { return *m_procfs_traits; }
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<void> procfs_get_fds_stats(KBufferBuilder& builder) const;
|
|
|
|
ErrorOr<void> procfs_get_perf_events(KBufferBuilder& builder) const;
|
|
|
|
ErrorOr<void> procfs_get_unveil_stats(KBufferBuilder& builder) const;
|
|
|
|
ErrorOr<void> procfs_get_pledge_stats(KBufferBuilder& builder) const;
|
|
|
|
ErrorOr<void> procfs_get_virtual_memory_stats(KBufferBuilder& builder) const;
|
|
|
|
ErrorOr<void> procfs_get_binary_link(KBufferBuilder& builder) const;
|
|
|
|
ErrorOr<void> procfs_get_current_work_directory_link(KBufferBuilder& builder) const;
|
2022-06-13 05:04:35 +00:00
|
|
|
ErrorOr<void> procfs_get_command_line(KBufferBuilder& builder) const;
|
2021-08-10 17:51:28 +00:00
|
|
|
mode_t binary_link_required_mode() const;
|
2022-01-15 22:04:24 +00:00
|
|
|
ErrorOr<void> procfs_get_thread_stack(ThreadID thread_id, KBufferBuilder& builder) const;
|
2021-11-18 14:11:31 +00:00
|
|
|
ErrorOr<void> traverse_stacks_directory(FileSystemID, Function<ErrorOr<void>(FileSystem::DirectoryEntryView const&)> callback) const;
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<NonnullRefPtr<Inode>> lookup_stacks_directory(ProcFS const&, StringView name) const;
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<size_t> procfs_get_file_description_link(unsigned fd, KBufferBuilder& builder) const;
|
2021-11-18 14:11:31 +00:00
|
|
|
ErrorOr<void> traverse_file_descriptions_directory(FileSystemID, Function<ErrorOr<void>(FileSystem::DirectoryEntryView const&)> callback) const;
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<NonnullRefPtr<Inode>> lookup_file_descriptions_directory(ProcFS const&, StringView name) const;
|
2022-05-04 15:22:18 +00:00
|
|
|
ErrorOr<NonnullRefPtr<Inode>> lookup_children_directory(ProcFS const&, StringView name) const;
|
|
|
|
ErrorOr<void> traverse_children_directory(FileSystemID, Function<ErrorOr<void>(FileSystem::DirectoryEntryView const&)> callback) const;
|
|
|
|
ErrorOr<size_t> procfs_get_child_proccess_link(ProcessID child_pid, KBufferBuilder& builder) const;
|
2021-08-10 17:51:28 +00:00
|
|
|
|
|
|
|
private:
|
2021-04-25 21:42:36 +00:00
|
|
|
// Returns the performance event buffer that events for this process
// should currently be recorded into:
//  - the global buffer if all-thread profiling is active,
//  - this process's own buffer if per-process profiling is enabled,
//  - nullptr otherwise (profiling disabled).
inline PerformanceEventBuffer* current_perf_events_buffer()
{
    if (g_profiling_all_threads)
        return g_global_perf_events;
    if (m_profiling)
        return m_perf_event_buffer.ptr();
    return nullptr;
}
|
|
|
|
|
2022-01-16 16:06:33 +00:00
|
|
|
IntrusiveListNode<Process> m_list_node;
|
2018-10-16 09:01:38 +00:00
|
|
|
|
2021-09-07 10:53:28 +00:00
|
|
|
NonnullOwnPtr<KString> m_name;
|
2019-03-23 21:03:17 +00:00
|
|
|
|
2021-08-06 11:57:39 +00:00
|
|
|
OwnPtr<Memory::AddressSpace> m_space;
|
2021-02-08 14:45:40 +00:00
|
|
|
|
2020-08-15 19:13:19 +00:00
|
|
|
RefPtr<ProcessGroup> m_pg;
|
2019-03-23 21:03:17 +00:00
|
|
|
|
2021-07-07 16:29:19 +00:00
|
|
|
AtomicEdgeAction<u32> m_protected_data_refs;
|
2021-03-10 18:59:46 +00:00
|
|
|
void protect_data();
|
|
|
|
void unprotect_data();
|
2020-06-17 12:58:00 +00:00
|
|
|
|
2020-12-09 04:18:45 +00:00
|
|
|
OwnPtr<ThreadTracer> m_tracer;
|
|
|
|
|
Kernel: Introduce the new ProcFS design
The new ProcFS design consists of two main parts:
1. The representative ProcFS class, which is derived from the FS class.
The ProcFS and its inodes are much more lean - merely 3 classes to
represent the common type of inodes - regular files, symbolic links and
directories. They're backed by a ProcFSExposedComponent object, which
is responsible for the functional operation behind the scenes.
2. The backend of the ProcFS - the ProcFSComponentsRegistrar class
and all derived classes from the ProcFSExposedComponent class. These
together form the entire backend and handle all the functions you can
expect from the ProcFS.
The ProcFSExposedComponent derived classes split to 3 types in the
manner of lifetime in the kernel:
1. Persistent objects - this category includes all basic objects, like
the root folder, /proc/bus folder, main blob files in the root folders,
etc. These objects are persistent and cannot die ever.
2. Semi-persistent objects - this category includes all PID folders,
and subdirectories to the PID folders. It also includes exposed objects
like the unveil JSON'ed blob. These object are persistent as long as the
the responsible process they represent is still alive.
3. Dynamic objects - this category includes files in the subdirectories
of a PID folder, like /proc/PID/fd/* or /proc/PID/stacks/*. Essentially,
these objects are always created dynamically and when no longer in need
after being used, they're deallocated.
Nevertheless, the new allocated backend objects and inodes try to use
the same InodeIndex if possible - this might change only when a thread
dies and a new thread is born with a new thread stack, or when a file
descriptor is closed and a new one within the same file descriptor
number is opened. This is needed to actually be able to do something
useful with these objects.
The new design assures that many ProcFS instances can be used at once,
with one backend for usage for all instances.
2021-06-12 01:23:58 +00:00
|
|
|
public:
|
2021-09-07 11:39:11 +00:00
|
|
|
class OpenFileDescriptionAndFlags {
|
2020-07-30 21:50:31 +00:00
|
|
|
public:
|
Kernel: Introduce the new ProcFS design
The new ProcFS design consists of two main parts:
1. The representative ProcFS class, which is derived from the FS class.
The ProcFS and its inodes are much more lean - merely 3 classes to
represent the common type of inodes - regular files, symbolic links and
directories. They're backed by a ProcFSExposedComponent object, which
is responsible for the functional operation behind the scenes.
2. The backend of the ProcFS - the ProcFSComponentsRegistrar class
and all derived classes from the ProcFSExposedComponent class. These
together form the entire backend and handle all the functions you can
expect from the ProcFS.
The ProcFSExposedComponent derived classes split to 3 types in the
manner of lifetime in the kernel:
1. Persistent objects - this category includes all basic objects, like
the root folder, /proc/bus folder, main blob files in the root folders,
etc. These objects are persistent and cannot die ever.
2. Semi-persistent objects - this category includes all PID folders,
and subdirectories to the PID folders. It also includes exposed objects
like the unveil JSON'ed blob. These object are persistent as long as the
the responsible process they represent is still alive.
3. Dynamic objects - this category includes files in the subdirectories
of a PID folder, like /proc/PID/fd/* or /proc/PID/stacks/*. Essentially,
these objects are always created dynamically and when no longer in need
after being used, they're deallocated.
Nevertheless, the new allocated backend objects and inodes try to use
the same InodeIndex if possible - this might change only when a thread
dies and a new thread is born with a new thread stack, or when a file
descriptor is closed and a new one within the same file descriptor
number is opened. This is needed to actually be able to do something
useful with these objects.
The new design assures that many ProcFS instances can be used at once,
with one backend for usage for all instances.
2021-06-12 01:23:58 +00:00
|
|
|
bool is_valid() const { return !m_description.is_null(); }
|
2021-07-28 06:59:24 +00:00
|
|
|
bool is_allocated() const { return m_is_allocated; }
|
|
|
|
void allocate()
|
|
|
|
{
|
|
|
|
VERIFY(!m_is_allocated);
|
|
|
|
VERIFY(!is_valid());
|
|
|
|
m_is_allocated = true;
|
|
|
|
}
|
|
|
|
void deallocate()
|
|
|
|
{
|
|
|
|
VERIFY(m_is_allocated);
|
|
|
|
VERIFY(!is_valid());
|
|
|
|
m_is_allocated = false;
|
|
|
|
}
|
Kernel: Introduce the new ProcFS design
The new ProcFS design consists of two main parts:
1. The representative ProcFS class, which is derived from the FS class.
The ProcFS and its inodes are much more lean - merely 3 classes to
represent the common type of inodes - regular files, symbolic links and
directories. They're backed by a ProcFSExposedComponent object, which
is responsible for the functional operation behind the scenes.
2. The backend of the ProcFS - the ProcFSComponentsRegistrar class
and all derived classes from the ProcFSExposedComponent class. These
together form the entire backend and handle all the functions you can
expect from the ProcFS.
The ProcFSExposedComponent derived classes split to 3 types in the
manner of lifetime in the kernel:
1. Persistent objects - this category includes all basic objects, like
the root folder, /proc/bus folder, main blob files in the root folders,
etc. These objects are persistent and cannot die ever.
2. Semi-persistent objects - this category includes all PID folders,
and subdirectories to the PID folders. It also includes exposed objects
like the unveil JSON'ed blob. These object are persistent as long as the
the responsible process they represent is still alive.
3. Dynamic objects - this category includes files in the subdirectories
of a PID folder, like /proc/PID/fd/* or /proc/PID/stacks/*. Essentially,
these objects are always created dynamically and when no longer in need
after being used, they're deallocated.
Nevertheless, the new allocated backend objects and inodes try to use
the same InodeIndex if possible - this might change only when a thread
dies and a new thread is born with a new thread stack, or when a file
descriptor is closed and a new one within the same file descriptor
number is opened. This is needed to actually be able to do something
useful with these objects.
The new design assures that many ProcFS instances can be used at once,
with one backend for usage for all instances.
2021-06-12 01:23:58 +00:00
|
|
|
|
2021-09-07 11:39:11 +00:00
|
|
|
OpenFileDescription* description() { return m_description; }
|
2022-04-01 17:58:27 +00:00
|
|
|
OpenFileDescription const* description() const { return m_description; }
|
2020-07-30 21:50:31 +00:00
|
|
|
u32 flags() const { return m_flags; }
|
|
|
|
void set_flags(u32 flags) { m_flags = flags; }
|
|
|
|
|
2019-04-29 02:55:54 +00:00
|
|
|
void clear();
|
2021-09-07 11:39:11 +00:00
|
|
|
void set(NonnullRefPtr<OpenFileDescription>&&, u32 flags = 0);
|
2020-07-30 21:50:31 +00:00
|
|
|
|
|
|
|
private:
|
2021-09-07 11:39:11 +00:00
|
|
|
RefPtr<OpenFileDescription> m_description;
|
2021-07-28 06:59:24 +00:00
|
|
|
bool m_is_allocated { false };
|
2020-07-30 21:50:31 +00:00
|
|
|
u32 m_flags { 0 };
|
2018-11-13 00:36:31 +00:00
|
|
|
};
|
Kernel: Introduce the new ProcFS design
The new ProcFS design consists of two main parts:
1. The representative ProcFS class, which is derived from the FS class.
The ProcFS and its inodes are much more lean - merely 3 classes to
represent the common type of inodes - regular files, symbolic links and
directories. They're backed by a ProcFSExposedComponent object, which
is responsible for the functional operation behind the scenes.
2. The backend of the ProcFS - the ProcFSComponentsRegistrar class
and all derived classes from the ProcFSExposedComponent class. These
together form the entire backend and handle all the functions you can
expect from the ProcFS.
The ProcFSExposedComponent derived classes split to 3 types in the
manner of lifetime in the kernel:
1. Persistent objects - this category includes all basic objects, like
the root folder, /proc/bus folder, main blob files in the root folders,
etc. These objects are persistent and cannot die ever.
2. Semi-persistent objects - this category includes all PID folders,
and subdirectories to the PID folders. It also includes exposed objects
like the unveil JSON'ed blob. These object are persistent as long as the
the responsible process they represent is still alive.
3. Dynamic objects - this category includes files in the subdirectories
of a PID folder, like /proc/PID/fd/* or /proc/PID/stacks/*. Essentially,
these objects are always created dynamically and when no longer in need
after being used, they're deallocated.
Nevertheless, the new allocated backend objects and inodes try to use
the same InodeIndex if possible - this might change only when a thread
dies and a new thread is born with a new thread stack, or when a file
descriptor is closed and a new one within the same file descriptor
number is opened. This is needed to actually be able to do something
useful with these objects.
The new design assures that many ProcFS instances can be used at once,
with one backend for usage for all instances.
2021-06-12 01:23:58 +00:00
|
|
|
|
2021-07-28 06:59:24 +00:00
|
|
|
class ScopedDescriptionAllocation;
|
2021-09-07 11:39:11 +00:00
|
|
|
class OpenFileDescriptions {
|
|
|
|
AK_MAKE_NONCOPYABLE(OpenFileDescriptions);
|
2022-01-29 00:22:28 +00:00
|
|
|
AK_MAKE_NONMOVABLE(OpenFileDescriptions);
|
2021-06-22 18:22:17 +00:00
|
|
|
friend class Process;
|
|
|
|
|
|
|
|
public:
|
2022-01-29 00:22:28 +00:00
|
|
|
OpenFileDescriptions() { }
|
2022-04-01 17:58:27 +00:00
|
|
|
ALWAYS_INLINE OpenFileDescriptionAndFlags const& operator[](size_t i) const { return at(i); }
|
2021-09-07 11:39:11 +00:00
|
|
|
ALWAYS_INLINE OpenFileDescriptionAndFlags& operator[](size_t i) { return at(i); }
|
2021-06-22 18:22:17 +00:00
|
|
|
|
2022-04-01 17:58:27 +00:00
|
|
|
ErrorOr<void> try_clone(Kernel::Process::OpenFileDescriptions const& other)
|
2021-06-22 18:22:17 +00:00
|
|
|
{
|
2021-11-10 10:55:37 +00:00
|
|
|
TRY(try_resize(other.m_fds_metadatas.size()));
|
2021-08-13 08:37:07 +00:00
|
|
|
|
|
|
|
for (size_t i = 0; i < other.m_fds_metadatas.size(); ++i) {
|
|
|
|
m_fds_metadatas[i] = other.m_fds_metadatas[i];
|
|
|
|
}
|
2021-11-07 23:51:39 +00:00
|
|
|
return {};
|
2021-06-22 18:22:17 +00:00
|
|
|
}
|
|
|
|
|
2022-04-01 17:58:27 +00:00
|
|
|
OpenFileDescriptionAndFlags const& at(size_t i) const;
|
2021-09-07 11:39:11 +00:00
|
|
|
OpenFileDescriptionAndFlags& at(size_t i);
|
2021-06-22 18:22:17 +00:00
|
|
|
|
2021-09-07 11:39:11 +00:00
|
|
|
OpenFileDescriptionAndFlags const* get_if_valid(size_t i) const;
|
|
|
|
OpenFileDescriptionAndFlags* get_if_valid(size_t i);
|
2021-08-16 02:02:48 +00:00
|
|
|
|
2022-04-01 17:58:27 +00:00
|
|
|
void enumerate(Function<void(OpenFileDescriptionAndFlags const&)>) const;
|
|
|
|
ErrorOr<void> try_enumerate(Function<ErrorOr<void>(OpenFileDescriptionAndFlags const&)>) const;
|
2021-09-07 11:39:11 +00:00
|
|
|
void change_each(Function<void(OpenFileDescriptionAndFlags&)>);
|
2021-06-22 18:22:17 +00:00
|
|
|
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<ScopedDescriptionAllocation> allocate(int first_candidate_fd = 0);
|
2021-06-22 18:22:17 +00:00
|
|
|
size_t open_count() const;
|
|
|
|
|
2021-11-10 10:55:37 +00:00
|
|
|
ErrorOr<void> try_resize(size_t size) { return m_fds_metadatas.try_resize(size); }
|
2021-06-22 18:22:17 +00:00
|
|
|
|
2021-10-31 22:45:01 +00:00
|
|
|
static constexpr size_t max_open()
|
2021-06-22 18:22:17 +00:00
|
|
|
{
|
2021-10-31 22:45:01 +00:00
|
|
|
return s_max_open_file_descriptors;
|
2021-06-22 18:22:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void clear()
|
|
|
|
{
|
|
|
|
m_fds_metadatas.clear();
|
|
|
|
}
|
|
|
|
|
2021-11-07 23:51:39 +00:00
|
|
|
ErrorOr<NonnullRefPtr<OpenFileDescription>> open_file_description(int fd) const;
|
2021-06-22 18:22:17 +00:00
|
|
|
|
|
|
|
private:
|
2021-10-31 22:45:01 +00:00
|
|
|
static constexpr size_t s_max_open_file_descriptors { FD_SETSIZE };
|
2021-09-07 11:39:11 +00:00
|
|
|
Vector<OpenFileDescriptionAndFlags> m_fds_metadatas;
|
2021-06-22 18:22:17 +00:00
|
|
|
};
|
|
|
|
|
2021-07-28 06:59:24 +00:00
|
|
|
class ScopedDescriptionAllocation {
|
|
|
|
AK_MAKE_NONCOPYABLE(ScopedDescriptionAllocation);
|
|
|
|
|
|
|
|
public:
|
|
|
|
ScopedDescriptionAllocation() = default;
|
2021-09-07 11:39:11 +00:00
|
|
|
ScopedDescriptionAllocation(int tracked_fd, OpenFileDescriptionAndFlags* description)
|
2021-07-28 06:59:24 +00:00
|
|
|
: fd(tracked_fd)
|
|
|
|
, m_description(description)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
ScopedDescriptionAllocation(ScopedDescriptionAllocation&& other)
|
|
|
|
: fd(other.fd)
|
|
|
|
{
|
|
|
|
// Take over the responsibility of tracking to deallocation.
|
|
|
|
swap(m_description, other.m_description);
|
|
|
|
}
|
|
|
|
|
2022-01-29 00:22:28 +00:00
|
|
|
ScopedDescriptionAllocation& operator=(ScopedDescriptionAllocation&& other)
|
|
|
|
{
|
|
|
|
if (this != &other) {
|
|
|
|
m_description = exchange(other.m_description, nullptr);
|
|
|
|
fd = exchange(other.fd, -1);
|
|
|
|
}
|
|
|
|
return *this;
|
|
|
|
}
|
|
|
|
|
2021-07-28 06:59:24 +00:00
|
|
|
~ScopedDescriptionAllocation()
|
|
|
|
{
|
|
|
|
if (m_description && m_description->is_allocated() && !m_description->is_valid()) {
|
|
|
|
m_description->deallocate();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-01-29 00:22:28 +00:00
|
|
|
int fd { -1 };
|
2021-07-28 06:59:24 +00:00
|
|
|
|
|
|
|
private:
|
2021-09-07 11:39:11 +00:00
|
|
|
OpenFileDescriptionAndFlags* m_description { nullptr };
|
2021-07-28 06:59:24 +00:00
|
|
|
};
|
|
|
|
|
2021-08-14 12:43:34 +00:00
|
|
|
// Glue object that exposes a Process in /proc via the ProcFS backend.
class ProcessProcFSTraits : public ProcFSExposedComponent {
public:
    // Fallible factory; returns ENOMEM if the traits object cannot be allocated.
    static ErrorOr<NonnullRefPtr<ProcessProcFSTraits>> try_create(Badge<Process>, WeakPtr<Process> process)
    {
        return adopt_nonnull_ref_or_enomem(new (nothrow) ProcessProcFSTraits(move(process)));
    }

    virtual InodeIndex component_index() const override;
    virtual ErrorOr<NonnullRefPtr<Inode>> to_inode(ProcFS const& procfs_instance) const override;
    virtual ErrorOr<void> traverse_as_directory(FileSystemID, Function<ErrorOr<void>(FileSystem::DirectoryEntryView const&)>) const override;
    // The /proc entry is world-readable and world-searchable.
    virtual mode_t required_mode() const override { return 0555; }

    // Ownership of the /proc entry; presumably derived from the process's
    // credentials — confirm in the implementation.
    virtual UserID owner_user() const override;
    virtual GroupID owner_group() const override;

private:
    explicit ProcessProcFSTraits(WeakPtr<Process> process)
        : m_process(move(process))
    {
    }

    // NOTE: We need to weakly hold on to the process, because otherwise
    // we would be creating a reference cycle.
    WeakPtr<Process> m_process;
};
|
|
|
|
|
2022-01-29 00:29:07 +00:00
|
|
|
// Mutex-protected accessors for this process's open file descriptor table.
MutexProtected<OpenFileDescriptions>& fds() { return m_fds; }
MutexProtected<OpenFileDescriptions> const& fds() const { return m_fds; }
|
2022-01-29 00:22:28 +00:00
|
|
|
|
|
|
|
// Looks up the open file description for the given fd under a shared lock.
ErrorOr<NonnullRefPtr<OpenFileDescription>> open_file_description(int fd)
{
    auto lookup = [fd](auto& descriptions) { return descriptions.open_file_description(fd); };
    return m_fds.with_shared(lookup);
}
|
|
|
|
|
|
|
|
// Const overload: resolves fd to its description while holding the table shared.
ErrorOr<NonnullRefPtr<OpenFileDescription>> open_file_description(int fd) const
{
    return m_fds.with_shared([fd](auto& table) { return table.open_file_description(fd); });
}
|
|
|
|
|
|
|
|
// Reserves the lowest available fd number, holding the table exclusively.
ErrorOr<ScopedDescriptionAllocation> allocate_fd()
{
    return m_fds.with_exclusive([](auto& table) { return table.allocate(); });
}
|
Kernel: Introduce the new ProcFS design
The new ProcFS design consists of two main parts:
1. The representative ProcFS class, which is derived from the FS class.
The ProcFS and its inodes are much more lean - merely 3 classes to
represent the common type of inodes - regular files, symbolic links and
directories. They're backed by a ProcFSExposedComponent object, which
is responsible for the functional operation behind the scenes.
2. The backend of the ProcFS - the ProcFSComponentsRegistrar class
and all derived classes from the ProcFSExposedComponent class. These
together form the entire backend and handle all the functions you can
expect from the ProcFS.
The ProcFSExposedComponent derived classes split to 3 types in the
manner of lifetime in the kernel:
1. Persistent objects - this category includes all basic objects, like
the root folder, /proc/bus folder, main blob files in the root folders,
etc. These objects are persistent and cannot die ever.
2. Semi-persistent objects - this category includes all PID folders,
and subdirectories to the PID folders. It also includes exposed objects
like the unveil JSON'ed blob. These objects are persistent as long as
the responsible process they represent is still alive.
3. Dynamic objects - this category includes files in the subdirectories
of a PID folder, like /proc/PID/fd/* or /proc/PID/stacks/*. Essentially,
these objects are always created dynamically and when no longer in need
after being used, they're deallocated.
Nevertheless, the new allocated backend objects and inodes try to use
the same InodeIndex if possible - this might change only when a thread
dies and a new thread is born with a new thread stack, or when a file
descriptor is closed and a new one within the same file descriptor
number is opened. This is needed to actually be able to do something
useful with these objects.
The new design assures that many ProcFS instances can be used at once,
with one backend for usage for all instances.
2021-06-12 01:23:58 +00:00
|
|
|
|
|
|
|
private:
|
2021-08-21 23:37:17 +00:00
|
|
|
// Spinlock-protected intrusive list of this process's threads.
SpinlockProtected<Thread::ListInProcess>& thread_list() { return m_thread_list; }
SpinlockProtected<Thread::ListInProcess> const& thread_list() const { return m_thread_list; }

SpinlockProtected<Thread::ListInProcess> m_thread_list;
|
2018-11-07 17:30:59 +00:00
|
|
|
|
2022-01-29 00:29:07 +00:00
|
|
|
// The open file descriptor table; taken shared for lookups and exclusive
// for allocation (see open_file_description() / allocate_fd() above).
MutexProtected<OpenFileDescriptions> m_fds;

bool const m_is_kernel_process;
Atomic<State> m_state { State::Running };
bool m_profiling { false };
Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_stopped { false };
bool m_should_generate_coredump { false };

// The executable this process was loaded from, if any.
RefPtr<Custody> m_executable;
// Current working directory.
SpinlockProtected<RefPtr<Custody>> m_current_directory;

NonnullOwnPtrVector<KString> m_arguments;
NonnullOwnPtrVector<KString> m_environment;

RefPtr<TTY> m_tty;

// Master TLS template; presumably copied into each new thread's TLS area —
// confirm in the thread setup code.
WeakPtr<Memory::Region> m_master_tls_region;
size_t m_master_tls_size { 0 };
size_t m_master_tls_alignment { 0 };

// The per-process "big lock" serializing most syscalls, plus a dedicated
// lock for ptrace state.
Mutex m_big_lock { "Process"sv, Mutex::MutexBehavior::BigLock };
Mutex m_ptrace_lock { "ptrace"sv };

// Timer presumably backing alarm() — confirm against the syscall implementation.
RefPtr<Timer> m_alarm_timer;

// Unveil state (see the unveil() description in the surrounding commentary).
SpinlockProtected<UnveilData> m_unveil_data;
|
Kernel: Add a basic implementation of unveil()
This syscall is a complement to pledge() and adds the same sort of
incremental relinquishing of capabilities for filesystem access.
The first call to unveil() will "drop a veil" on the process, and from
now on, only unveiled parts of the filesystem are visible to it.
Each call to unveil() specifies a path to either a directory or a file
along with permissions for that path. The permissions are a combination
of the following:
- r: Read access (like the "rpath" promise)
- w: Write access (like the "wpath" promise)
- x: Execute access
- c: Create/remove access (like the "cpath" promise)
Attempts to open a path that has not been unveiled will fail with
ENOENT. If the unveiled path lacks sufficient permissions, it will fail
with EACCES.
Like pledge(), subsequent calls to unveil() with the same path can only
remove permissions, not add them.
Once you call unveil(nullptr, nullptr), the veil is locked, and it's no
longer possible to unveil any more paths for the process, ever.
This concept comes from OpenBSD, and their implementation does various
things differently, I'm sure. This is just a first implementation for
SerenityOS, and we'll keep improving on it as we go. :^)
2020-01-20 21:12:04 +00:00
|
|
|
|
2020-02-02 19:26:27 +00:00
|
|
|
OwnPtr<PerformanceEventBuffer> m_perf_event_buffer;

// futex() wait queues; m_futex_lock presumably guards them — confirm in the
// futex syscall implementation.
FutexQueues m_futex_queues;
Spinlock m_futex_lock;

// This member is used in the implementation of ptrace's PT_TRACEME flag.
// If it is set to true, the process will stop at the next execve syscall
// and wait for a tracer to attach.
bool m_wait_for_tracer_at_next_execve { false };

Thread::WaitBlockerSet m_wait_blocker_set;

// Key/value metadata attached to a coredump (at most 4 entries).
struct CoredumpProperty {
    OwnPtr<KString> key;
    OwnPtr<KString> value;
};
SpinlockProtected<Array<CoredumpProperty, 4>> m_coredump_properties;
NonnullRefPtrVector<Thread> m_threads_for_coredump;

mutable RefPtr<ProcessProcFSTraits> m_procfs_traits;

// Per-signal disposition (handler address, flags, and signal mask).
struct SignalActionData {
    VirtualAddress handler_or_sigaction;
    int flags { 0 };
    u32 mask { 0 };
};
Array<SignalActionData, NSIG> m_signal_action_data;

// The protected values must fit in (and are padded to) exactly one page so
// they can be write-protected independently of the rest of the object.
static_assert(sizeof(ProtectedValues) < (PAGE_SIZE));
alignas(4096) ProtectedValues m_protected_values;
u8 m_protected_values_padding[PAGE_SIZE - sizeof(ProtectedValues)];
|
|
|
|
|
2021-06-06 21:40:03 +00:00
|
|
|
public:
// Intrusive list type linking every Process in the system.
using List = IntrusiveListRelaxedConst<&Process::m_list_node>;
// Global, spinlock-protected list of all live Process objects.
static SpinlockProtected<Process::List>& all_instances();
|
2018-10-16 09:01:38 +00:00
|
|
|
};
|
|
|
|
|
2021-08-07 19:30:06 +00:00
|
|
|
// Note: Process object should be 2 pages of 4096 bytes each.
|
|
|
|
// It's not expected that the Process object will expand further because the first
|
|
|
|
// page is used for all unprotected values (which should be plenty of space for them).
|
|
|
|
// The second page is being used exclusively for write-protected values.
|
2021-09-05 07:57:53 +00:00
|
|
|
static_assert(AssertSize<Process, (PAGE_SIZE * 2)>());
|
2021-08-07 19:30:06 +00:00
|
|
|
|
2021-08-21 23:37:17 +00:00
|
|
|
extern RecursiveSpinlock g_profiling_lock;
|
2021-07-24 16:43:29 +00:00
|
|
|
|
2021-05-16 09:36:52 +00:00
|
|
|
// Invokes the callback for every process in the system, stopping early when
// it returns IterationDecision::Break.
template<IteratorFunction<Process&> Callback>
inline void Process::for_each(Callback callback)
{
    VERIFY_INTERRUPTS_DISABLED();
    Process::all_instances().with([&](auto const& list) {
        for (auto it = list.begin(); it != list.end();) {
            auto& process = *it;
            // Advance the iterator before invoking the callback, so iteration
            // survives if the callback invalidates the current element.
            ++it;
            if (callback(process) == IterationDecision::Break)
                break;
        }
    });
}
|
|
|
|
|
2021-05-16 09:36:52 +00:00
|
|
|
// Invokes the callback for every direct child of this process, and for any
// process that has a thread currently traced by us.
template<IteratorFunction<Process&> Callback>
inline void Process::for_each_child(Callback callback)
{
    ProcessID my_pid = pid();
    Process::all_instances().with([&](auto const& list) {
        for (auto it = list.begin(); it != list.end();) {
            auto& process = *it;
            // Advance before the callback, which may invalidate the element.
            ++it;
            if (process.ppid() == my_pid || process.has_tracee_thread(pid())) {
                if (callback(process) == IterationDecision::Break)
                    break;
            }
        }
    });
}
|
|
|
|
|
2021-05-16 09:36:52 +00:00
|
|
|
template<IteratorFunction<Thread&> Callback>
|
2021-01-23 06:24:33 +00:00
|
|
|
inline IterationDecision Process::for_each_thread(Callback callback) const
|
2018-11-08 15:09:05 +00:00
|
|
|
{
|
2021-08-07 11:28:18 +00:00
|
|
|
return thread_list().with([&](auto& thread_list) -> IterationDecision {
|
|
|
|
for (auto& thread : thread_list) {
|
|
|
|
IterationDecision decision = callback(thread);
|
|
|
|
if (decision != IterationDecision::Continue)
|
|
|
|
return decision;
|
|
|
|
}
|
|
|
|
return IterationDecision::Continue;
|
|
|
|
});
|
2018-11-08 15:09:05 +00:00
|
|
|
}
|
|
|
|
|
2021-05-16 09:36:52 +00:00
|
|
|
template<IteratorFunction<Thread&> Callback>
|
2021-03-11 13:12:55 +00:00
|
|
|
inline IterationDecision Process::for_each_thread(Callback callback)
|
|
|
|
{
|
2021-08-07 11:28:18 +00:00
|
|
|
return thread_list().with([&](auto& thread_list) -> IterationDecision {
|
|
|
|
for (auto& thread : thread_list) {
|
|
|
|
IterationDecision decision = callback(thread);
|
|
|
|
if (decision != IterationDecision::Continue)
|
|
|
|
return decision;
|
|
|
|
}
|
|
|
|
return IterationDecision::Continue;
|
|
|
|
});
|
2021-03-11 13:12:55 +00:00
|
|
|
}
|
|
|
|
|
2021-05-16 09:36:52 +00:00
|
|
|
// Invokes the callback for every living process in the given process group,
// stopping early on IterationDecision::Break.
template<IteratorFunction<Process&> Callback>
inline void Process::for_each_in_pgrp(ProcessGroupID pgid, Callback callback)
{
    Process::all_instances().with([&](auto const& list) {
        for (auto it = list.begin(); it != list.end();) {
            auto& process = *it;
            // Advance before the callback, which may invalidate the element.
            ++it;
            if (!process.is_dead() && process.pgid() == pgid) {
                if (callback(process) == IterationDecision::Break)
                    break;
            }
        }
    });
}
|
2019-02-21 14:45:31 +00:00
|
|
|
|
2021-05-16 09:36:52 +00:00
|
|
|
template<VoidFunction<Process&> Callback>
|
|
|
|
inline void Process::for_each(Callback callback)
|
|
|
|
{
|
|
|
|
return for_each([&](auto& item) {
|
|
|
|
callback(item);
|
|
|
|
return IterationDecision::Continue;
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
template<VoidFunction<Process&> Callback>
|
|
|
|
inline void Process::for_each_child(Callback callback)
|
|
|
|
{
|
|
|
|
return for_each_child([&](auto& item) {
|
|
|
|
callback(item);
|
|
|
|
return IterationDecision::Continue;
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
template<VoidFunction<Thread&> Callback>
|
|
|
|
inline IterationDecision Process::for_each_thread(Callback callback) const
|
|
|
|
{
|
2021-08-07 11:28:18 +00:00
|
|
|
thread_list().with([&](auto& thread_list) {
|
|
|
|
for (auto& thread : thread_list)
|
|
|
|
callback(thread);
|
|
|
|
});
|
2021-05-16 09:36:52 +00:00
|
|
|
return IterationDecision::Continue;
|
|
|
|
}
|
|
|
|
|
2022-02-24 18:01:33 +00:00
|
|
|
// Fallible thread iteration: stops at, and propagates, the first error the
// callback returns.
inline ErrorOr<void> Process::try_for_each_thread(Function<ErrorOr<void>(Thread const&)> callback) const
{
    return thread_list().with([&](auto& list) -> ErrorOr<void> {
        for (auto& current : list)
            TRY(callback(current));
        return {};
    });
}
|
|
|
|
|
2021-05-16 09:36:52 +00:00
|
|
|
template<VoidFunction<Thread&> Callback>
|
|
|
|
inline IterationDecision Process::for_each_thread(Callback callback)
|
|
|
|
{
|
2021-08-07 11:28:18 +00:00
|
|
|
thread_list().with([&](auto& thread_list) {
|
|
|
|
for (auto& thread : thread_list)
|
|
|
|
callback(thread);
|
|
|
|
});
|
2021-05-16 09:36:52 +00:00
|
|
|
return IterationDecision::Continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
template<VoidFunction<Process&> Callback>
|
|
|
|
inline void Process::for_each_in_pgrp(ProcessGroupID pgid, Callback callback)
|
|
|
|
{
|
|
|
|
return for_each_in_pgrp(pgid, [&](auto& item) {
|
|
|
|
callback(item);
|
|
|
|
return IterationDecision::Continue;
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
2022-04-01 17:58:27 +00:00
|
|
|
// Checks read permission against the process's effective credentials.
inline bool InodeMetadata::may_read(Process const& process) const
{
    return may_read(process.euid(), process.egid(), process.extra_gids());
}
|
|
|
|
|
2022-04-01 17:58:27 +00:00
|
|
|
// Checks write permission against the process's effective credentials.
inline bool InodeMetadata::may_write(Process const& process) const
{
    return may_write(process.euid(), process.egid(), process.extra_gids());
}
|
|
|
|
|
2022-04-01 17:58:27 +00:00
|
|
|
// Checks execute permission against the process's effective credentials.
inline bool InodeMetadata::may_execute(Process const& process) const
{
    return may_execute(process.euid(), process.egid(), process.extra_gids());
}
|
2019-03-23 21:03:17 +00:00
|
|
|
|
2020-08-08 15:32:34 +00:00
|
|
|
// A thread's pid is that of its owning process.
inline ProcessID Thread::pid() const
{
    return m_process->pid();
}
|
2019-07-08 16:58:19 +00:00
|
|
|
|
2020-02-16 00:27:42 +00:00
|
|
|
}
|
2020-09-12 03:11:07 +00:00
|
|
|
|
2021-07-18 16:18:35 +00:00
|
|
|
// Asserts that the current thread holds the given process's big lock;
// used to document locking requirements at syscall entry points.
#define VERIFY_PROCESS_BIG_LOCK_ACQUIRED(process) \
    VERIFY(process->big_lock().is_exclusively_locked_by_current_thread());

// Asserts the opposite: the current thread must NOT hold the big lock.
#define VERIFY_NO_PROCESS_BIG_LOCK(process) \
    VERIFY(!process->big_lock().is_exclusively_locked_by_current_thread());
|
2021-07-18 16:18:35 +00:00
|
|
|
|
2022-04-01 17:58:27 +00:00
|
|
|
// Copies a syscall string argument out of userspace into a kernel-owned
// KString, propagating any error from the underlying copy helper.
inline static ErrorOr<NonnullOwnPtr<KString>> try_copy_kstring_from_user(Kernel::Syscall::StringArgument const& string)
{
    // Use a named cast instead of a C-style cast: we are deliberately
    // reinterpreting the raw userspace pointer as its address value.
    Userspace<char const*> characters(reinterpret_cast<FlatPtr>(string.characters));
    return try_copy_kstring_from_user(characters, string.length);
}
|
|
|
|
|
2021-01-08 23:11:15 +00:00
|
|
|
// Lets a Process be used directly with AK::format/dbgln; renders as "name(pid)".
template<>
struct AK::Formatter<Kernel::Process> : AK::Formatter<FormatString> {
    ErrorOr<void> format(FormatBuilder& builder, Kernel::Process const& value)
    {
        return AK::Formatter<FormatString>::format(builder, "{}({})"sv, value.name(), value.pid().value());
    }
};
|