Process.h
/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Concepts.h>
#include <AK/HashMap.h>
#include <AK/IntrusiveList.h>
#include <AK/IntrusiveListRelaxedConst.h>
#include <AK/NonnullRefPtrVector.h>
#include <AK/OwnPtr.h>
#include <AK/String.h>
#include <AK/Userspace.h>
#include <AK/Variant.h>
#include <AK/WeakPtr.h>
#include <AK/Weakable.h>
#include <Kernel/API/Syscall.h>
#include <Kernel/Assertions.h>
#include <Kernel/AtomicEdgeAction.h>
#include <Kernel/FileSystem/InodeMetadata.h>
#include <Kernel/FileSystem/OpenFileDescription.h>
#include <Kernel/FileSystem/UnveilNode.h>
#include <Kernel/Forward.h>
#include <Kernel/FutexQueue.h>
#include <Kernel/Locking/Mutex.h>
#include <Kernel/Locking/MutexProtected.h>
#include <Kernel/Memory/AddressSpace.h>
#include <Kernel/PerformanceEventBuffer.h>
#include <Kernel/ProcessExposed.h>
#include <Kernel/ProcessGroup.h>
#include <Kernel/StdLib.h>
#include <Kernel/Thread.h>
#include <Kernel/UnixTypes.h>
#include <LibC/elf.h>
#include <LibC/signal_numbers.h>

namespace Kernel {

MutexProtected<OwnPtr<KString>>& hostname();
Time kgettimeofday();

#define ENUMERATE_PLEDGE_PROMISES         \
    __ENUMERATE_PLEDGE_PROMISE(stdio)     \
    __ENUMERATE_PLEDGE_PROMISE(rpath)     \
    __ENUMERATE_PLEDGE_PROMISE(wpath)     \
    __ENUMERATE_PLEDGE_PROMISE(cpath)     \
    __ENUMERATE_PLEDGE_PROMISE(dpath)     \
    __ENUMERATE_PLEDGE_PROMISE(inet)      \
    __ENUMERATE_PLEDGE_PROMISE(id)        \
    __ENUMERATE_PLEDGE_PROMISE(proc)      \
    __ENUMERATE_PLEDGE_PROMISE(ptrace)    \
    __ENUMERATE_PLEDGE_PROMISE(exec)      \
    __ENUMERATE_PLEDGE_PROMISE(unix)      \
    __ENUMERATE_PLEDGE_PROMISE(recvfd)    \
    __ENUMERATE_PLEDGE_PROMISE(sendfd)    \
    __ENUMERATE_PLEDGE_PROMISE(fattr)     \
    __ENUMERATE_PLEDGE_PROMISE(tty)       \
    __ENUMERATE_PLEDGE_PROMISE(chown)     \
    __ENUMERATE_PLEDGE_PROMISE(thread)    \
    __ENUMERATE_PLEDGE_PROMISE(video)     \
    __ENUMERATE_PLEDGE_PROMISE(accept)    \
    __ENUMERATE_PLEDGE_PROMISE(settime)   \
    __ENUMERATE_PLEDGE_PROMISE(sigaction) \
    __ENUMERATE_PLEDGE_PROMISE(setkeymap) \
    __ENUMERATE_PLEDGE_PROMISE(prot_exec) \
    __ENUMERATE_PLEDGE_PROMISE(map_fixed) \
    __ENUMERATE_PLEDGE_PROMISE(getkeymap)

enum class Pledge : u32 {
#define __ENUMERATE_PLEDGE_PROMISE(x) x,
    ENUMERATE_PLEDGE_PROMISES
#undef __ENUMERATE_PLEDGE_PROMISE
};
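
// The X-macro above expands to one enumerator per promise, so the enum is
// equivalent to:
//
//     enum class Pledge : u32 { stdio, rpath, wpath, /* ... */ getkeymap, };
//
// Each promise therefore has a stable index, which is used as a bit position in
// the per-process promise bitmasks (see has_promised() below).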

enum class VeilState {
    None,
    Dropped,
    Locked,
};

using FutexQueues = HashMap<FlatPtr, RefPtr<FutexQueue>>;

struct LoadResult;

class Process final
    : public ListedRefCounted<Process, LockType::Spinlock>
    , public Weakable<Process> {
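
    // ProtectedValues lives on its own page at the end of the Process object and is
    // write-protected outside of ProtectedDataMutationScope (see protect_data(),
    // unprotect_data(), and the alignas(4096) member below), so stray kernel writes
    // cannot silently corrupt credentials and other security-sensitive state.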
    class ProtectedValues {
    public:
        ProcessID pid { 0 };
        ProcessID ppid { 0 };
        SessionID sid { 0 };
        UserID euid { 0 };
        GroupID egid { 0 };
        UserID uid { 0 };
        GroupID gid { 0 };
        UserID suid { 0 };
        GroupID sgid { 0 };
        Vector<GroupID> extra_gids;
        bool dumpable { false };
        Atomic<bool> has_promises { false };
        Atomic<u32> promises { 0 };
        Atomic<bool> has_execpromises { false };
        Atomic<u32> execpromises { 0 };
        mode_t umask { 022 };
        VirtualAddress signal_trampoline;
        Atomic<u32> thread_count { 0 };
        u8 termination_status { 0 };
        u8 termination_signal { 0 };
    };

public:
    AK_MAKE_NONCOPYABLE(Process);
    AK_MAKE_NONMOVABLE(Process);
    MAKE_ALIGNED_ALLOCATED(Process, PAGE_SIZE);

    friend class Thread;
    friend class Coredump;

    // Helper class to temporarily unprotect a process's protected data so you can write to it.
    class ProtectedDataMutationScope {
    public:
        explicit ProtectedDataMutationScope(Process& process)
            : m_process(process)
        {
            m_process.unprotect_data();
        }

        ~ProtectedDataMutationScope() { m_process.protect_data(); }

    private:
        Process& m_process;
    };
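
    // Example (illustrative sketch, not the exact body from Process.cpp): a member
    // function that needs to write a protected field wraps the write in a scope so
    // the page is unprotected only for the duration of the mutation:
    //
    //     void Process::set_dumpable(bool dumpable)
    //     {
    //         ProtectedDataMutationScope scope { *this };
    //         m_protected_values.dumpable = dumpable;
    //     }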

    enum class State : u8 {
        Running = 0,
        Dying,
        Dead
    };

public:
    class ProcessProcFSTraits;

    inline static Process& current()
    {
        auto* current_thread = Processor::current_thread();
        VERIFY(current_thread);
        return current_thread->process();
    }

    inline static bool has_current()
    {
        return Processor::current_thread() != nullptr;
    }
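
    // kernel_process_trampoline() adapts an arbitrary callable to the plain
    // void (*)(void*) entry point expected by create_kernel_process(): the templated
    // create_kernel_process() overload below heap-allocates the callable, the new
    // thread invokes it once through this trampoline, and the trampoline deletes it.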
    template<typename EntryFunction>
    static void kernel_process_trampoline(void* data)
    {
        EntryFunction* func = reinterpret_cast<EntryFunction*>(data);
        (*func)();
        delete func;
    }

    enum class RegisterProcess {
        No,
        Yes
    };

    template<typename EntryFunction>
    static RefPtr<Process> create_kernel_process(RefPtr<Thread>& first_thread, NonnullOwnPtr<KString> name, EntryFunction entry, u32 affinity = THREAD_AFFINITY_DEFAULT, RegisterProcess do_register = RegisterProcess::Yes)
    {
        auto* entry_func = new EntryFunction(move(entry));
        return create_kernel_process(first_thread, move(name), &Process::kernel_process_trampoline<EntryFunction>, entry_func, affinity, do_register);
    }

    static RefPtr<Process> create_kernel_process(RefPtr<Thread>& first_thread, NonnullOwnPtr<KString> name, void (*entry)(void*), void* entry_data = nullptr, u32 affinity = THREAD_AFFINITY_DEFAULT, RegisterProcess do_register = RegisterProcess::Yes);
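
    // Example usage (illustrative sketch only; "ExampleWorker" and do_periodic_work()
    // are hypothetical, and KString allocation failure handling is elided):
    //
    //     RefPtr<Thread> worker_thread;
    //     (void)Process::create_kernel_process(worker_thread, KString::must_create("ExampleWorker"), [] {
    //         for (;;)
    //             do_periodic_work();
    //     });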

    static ErrorOr<NonnullRefPtr<Process>> try_create_user_process(RefPtr<Thread>& first_thread, StringView path, UserID, GroupID, NonnullOwnPtrVector<KString> arguments, NonnullOwnPtrVector<KString> environment, TTY*);
    static void register_new(Process&);

    ~Process();

    RefPtr<Thread> create_kernel_thread(void (*entry)(void*), void* entry_data, u32 priority, NonnullOwnPtr<KString> name, u32 affinity = THREAD_AFFINITY_DEFAULT, bool joinable = true);

    bool is_profiling() const { return m_profiling; }
    void set_profiling(bool profiling) { m_profiling = profiling; }

    bool should_generate_coredump() const { return m_should_generate_coredump; }
    void set_should_generate_coredump(bool b) { m_should_generate_coredump = b; }

    bool is_dying() const { return m_state.load(AK::MemoryOrder::memory_order_acquire) != State::Running; }
    bool is_dead() const { return m_state.load(AK::MemoryOrder::memory_order_acquire) == State::Dead; }

    bool is_stopped() const { return m_is_stopped; }
    bool set_stopped(bool stopped) { return m_is_stopped.exchange(stopped); }

    bool is_kernel_process() const { return m_is_kernel_process; }
    bool is_user_process() const { return !m_is_kernel_process; }

    static RefPtr<Process> from_pid(ProcessID);
    static SessionID get_sid_from_pgid(ProcessGroupID pgid);

    StringView name() const { return m_name->view(); }
    ProcessID pid() const { return m_protected_values.pid; }
    SessionID sid() const { return m_protected_values.sid; }
    bool is_session_leader() const { return sid().value() == pid().value(); }
    ProcessGroupID pgid() const { return m_pg ? m_pg->pgid() : 0; }
    bool is_group_leader() const { return pgid().value() == pid().value(); }
    Vector<GroupID> const& extra_gids() const { return m_protected_values.extra_gids; }
    UserID euid() const { return m_protected_values.euid; }
    GroupID egid() const { return m_protected_values.egid; }
    UserID uid() const { return m_protected_values.uid; }
    GroupID gid() const { return m_protected_values.gid; }
    UserID suid() const { return m_protected_values.suid; }
    GroupID sgid() const { return m_protected_values.sgid; }
    ProcessID ppid() const { return m_protected_values.ppid; }
    bool is_dumpable() const { return m_protected_values.dumpable; }
    void set_dumpable(bool);

    mode_t umask() const { return m_protected_values.umask; }

    bool in_group(GroupID) const;

    // Breakable iteration functions
    template<IteratorFunction<Process&> Callback>
    static void for_each(Callback);
    template<IteratorFunction<Process&> Callback>
    static void for_each_in_pgrp(ProcessGroupID, Callback);
    template<IteratorFunction<Process&> Callback>
    void for_each_child(Callback);

    template<IteratorFunction<Thread&> Callback>
    IterationDecision for_each_thread(Callback);
    template<IteratorFunction<Thread&> Callback>
    IterationDecision for_each_thread(Callback callback) const;

    // Non-breakable iteration functions
    template<VoidFunction<Process&> Callback>
    static void for_each(Callback);
    template<VoidFunction<Process&> Callback>
    static void for_each_in_pgrp(ProcessGroupID, Callback);
    template<VoidFunction<Process&> Callback>
    void for_each_child(Callback);

    template<VoidFunction<Thread&> Callback>
    IterationDecision for_each_thread(Callback);
    template<VoidFunction<Thread&> Callback>
    IterationDecision for_each_thread(Callback callback) const;
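
    // Example (sketch): a "breakable" callback returns an IterationDecision and may stop
    // early; a "non-breakable" callback returns void and visits every element.
    //
    //     Process::for_each([](Process& process) {
    //         if (process.is_kernel_process())
    //             return IterationDecision::Break;
    //         return IterationDecision::Continue;
    //     });
    //
    //     Process::for_each([](Process& process) {
    //         dbgln("{}", process);
    //     });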

    void die();
    void finalize();

    ThreadTracer* tracer() { return m_tracer.ptr(); }
    bool is_traced() const { return !!m_tracer; }
    ErrorOr<void> start_tracing_from(ProcessID tracer);
    void stop_tracing();
    void tracer_trap(Thread&, const RegisterState&);

    ErrorOr<FlatPtr> sys$emuctl();
    ErrorOr<FlatPtr> sys$yield();
    ErrorOr<FlatPtr> sys$sync();
    ErrorOr<FlatPtr> sys$beep();
    ErrorOr<FlatPtr> sys$get_process_name(Userspace<char*> buffer, size_t buffer_size);
    ErrorOr<FlatPtr> sys$set_process_name(Userspace<const char*> user_name, size_t user_name_length);
    ErrorOr<FlatPtr> sys$create_inode_watcher(u32 flags);
    ErrorOr<FlatPtr> sys$inode_watcher_add_watch(Userspace<const Syscall::SC_inode_watcher_add_watch_params*> user_params);
    ErrorOr<FlatPtr> sys$inode_watcher_remove_watch(int fd, int wd);
    ErrorOr<FlatPtr> sys$dbgputstr(Userspace<const char*>, size_t);
    ErrorOr<FlatPtr> sys$dump_backtrace();
    ErrorOr<FlatPtr> sys$gettid();
    ErrorOr<FlatPtr> sys$setsid();
    ErrorOr<FlatPtr> sys$getsid(pid_t);
    ErrorOr<FlatPtr> sys$setpgid(pid_t pid, pid_t pgid);
    ErrorOr<FlatPtr> sys$getpgrp();
    ErrorOr<FlatPtr> sys$getpgid(pid_t);
    ErrorOr<FlatPtr> sys$getuid();
    ErrorOr<FlatPtr> sys$getgid();
    ErrorOr<FlatPtr> sys$geteuid();
    ErrorOr<FlatPtr> sys$getegid();
    ErrorOr<FlatPtr> sys$getpid();
    ErrorOr<FlatPtr> sys$getppid();
    ErrorOr<FlatPtr> sys$getresuid(Userspace<UserID*>, Userspace<UserID*>, Userspace<UserID*>);
    ErrorOr<FlatPtr> sys$getresgid(Userspace<GroupID*>, Userspace<GroupID*>, Userspace<GroupID*>);
    ErrorOr<FlatPtr> sys$umask(mode_t);
    ErrorOr<FlatPtr> sys$open(Userspace<const Syscall::SC_open_params*>);
    ErrorOr<FlatPtr> sys$close(int fd);
    ErrorOr<FlatPtr> sys$read(int fd, Userspace<u8*>, size_t);
    ErrorOr<FlatPtr> sys$pread(int fd, Userspace<u8*>, size_t, Userspace<off_t const*>);
    ErrorOr<FlatPtr> sys$readv(int fd, Userspace<const struct iovec*> iov, int iov_count);
    ErrorOr<FlatPtr> sys$write(int fd, Userspace<const u8*>, size_t);
    ErrorOr<FlatPtr> sys$writev(int fd, Userspace<const struct iovec*> iov, int iov_count);
    ErrorOr<FlatPtr> sys$fstat(int fd, Userspace<stat*>);
    ErrorOr<FlatPtr> sys$stat(Userspace<const Syscall::SC_stat_params*>);
    ErrorOr<FlatPtr> sys$lseek(int fd, Userspace<off_t*>, int whence);
    ErrorOr<FlatPtr> sys$ftruncate(int fd, Userspace<off_t const*>);
    ErrorOr<FlatPtr> sys$kill(pid_t pid_or_pgid, int sig);
    [[noreturn]] void sys$exit(int status);
    ErrorOr<FlatPtr> sys$sigreturn(RegisterState& registers);
    ErrorOr<FlatPtr> sys$waitid(Userspace<const Syscall::SC_waitid_params*>);
    ErrorOr<FlatPtr> sys$mmap(Userspace<const Syscall::SC_mmap_params*>);
    ErrorOr<FlatPtr> sys$mremap(Userspace<const Syscall::SC_mremap_params*>);
    ErrorOr<FlatPtr> sys$munmap(Userspace<void*>, size_t);
    ErrorOr<FlatPtr> sys$set_mmap_name(Userspace<const Syscall::SC_set_mmap_name_params*>);
    ErrorOr<FlatPtr> sys$mprotect(Userspace<void*>, size_t, int prot);
    ErrorOr<FlatPtr> sys$madvise(Userspace<void*>, size_t, int advice);
    ErrorOr<FlatPtr> sys$msyscall(Userspace<void*>);
    ErrorOr<FlatPtr> sys$msync(Userspace<void*>, size_t, int flags);
    ErrorOr<FlatPtr> sys$purge(int mode);
    ErrorOr<FlatPtr> sys$poll(Userspace<const Syscall::SC_poll_params*>);
    ErrorOr<FlatPtr> sys$get_dir_entries(int fd, Userspace<void*>, size_t);
    ErrorOr<FlatPtr> sys$getcwd(Userspace<char*>, size_t);
    ErrorOr<FlatPtr> sys$chdir(Userspace<const char*>, size_t);
    ErrorOr<FlatPtr> sys$fchdir(int fd);
    ErrorOr<FlatPtr> sys$adjtime(Userspace<const timeval*>, Userspace<timeval*>);
    ErrorOr<FlatPtr> sys$clock_gettime(clockid_t, Userspace<timespec*>);
    ErrorOr<FlatPtr> sys$clock_settime(clockid_t, Userspace<const timespec*>);
    ErrorOr<FlatPtr> sys$clock_nanosleep(Userspace<const Syscall::SC_clock_nanosleep_params*>);
    ErrorOr<FlatPtr> sys$gethostname(Userspace<char*>, size_t);
    ErrorOr<FlatPtr> sys$sethostname(Userspace<const char*>, size_t);
    ErrorOr<FlatPtr> sys$uname(Userspace<utsname*>);
    ErrorOr<FlatPtr> sys$readlink(Userspace<const Syscall::SC_readlink_params*>);
    ErrorOr<FlatPtr> sys$ttyname(int fd, Userspace<char*>, size_t);
    ErrorOr<FlatPtr> sys$ptsname(int fd, Userspace<char*>, size_t);
    ErrorOr<FlatPtr> sys$fork(RegisterState&);
    ErrorOr<FlatPtr> sys$execve(Userspace<const Syscall::SC_execve_params*>);
    ErrorOr<FlatPtr> sys$dup2(int old_fd, int new_fd);
    ErrorOr<FlatPtr> sys$sigaction(int signum, Userspace<const sigaction*> act, Userspace<sigaction*> old_act);
    ErrorOr<FlatPtr> sys$sigaltstack(Userspace<const stack_t*> ss, Userspace<stack_t*> old_ss);
    ErrorOr<FlatPtr> sys$sigprocmask(int how, Userspace<const sigset_t*> set, Userspace<sigset_t*> old_set);
    ErrorOr<FlatPtr> sys$sigpending(Userspace<sigset_t*>);
    ErrorOr<FlatPtr> sys$sigtimedwait(Userspace<sigset_t const*>, Userspace<siginfo_t*>, Userspace<const timespec*>);
    ErrorOr<FlatPtr> sys$getgroups(size_t, Userspace<gid_t*>);
    ErrorOr<FlatPtr> sys$setgroups(size_t, Userspace<const gid_t*>);
    ErrorOr<FlatPtr> sys$pipe(int pipefd[2], int flags);
    ErrorOr<FlatPtr> sys$killpg(pid_t pgrp, int sig);
    ErrorOr<FlatPtr> sys$seteuid(UserID);
    ErrorOr<FlatPtr> sys$setegid(GroupID);
    ErrorOr<FlatPtr> sys$setuid(UserID);
    ErrorOr<FlatPtr> sys$setgid(GroupID);
    ErrorOr<FlatPtr> sys$setreuid(UserID, UserID);
    ErrorOr<FlatPtr> sys$setresuid(UserID, UserID, UserID);
    ErrorOr<FlatPtr> sys$setresgid(GroupID, GroupID, GroupID);
    ErrorOr<FlatPtr> sys$alarm(unsigned seconds);
    ErrorOr<FlatPtr> sys$access(Userspace<const char*> pathname, size_t path_length, int mode);
    ErrorOr<FlatPtr> sys$fcntl(int fd, int cmd, u32 extra_arg);
    ErrorOr<FlatPtr> sys$ioctl(int fd, unsigned request, FlatPtr arg);
    ErrorOr<FlatPtr> sys$mkdir(Userspace<const char*> pathname, size_t path_length, mode_t mode);
    ErrorOr<FlatPtr> sys$times(Userspace<tms*>);
    ErrorOr<FlatPtr> sys$utime(Userspace<const char*> pathname, size_t path_length, Userspace<const struct utimbuf*>);
    ErrorOr<FlatPtr> sys$link(Userspace<const Syscall::SC_link_params*>);
    ErrorOr<FlatPtr> sys$unlink(Userspace<const char*> pathname, size_t path_length);
    ErrorOr<FlatPtr> sys$symlink(Userspace<const Syscall::SC_symlink_params*>);
    ErrorOr<FlatPtr> sys$rmdir(Userspace<const char*> pathname, size_t path_length);
    ErrorOr<FlatPtr> sys$mount(Userspace<const Syscall::SC_mount_params*>);
    ErrorOr<FlatPtr> sys$umount(Userspace<const char*> mountpoint, size_t mountpoint_length);
    ErrorOr<FlatPtr> sys$chmod(Userspace<Syscall::SC_chmod_params const*>);
    ErrorOr<FlatPtr> sys$fchmod(int fd, mode_t);
    ErrorOr<FlatPtr> sys$chown(Userspace<const Syscall::SC_chown_params*>);
    ErrorOr<FlatPtr> sys$fchown(int fd, UserID, GroupID);
    ErrorOr<FlatPtr> sys$fsync(int fd);
    ErrorOr<FlatPtr> sys$socket(int domain, int type, int protocol);
    ErrorOr<FlatPtr> sys$bind(int sockfd, Userspace<const sockaddr*> addr, socklen_t);
    ErrorOr<FlatPtr> sys$listen(int sockfd, int backlog);
    ErrorOr<FlatPtr> sys$accept4(Userspace<const Syscall::SC_accept4_params*>);
    ErrorOr<FlatPtr> sys$connect(int sockfd, Userspace<const sockaddr*>, socklen_t);
    ErrorOr<FlatPtr> sys$shutdown(int sockfd, int how);
    ErrorOr<FlatPtr> sys$sendmsg(int sockfd, Userspace<const struct msghdr*>, int flags);
    ErrorOr<FlatPtr> sys$recvmsg(int sockfd, Userspace<struct msghdr*>, int flags);
    ErrorOr<FlatPtr> sys$getsockopt(Userspace<const Syscall::SC_getsockopt_params*>);
    ErrorOr<FlatPtr> sys$setsockopt(Userspace<const Syscall::SC_setsockopt_params*>);
    ErrorOr<FlatPtr> sys$getsockname(Userspace<const Syscall::SC_getsockname_params*>);
    ErrorOr<FlatPtr> sys$getpeername(Userspace<const Syscall::SC_getpeername_params*>);
    ErrorOr<FlatPtr> sys$socketpair(Userspace<const Syscall::SC_socketpair_params*>);
    ErrorOr<FlatPtr> sys$sched_setparam(pid_t pid, Userspace<const struct sched_param*>);
    ErrorOr<FlatPtr> sys$sched_getparam(pid_t pid, Userspace<struct sched_param*>);
    ErrorOr<FlatPtr> sys$create_thread(void* (*)(void*), Userspace<const Syscall::SC_create_thread_params*>);
    [[noreturn]] void sys$exit_thread(Userspace<void*>, Userspace<void*>, size_t);
    ErrorOr<FlatPtr> sys$join_thread(pid_t tid, Userspace<void**> exit_value);
    ErrorOr<FlatPtr> sys$detach_thread(pid_t tid);
    ErrorOr<FlatPtr> sys$set_thread_name(pid_t tid, Userspace<const char*> buffer, size_t buffer_size);
    ErrorOr<FlatPtr> sys$get_thread_name(pid_t tid, Userspace<char*> buffer, size_t buffer_size);
    ErrorOr<FlatPtr> sys$kill_thread(pid_t tid, int signal);
    ErrorOr<FlatPtr> sys$rename(Userspace<const Syscall::SC_rename_params*>);
    ErrorOr<FlatPtr> sys$mknod(Userspace<const Syscall::SC_mknod_params*>);
    ErrorOr<FlatPtr> sys$realpath(Userspace<const Syscall::SC_realpath_params*>);
    ErrorOr<FlatPtr> sys$getrandom(Userspace<void*>, size_t, unsigned int);
    ErrorOr<FlatPtr> sys$getkeymap(Userspace<const Syscall::SC_getkeymap_params*>);
    ErrorOr<FlatPtr> sys$setkeymap(Userspace<const Syscall::SC_setkeymap_params*>);
    ErrorOr<FlatPtr> sys$profiling_enable(pid_t, u64);
    ErrorOr<FlatPtr> sys$profiling_disable(pid_t);
    ErrorOr<FlatPtr> sys$profiling_free_buffer(pid_t);
    ErrorOr<FlatPtr> sys$futex(Userspace<const Syscall::SC_futex_params*>);
    ErrorOr<FlatPtr> sys$pledge(Userspace<const Syscall::SC_pledge_params*>);
    ErrorOr<FlatPtr> sys$unveil(Userspace<const Syscall::SC_unveil_params*>);
    ErrorOr<FlatPtr> sys$perf_event(int type, FlatPtr arg1, FlatPtr arg2);
    ErrorOr<FlatPtr> sys$perf_register_string(Userspace<char const*>, size_t);
    ErrorOr<FlatPtr> sys$get_stack_bounds(Userspace<FlatPtr*> stack_base, Userspace<size_t*> stack_size);
    ErrorOr<FlatPtr> sys$ptrace(Userspace<const Syscall::SC_ptrace_params*>);
    ErrorOr<FlatPtr> sys$sendfd(int sockfd, int fd);
    ErrorOr<FlatPtr> sys$recvfd(int sockfd, int options);
    ErrorOr<FlatPtr> sys$sysconf(int name);
    ErrorOr<FlatPtr> sys$disown(ProcessID);
    ErrorOr<FlatPtr> sys$allocate_tls(Userspace<const char*> initial_data, size_t);
    ErrorOr<FlatPtr> sys$prctl(int option, FlatPtr arg1, FlatPtr arg2);
    ErrorOr<FlatPtr> sys$set_coredump_metadata(Userspace<const Syscall::SC_set_coredump_metadata_params*>);
    ErrorOr<FlatPtr> sys$anon_create(size_t, int options);
    ErrorOr<FlatPtr> sys$statvfs(Userspace<const Syscall::SC_statvfs_params*> user_params);
    ErrorOr<FlatPtr> sys$fstatvfs(int fd, statvfs* buf);
    ErrorOr<FlatPtr> sys$map_time_page();

    template<bool sockname, typename Params>
    ErrorOr<void> get_sock_or_peer_name(Params const&);

    static void initialize();

    [[noreturn]] void crash(int signal, FlatPtr ip, bool out_of_memory = false);
    [[nodiscard]] siginfo_t wait_info() const;

    const TTY* tty() const { return m_tty; }
    void set_tty(TTY*);

    u32 m_ticks_in_user { 0 };
    u32 m_ticks_in_kernel { 0 };

    u32 m_ticks_in_user_for_dead_children { 0 };
    u32 m_ticks_in_kernel_for_dead_children { 0 };

    Custody& current_directory();
    Custody* executable() { return m_executable.ptr(); }
    const Custody* executable() const { return m_executable.ptr(); }
    NonnullOwnPtrVector<KString> const& arguments() const { return m_arguments; };
    NonnullOwnPtrVector<KString> const& environment() const { return m_environment; };

    ErrorOr<void> exec(NonnullOwnPtr<KString> path, NonnullOwnPtrVector<KString> arguments, NonnullOwnPtrVector<KString> environment, Thread*& new_main_thread, u32& prev_flags, int recursion_depth = 0);

    ErrorOr<LoadResult> load(NonnullRefPtr<OpenFileDescription> main_program_description, RefPtr<OpenFileDescription> interpreter_description, const ElfW(Ehdr) & main_program_header);

    bool is_superuser() const { return euid() == 0; }

    void terminate_due_to_signal(u8 signal);
    ErrorOr<void> send_signal(u8 signal, Process* sender);

    u8 termination_signal() const { return m_protected_values.termination_signal; }

    u16 thread_count() const
    {
        return m_protected_values.thread_count.load(AK::MemoryOrder::memory_order_relaxed);
    }

    Mutex& big_lock() { return m_big_lock; }
    Mutex& ptrace_lock() { return m_ptrace_lock; }

    bool has_promises() const { return m_protected_values.has_promises; }
    bool has_promised(Pledge pledge) const { return (m_protected_values.promises & (1U << (u32)pledge)) != 0; }
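
    // Promises are stored as a bitmask indexed by the Pledge enum. For example
    // (sketch), after a successful sys$pledge() for "stdio rpath" the mask contains
    // (1u << (u32)Pledge::stdio) | (1u << (u32)Pledge::rpath), so
    // has_promised(Pledge::stdio) is true while has_promised(Pledge::unix) is false.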

    VeilState veil_state() const
    {
        return m_veil_state;
    }
    const UnveilNode& unveiled_paths() const
    {
        return m_unveiled_paths;
    }

    bool wait_for_tracer_at_next_execve() const
    {
        return m_wait_for_tracer_at_next_execve;
    }
    void set_wait_for_tracer_at_next_execve(bool val)
    {
        m_wait_for_tracer_at_next_execve = val;
    }

    ErrorOr<void> peek_user_data(Span<u8> destination, Userspace<const u8*> address);
    ErrorOr<FlatPtr> peek_user_data(Userspace<const FlatPtr*> address);
    ErrorOr<void> poke_user_data(Userspace<FlatPtr*> address, FlatPtr data);

    void disowned_by_waiter(Process& process);
    void unblock_waiters(Thread::WaitBlocker::UnblockFlags, u8 signal = 0);
    Thread::WaitBlockerSet& wait_blocker_set() { return m_wait_blocker_set; }

    template<typename Callback>
    void for_each_coredump_property(Callback callback) const
    {
        for (auto const& property : m_coredump_properties) {
            if (property.key && property.value)
                callback(*property.key, *property.value);
        }
    }

    ErrorOr<void> set_coredump_property(NonnullOwnPtr<KString> key, NonnullOwnPtr<KString> value);
    ErrorOr<void> try_set_coredump_property(StringView key, StringView value);
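
    // Example (sketch): a subsystem can attach a key/value pair that ends up in the
    // process's coredump metadata, and the coredump writer later visits it:
    //
    //     (void)process.try_set_coredump_property("assertion"sv, "index < size()"sv);
    //     process.for_each_coredump_property([](auto& key, auto& value) {
    //         dbgln("{}: {}", key, value);
    //     });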

    const NonnullRefPtrVector<Thread>& threads_for_coredump(Badge<Coredump>) const { return m_threads_for_coredump; }

    PerformanceEventBuffer* perf_events() { return m_perf_event_buffer; }
    PerformanceEventBuffer const* perf_events() const { return m_perf_event_buffer; }

    Memory::AddressSpace& address_space() { return *m_space; }
    Memory::AddressSpace const& address_space() const { return *m_space; }

    VirtualAddress signal_trampoline() const { return m_protected_values.signal_trampoline; }

    ErrorOr<void> require_promise(Pledge);
    ErrorOr<void> require_no_promises() const;

private:
    friend class MemoryManager;
    friend class Scheduler;
    friend class Region;
    friend class PerformanceManager;

    bool add_thread(Thread&);
    bool remove_thread(Thread&);

    Process(NonnullOwnPtr<KString> name, UserID, GroupID, ProcessID ppid, bool is_kernel_process, RefPtr<Custody> cwd, RefPtr<Custody> executable, TTY* tty);
    static ErrorOr<NonnullRefPtr<Process>> try_create(RefPtr<Thread>& first_thread, NonnullOwnPtr<KString> name, UserID, GroupID, ProcessID ppid, bool is_kernel_process, RefPtr<Custody> cwd = nullptr, RefPtr<Custody> executable = nullptr, TTY* = nullptr, Process* fork_parent = nullptr);
    ErrorOr<void> attach_resources(NonnullOwnPtr<Memory::AddressSpace>&&, RefPtr<Thread>& first_thread, Process* fork_parent);
    static ProcessID allocate_pid();

    void kill_threads_except_self();
    void kill_all_threads();
    ErrorOr<void> dump_core();
    ErrorOr<void> dump_perfcore();
    bool create_perf_events_buffer_if_needed();
    void delete_perf_events_buffer();

    ErrorOr<void> do_exec(NonnullRefPtr<OpenFileDescription> main_program_description, NonnullOwnPtrVector<KString> arguments, NonnullOwnPtrVector<KString> environment, RefPtr<OpenFileDescription> interpreter_description, Thread*& new_main_thread, u32& prev_flags, const ElfW(Ehdr) & main_program_header);
    ErrorOr<FlatPtr> do_write(OpenFileDescription&, const UserOrKernelBuffer&, size_t);

    ErrorOr<FlatPtr> do_statvfs(FileSystem const& path, Custody const*, statvfs* buf);

    ErrorOr<RefPtr<OpenFileDescription>> find_elf_interpreter_for_executable(StringView path, ElfW(Ehdr) const& main_executable_header, size_t main_executable_header_size, size_t file_size);

    ErrorOr<void> do_kill(Process&, int signal);
    ErrorOr<void> do_killpg(ProcessGroupID pgrp, int signal);
    ErrorOr<void> do_killall(int signal);
    ErrorOr<void> do_killself(int signal);

    ErrorOr<siginfo_t> do_waitid(Variant<Empty, NonnullRefPtr<Process>, NonnullRefPtr<ProcessGroup>> waitee, int options);

    static ErrorOr<NonnullOwnPtr<KString>> get_syscall_path_argument(Userspace<const char*> user_path, size_t path_length);
    static ErrorOr<NonnullOwnPtr<KString>> get_syscall_path_argument(const Syscall::StringArgument&);

    bool has_tracee_thread(ProcessID tracer_pid);

    void clear_futex_queues_on_exec();

    ErrorOr<void> remap_range_as_stack(FlatPtr address, size_t size);

public:
    NonnullRefPtr<ProcessProcFSTraits> procfs_traits() const { return *m_procfs_traits; }
    ErrorOr<void> procfs_get_fds_stats(KBufferBuilder& builder) const;
    ErrorOr<void> procfs_get_perf_events(KBufferBuilder& builder) const;
    ErrorOr<void> procfs_get_unveil_stats(KBufferBuilder& builder) const;
    ErrorOr<void> procfs_get_pledge_stats(KBufferBuilder& builder) const;
    ErrorOr<void> procfs_get_virtual_memory_stats(KBufferBuilder& builder) const;
    ErrorOr<void> procfs_get_binary_link(KBufferBuilder& builder) const;
    ErrorOr<void> procfs_get_current_work_directory_link(KBufferBuilder& builder) const;
    mode_t binary_link_required_mode() const;
    ErrorOr<void> procfs_get_thread_stack(ThreadID thread_id, KBufferBuilder& builder) const;
    ErrorOr<void> traverse_stacks_directory(FileSystemID, Function<ErrorOr<void>(FileSystem::DirectoryEntryView const&)> callback) const;
    ErrorOr<NonnullRefPtr<Inode>> lookup_stacks_directory(const ProcFS&, StringView name) const;
    ErrorOr<size_t> procfs_get_file_description_link(unsigned fd, KBufferBuilder& builder) const;
    ErrorOr<void> traverse_file_descriptions_directory(FileSystemID, Function<ErrorOr<void>(FileSystem::DirectoryEntryView const&)> callback) const;
    ErrorOr<NonnullRefPtr<Inode>> lookup_file_descriptions_directory(const ProcFS&, StringView name) const;
    ErrorOr<void> procfs_get_tty_link(KBufferBuilder& builder) const;

private:
    inline PerformanceEventBuffer* current_perf_events_buffer()
    {
        if (g_profiling_all_threads)
            return g_global_perf_events;
        if (m_profiling)
            return m_perf_event_buffer.ptr();
        return nullptr;
    }

    IntrusiveListNode<Process> m_list_node;

    NonnullOwnPtr<KString> m_name;

    OwnPtr<Memory::AddressSpace> m_space;

    RefPtr<ProcessGroup> m_pg;

    AtomicEdgeAction<u32> m_protected_data_refs;
    void protect_data();
    void unprotect_data();

    OwnPtr<ThreadTracer> m_tracer;

public:
    class OpenFileDescriptionAndFlags {
    public:
        bool is_valid() const { return !m_description.is_null(); }
        bool is_allocated() const { return m_is_allocated; }
        void allocate()
        {
            VERIFY(!m_is_allocated);
            VERIFY(!is_valid());
            m_is_allocated = true;
        }
        void deallocate()
        {
            VERIFY(m_is_allocated);
            VERIFY(!is_valid());
            m_is_allocated = false;
        }

        OpenFileDescription* description() { return m_description; }
        const OpenFileDescription* description() const { return m_description; }
        u32 flags() const { return m_flags; }
        void set_flags(u32 flags) { m_flags = flags; }

        void clear();
        void set(NonnullRefPtr<OpenFileDescription>&&, u32 flags = 0);

    private:
        RefPtr<OpenFileDescription> m_description;
        bool m_is_allocated { false };
        u32 m_flags { 0 };
    };

    class ScopedDescriptionAllocation;
    class OpenFileDescriptions {
        AK_MAKE_NONCOPYABLE(OpenFileDescriptions);
        AK_MAKE_NONMOVABLE(OpenFileDescriptions);
        friend class Process;

    public:
        OpenFileDescriptions() { }
        ALWAYS_INLINE const OpenFileDescriptionAndFlags& operator[](size_t i) const { return at(i); }
        ALWAYS_INLINE OpenFileDescriptionAndFlags& operator[](size_t i) { return at(i); }

        ErrorOr<void> try_clone(const Kernel::Process::OpenFileDescriptions& other)
        {
            TRY(try_resize(other.m_fds_metadatas.size()));

            for (size_t i = 0; i < other.m_fds_metadatas.size(); ++i) {
                m_fds_metadatas[i] = other.m_fds_metadatas[i];
            }
            return {};
        }

        const OpenFileDescriptionAndFlags& at(size_t i) const;
        OpenFileDescriptionAndFlags& at(size_t i);

        OpenFileDescriptionAndFlags const* get_if_valid(size_t i) const;
        OpenFileDescriptionAndFlags* get_if_valid(size_t i);

        void enumerate(Function<void(const OpenFileDescriptionAndFlags&)>) const;
        void change_each(Function<void(OpenFileDescriptionAndFlags&)>);

        ErrorOr<ScopedDescriptionAllocation> allocate(int first_candidate_fd = 0);
        size_t open_count() const;

        ErrorOr<void> try_resize(size_t size) { return m_fds_metadatas.try_resize(size); }

        static constexpr size_t max_open()
        {
            return s_max_open_file_descriptors;
        }

        void clear()
        {
            m_fds_metadatas.clear();
        }

        ErrorOr<NonnullRefPtr<OpenFileDescription>> open_file_description(int fd) const;

    private:
        static constexpr size_t s_max_open_file_descriptors { FD_SETSIZE };
        Vector<OpenFileDescriptionAndFlags> m_fds_metadatas;
    };

    class ScopedDescriptionAllocation {
        AK_MAKE_NONCOPYABLE(ScopedDescriptionAllocation);

    public:
        ScopedDescriptionAllocation() = default;
        ScopedDescriptionAllocation(int tracked_fd, OpenFileDescriptionAndFlags* description)
            : fd(tracked_fd)
            , m_description(description)
        {
        }

        ScopedDescriptionAllocation(ScopedDescriptionAllocation&& other)
            : fd(other.fd)
        {
            // Take over the responsibility of tracking to deallocation.
            swap(m_description, other.m_description);
        }

        ScopedDescriptionAllocation& operator=(ScopedDescriptionAllocation&& other)
        {
            if (this != &other) {
                m_description = exchange(other.m_description, nullptr);
                fd = exchange(other.fd, -1);
            }
            return *this;
        }

        ~ScopedDescriptionAllocation()
        {
            if (m_description && m_description->is_allocated() && !m_description->is_valid()) {
                m_description->deallocate();
            }
        }

        int fd { -1 };

    private:
        OpenFileDescriptionAndFlags* m_description { nullptr };
    };

    class ProcessProcFSTraits : public ProcFSExposedComponent {
    public:
        static ErrorOr<NonnullRefPtr<ProcessProcFSTraits>> try_create(Badge<Process>, Process& process)
        {
            return adopt_nonnull_ref_or_enomem(new (nothrow) ProcessProcFSTraits(process));
        }

        virtual InodeIndex component_index() const override;
        virtual ErrorOr<NonnullRefPtr<Inode>> to_inode(const ProcFS& procfs_instance) const override;
        virtual ErrorOr<void> traverse_as_directory(FileSystemID, Function<ErrorOr<void>(FileSystem::DirectoryEntryView const&)>) const override;
        virtual mode_t required_mode() const override { return 0555; }
        virtual UserID owner_user() const override;
        virtual GroupID owner_group() const override;

    private:
        explicit ProcessProcFSTraits(Process& process)
            : m_process(process.make_weak_ptr())
        {
        }

        // NOTE: We need to weakly hold on to the process, because otherwise
        //       we would be creating a reference cycle.
        WeakPtr<Process> m_process;
    };

    MutexProtected<OpenFileDescriptions>& fds() { return m_fds; }
    MutexProtected<OpenFileDescriptions> const& fds() const { return m_fds; }

    ErrorOr<NonnullRefPtr<OpenFileDescription>> open_file_description(int fd)
    {
        return m_fds.with_shared([fd](auto& fds) { return fds.open_file_description(fd); });
    }

    ErrorOr<NonnullRefPtr<OpenFileDescription>> open_file_description(int fd) const
    {
        return m_fds.with_shared([fd](auto& fds) { return fds.open_file_description(fd); });
    }

    ErrorOr<ScopedDescriptionAllocation> allocate_fd()
    {
        return m_fds.with_exclusive([](auto& fds) { return fds.allocate(); });
    }
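
    // Typical syscall pattern (illustrative sketch; the description source and flags
    // are placeholders, not the exact bodies from Process.cpp):
    //
    //     auto fd_allocation = TRY(allocate_fd());
    //     auto description = TRY(/* obtain a NonnullRefPtr<OpenFileDescription> */);
    //     m_fds.with_exclusive([&](auto& fds) {
    //         fds[fd_allocation.fd].set(move(description), 0);
    //     });
    //     return fd_allocation.fd;
    //
    // If an error propagates before set() runs, ~ScopedDescriptionAllocation()
    // releases the reserved slot again.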

private:
    SpinlockProtected<Thread::ListInProcess>& thread_list() { return m_thread_list; }
    SpinlockProtected<Thread::ListInProcess> const& thread_list() const { return m_thread_list; }

    SpinlockProtected<Thread::ListInProcess> m_thread_list;

    MutexProtected<OpenFileDescriptions> m_fds;

    const bool m_is_kernel_process;

    Atomic<State> m_state { State::Running };

    bool m_profiling { false };
    Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_stopped { false };
    bool m_should_generate_coredump { false };

    RefPtr<Custody> m_executable;
    RefPtr<Custody> m_cwd;

    NonnullOwnPtrVector<KString> m_arguments;
    NonnullOwnPtrVector<KString> m_environment;

    RefPtr<TTY> m_tty;

    WeakPtr<Memory::Region> m_master_tls_region;
    size_t m_master_tls_size { 0 };
    size_t m_master_tls_alignment { 0 };

    Mutex m_big_lock { "Process" };
    Mutex m_ptrace_lock { "ptrace" };

    RefPtr<Timer> m_alarm_timer;

    VeilState m_veil_state { VeilState::None };
    UnveilNode m_unveiled_paths { "/", { .full_path = "/" } };

    OwnPtr<PerformanceEventBuffer> m_perf_event_buffer;

    FutexQueues m_futex_queues;
    Spinlock m_futex_lock;

    // This member is used in the implementation of ptrace's PT_TRACEME flag.
    // If it is set to true, the process will stop at the next execve syscall
    // and wait for a tracer to attach.
    bool m_wait_for_tracer_at_next_execve { false };

    Thread::WaitBlockerSet m_wait_blocker_set;

    struct CoredumpProperty {
        OwnPtr<KString> key;
        OwnPtr<KString> value;
    };

    Array<CoredumpProperty, 4> m_coredump_properties;
    NonnullRefPtrVector<Thread> m_threads_for_coredump;

    mutable RefPtr<ProcessProcFSTraits> m_procfs_traits;

    static_assert(sizeof(ProtectedValues) < (PAGE_SIZE));
    alignas(4096) ProtectedValues m_protected_values;
    u8 m_protected_values_padding[PAGE_SIZE - sizeof(ProtectedValues)];

public:
    using List = IntrusiveListRelaxedConst<&Process::m_list_node>;
    static SpinlockProtected<Process::List>& all_instances();
};

// Note: Process object should be 2 pages of 4096 bytes each.
// It's not expected that the Process object will expand further because the first
// page is used for all unprotected values (which should be plenty of space for them).
// The second page is being used exclusively for write-protected values.
static_assert(AssertSize<Process, (PAGE_SIZE * 2)>());

extern RecursiveSpinlock g_profiling_lock;

template<IteratorFunction<Process&> Callback>
inline void Process::for_each(Callback callback)
{
    VERIFY_INTERRUPTS_DISABLED();
    Process::all_instances().with([&](const auto& list) {
        for (auto it = list.begin(); it != list.end();) {
            auto& process = *it;
            ++it;
            if (callback(process) == IterationDecision::Break)
                break;
        }
    });
}

template<IteratorFunction<Process&> Callback>
inline void Process::for_each_child(Callback callback)
{
    ProcessID my_pid = pid();
    Process::all_instances().with([&](const auto& list) {
        for (auto it = list.begin(); it != list.end();) {
            auto& process = *it;
            ++it;
            if (process.ppid() == my_pid || process.has_tracee_thread(pid())) {
                if (callback(process) == IterationDecision::Break)
                    break;
            }
        }
    });
}

template<IteratorFunction<Thread&> Callback>
inline IterationDecision Process::for_each_thread(Callback callback) const
{
    return thread_list().with([&](auto& thread_list) -> IterationDecision {
        for (auto& thread : thread_list) {
            IterationDecision decision = callback(thread);
            if (decision != IterationDecision::Continue)
                return decision;
        }
        return IterationDecision::Continue;
    });
}

template<IteratorFunction<Thread&> Callback>
inline IterationDecision Process::for_each_thread(Callback callback)
{
    return thread_list().with([&](auto& thread_list) -> IterationDecision {
        for (auto& thread : thread_list) {
            IterationDecision decision = callback(thread);
            if (decision != IterationDecision::Continue)
                return decision;
        }
        return IterationDecision::Continue;
    });
}

template<IteratorFunction<Process&> Callback>
inline void Process::for_each_in_pgrp(ProcessGroupID pgid, Callback callback)
{
    Process::all_instances().with([&](const auto& list) {
        for (auto it = list.begin(); it != list.end();) {
            auto& process = *it;
            ++it;
            if (!process.is_dead() && process.pgid() == pgid) {
                if (callback(process) == IterationDecision::Break)
                    break;
            }
        }
    });
}

template<VoidFunction<Process&> Callback>
inline void Process::for_each(Callback callback)
{
    return for_each([&](auto& item) {
        callback(item);
        return IterationDecision::Continue;
    });
}

template<VoidFunction<Process&> Callback>
inline void Process::for_each_child(Callback callback)
{
    return for_each_child([&](auto& item) {
        callback(item);
        return IterationDecision::Continue;
    });
}

template<VoidFunction<Thread&> Callback>
inline IterationDecision Process::for_each_thread(Callback callback) const
{
    thread_list().with([&](auto& thread_list) {
        for (auto& thread : thread_list)
            callback(thread);
    });
    return IterationDecision::Continue;
}

template<VoidFunction<Thread&> Callback>
inline IterationDecision Process::for_each_thread(Callback callback)
{
    thread_list().with([&](auto& thread_list) {
        for (auto& thread : thread_list)
            callback(thread);
    });
    return IterationDecision::Continue;
}

template<VoidFunction<Process&> Callback>
inline void Process::for_each_in_pgrp(ProcessGroupID pgid, Callback callback)
{
    return for_each_in_pgrp(pgid, [&](auto& item) {
        callback(item);
        return IterationDecision::Continue;
    });
}

inline bool InodeMetadata::may_read(const Process& process) const
{
    return may_read(process.euid(), process.egid(), process.extra_gids());
}

inline bool InodeMetadata::may_write(const Process& process) const
{
    return may_write(process.euid(), process.egid(), process.extra_gids());
}

inline bool InodeMetadata::may_execute(const Process& process) const
{
    return may_execute(process.euid(), process.egid(), process.extra_gids());
}

inline ProcessID Thread::pid() const
{
    return m_process->pid();
}

}

#define VERIFY_PROCESS_BIG_LOCK_ACQUIRED(process) \
    VERIFY(process->big_lock().is_locked_by_current_thread());

#define VERIFY_NO_PROCESS_BIG_LOCK(process) \
    VERIFY(!process->big_lock().is_locked_by_current_thread());
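
// Example (sketch): syscall implementations typically assert their big-lock
// requirement on entry, e.g.
//
//     ErrorOr<FlatPtr> Process::sys$getpid()
//     {
//         VERIFY_NO_PROCESS_BIG_LOCK(this);
//         return pid().value();
//     }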

inline static ErrorOr<NonnullOwnPtr<KString>> try_copy_kstring_from_user(const Kernel::Syscall::StringArgument& string)
{
    Userspace<char const*> characters((FlatPtr)string.characters);
    return try_copy_kstring_from_user(characters, string.length);
}

template<>
struct AK::Formatter<Kernel::Process> : AK::Formatter<FormatString> {
    ErrorOr<void> format(FormatBuilder& builder, Kernel::Process const& value)
    {
        return AK::Formatter<FormatString>::format(builder, "{}({})", value.name(), value.pid().value());
    }
};
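
// Example (sketch): the Formatter specialization above lets a Process be passed
// straight to the kernel's formatting helpers, printing "name(pid)":
//
//     dbgln("Killing {}", process); // e.g. "Killing Shell(42)"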