/*
 * NOTE: This file was recovered from a rendered page; the "Process.h 43 KB"
 * title and the line-number gutter were scrape artifacts and have been removed.
 */
/*
 * Copyright (c) 2018-2022, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */
  6. #pragma once
  7. #include <AK/Concepts.h>
  8. #include <AK/HashMap.h>
  9. #include <AK/IntrusiveList.h>
  10. #include <AK/IntrusiveListRelaxedConst.h>
  11. #include <AK/OwnPtr.h>
  12. #include <AK/RefPtr.h>
  13. #include <AK/Userspace.h>
  14. #include <AK/Variant.h>
  15. #include <Kernel/API/POSIX/sys/resource.h>
  16. #include <Kernel/API/Syscall.h>
  17. #include <Kernel/Assertions.h>
  18. #include <Kernel/AtomicEdgeAction.h>
  19. #include <Kernel/Credentials.h>
  20. #include <Kernel/FileSystem/InodeMetadata.h>
  21. #include <Kernel/FileSystem/OpenFileDescription.h>
  22. #include <Kernel/FileSystem/UnveilNode.h>
  23. #include <Kernel/Forward.h>
  24. #include <Kernel/FutexQueue.h>
  25. #include <Kernel/Jail.h>
  26. #include <Kernel/Library/LockWeakPtr.h>
  27. #include <Kernel/Library/LockWeakable.h>
  28. #include <Kernel/Library/NonnullLockRefPtrVector.h>
  29. #include <Kernel/Locking/Mutex.h>
  30. #include <Kernel/Locking/MutexProtected.h>
  31. #include <Kernel/Memory/AddressSpace.h>
  32. #include <Kernel/PerformanceEventBuffer.h>
  33. #include <Kernel/ProcessExposed.h>
  34. #include <Kernel/ProcessGroup.h>
  35. #include <Kernel/StdLib.h>
  36. #include <Kernel/Thread.h>
  37. #include <Kernel/UnixTypes.h>
  38. #include <LibC/elf.h>
  39. #include <LibC/signal_numbers.h>
namespace Kernel {

// Global system hostname storage, guarded by a mutex; the KString may be
// unset (null) until a hostname is assigned.
MutexProtected<OwnPtr<KString>>& hostname();

// Returns the current wall-clock time of day.
Time kgettimeofday();
// X-macro list of every pledge() promise. NOTE: an entry's position in this
// list determines its bit index in the promises bitmask (see has_promised(),
// which shifts by the Pledge enumerator's ordinal), so entries must not be
// reordered or removed.
#define ENUMERATE_PLEDGE_PROMISES \
    __ENUMERATE_PLEDGE_PROMISE(stdio) \
    __ENUMERATE_PLEDGE_PROMISE(rpath) \
    __ENUMERATE_PLEDGE_PROMISE(wpath) \
    __ENUMERATE_PLEDGE_PROMISE(cpath) \
    __ENUMERATE_PLEDGE_PROMISE(dpath) \
    __ENUMERATE_PLEDGE_PROMISE(inet) \
    __ENUMERATE_PLEDGE_PROMISE(id) \
    __ENUMERATE_PLEDGE_PROMISE(proc) \
    __ENUMERATE_PLEDGE_PROMISE(ptrace) \
    __ENUMERATE_PLEDGE_PROMISE(exec) \
    __ENUMERATE_PLEDGE_PROMISE(unix) \
    __ENUMERATE_PLEDGE_PROMISE(recvfd) \
    __ENUMERATE_PLEDGE_PROMISE(sendfd) \
    __ENUMERATE_PLEDGE_PROMISE(fattr) \
    __ENUMERATE_PLEDGE_PROMISE(tty) \
    __ENUMERATE_PLEDGE_PROMISE(chown) \
    __ENUMERATE_PLEDGE_PROMISE(thread) \
    __ENUMERATE_PLEDGE_PROMISE(video) \
    __ENUMERATE_PLEDGE_PROMISE(accept) \
    __ENUMERATE_PLEDGE_PROMISE(settime) \
    __ENUMERATE_PLEDGE_PROMISE(sigaction) \
    __ENUMERATE_PLEDGE_PROMISE(setkeymap) \
    __ENUMERATE_PLEDGE_PROMISE(prot_exec) \
    __ENUMERATE_PLEDGE_PROMISE(map_fixed) \
    __ENUMERATE_PLEDGE_PROMISE(getkeymap) \
    __ENUMERATE_PLEDGE_PROMISE(jail) \
    __ENUMERATE_PLEDGE_PROMISE(no_error)

// One enumerator per promise; the ordinal doubles as the bit index used in
// ProtectedValues::promises / execpromises.
enum class Pledge : u32 {
#define __ENUMERATE_PLEDGE_PROMISE(x) x,
    ENUMERATE_PLEDGE_PROMISES
#undef __ENUMERATE_PLEDGE_PROMISE
};
// State of this process's unveil() filesystem veil (see sys$unveil()).
// Enumerator order is relied upon elsewhere -- do not reorder.
enum class VeilState {
    None,            // unveil() has not been used.
    Dropped,         // Paths have been unveiled; veil not yet locked.
    Locked,          // Veil is locked; no further changes allowed.
    LockedInherited, // Veil is locked, presumably inherited from the parent -- TODO confirm.
};
// Low bit used to tag a futex key as process-private (as opposed to shared).
static constexpr FlatPtr futex_key_private_flag = 0b1;

// Globally unique futex identity: shared futexes are keyed by
// (VMObject*, offset); private ones by (AddressSpace*, user address).
// `raw` aliases either view as two FlatPtr words for uniform comparison
// and hashing.
union GlobalFutexKey {
    struct {
        Memory::VMObject const* vmobject;
        FlatPtr offset;
    } shared;
    struct {
        Memory::AddressSpace const* address_space;
        FlatPtr user_address;
    } private_;
    struct {
        FlatPtr parent;
        FlatPtr offset;
    } raw;
};
// The three views must all be exactly two pointer-sized words.
static_assert(sizeof(GlobalFutexKey) == (sizeof(FlatPtr) * 2));

struct LoadResult;
// A kernel Process: ref-counted (on a global spinlock-protected list) and
// weakable. The class body continues for the remainder of this header.
class Process final
    : public ListedRefCounted<Process, LockType::Spinlock>
    , public LockWeakable<Process> {

    // Security-sensitive per-process state. Access is mediated exclusively by
    // with_protected_data() / with_mutable_protected_data(), which take
    // m_protected_data_lock and toggle protect_data()/unprotect_data().
    class ProtectedValues {
    public:
        ProcessID pid { 0 };
        ProcessID ppid { 0 };
        SessionID sid { 0 };
        // FIXME: This should be a NonnullRefPtr
        RefPtr<Credentials> credentials;
        bool dumpable { false };
        // Presumably set when the executable has setuid/setgid bits -- TODO confirm.
        bool executable_is_setid { false };
        Atomic<bool> has_promises { false };
        // Bitmask of pledged promises; bit index == Pledge enumerator ordinal.
        Atomic<u32> promises { 0 };
        Atomic<bool> has_execpromises { false };
        Atomic<u32> execpromises { 0 };
        mode_t umask { 022 };
        VirtualAddress signal_trampoline;
        Atomic<u32> thread_count { 0 };
        u8 termination_status { 0 };
        u8 termination_signal { 0 };
    };
public:
    AK_MAKE_NONCOPYABLE(Process);
    AK_MAKE_NONMOVABLE(Process);
    MAKE_ALIGNED_ALLOCATED(Process, PAGE_SIZE);

    friend class Thread;
    friend class Coredump;

    // Runs `callback` with read-only access to the ProtectedValues while
    // holding m_protected_data_lock; returns whatever `callback` returns.
    auto with_protected_data(auto&& callback) const
    {
        SpinlockLocker locker(m_protected_data_lock);
        return callback(m_protected_values_do_not_access_directly);
    }

    // Mutable counterpart: unprotects the data for the duration of `callback`
    // and re-protects it on scope exit via ScopeGuard.
    auto with_mutable_protected_data(auto&& callback)
    {
        SpinlockLocker locker(m_protected_data_lock);
        unprotect_data();
        auto guard = ScopeGuard([&] { protect_data(); });
        return callback(m_protected_values_do_not_access_directly);
    }

    // Process lifecycle state; is_dying()/is_dead() load this with acquire
    // ordering.
    enum class State : u8 {
        Running = 0,
        Dying,
        Dead
    };
public:
    class ProcessProcFSTraits;

    // Returns the process of the currently executing thread. Requires that a
    // current thread exists (VERIFYs); check has_current() first if unsure.
    static Process& current()
    {
        auto* current_thread = Processor::current_thread();
        VERIFY(current_thread);
        return current_thread->process();
    }

    static bool has_current()
    {
        return Processor::current_thread() != nullptr;
    }

    // Kernel-thread entry shim: invokes the heap-allocated entry functor
    // passed via `data` exactly once, then frees it.
    template<typename EntryFunction>
    static void kernel_process_trampoline(void* data)
    {
        EntryFunction* func = reinterpret_cast<EntryFunction*>(data);
        (*func)();
        delete func;
    }

    enum class RegisterProcess {
        No,
        Yes
    };

    // Convenience overload: moves `entry` to the heap and dispatches through
    // kernel_process_trampoline(), which takes ownership and deletes it.
    template<typename EntryFunction>
    static LockRefPtr<Process> create_kernel_process(LockRefPtr<Thread>& first_thread, NonnullOwnPtr<KString> name, EntryFunction entry, u32 affinity = THREAD_AFFINITY_DEFAULT, RegisterProcess do_register = RegisterProcess::Yes)
    {
        auto* entry_func = new EntryFunction(move(entry));
        return create_kernel_process(first_thread, move(name), &Process::kernel_process_trampoline<EntryFunction>, entry_func, affinity, do_register);
    }

    static LockRefPtr<Process> create_kernel_process(LockRefPtr<Thread>& first_thread, NonnullOwnPtr<KString> name, void (*entry)(void*), void* entry_data = nullptr, u32 affinity = THREAD_AFFINITY_DEFAULT, RegisterProcess do_register = RegisterProcess::Yes);
    static ErrorOr<NonnullLockRefPtr<Process>> try_create_user_process(LockRefPtr<Thread>& first_thread, StringView path, UserID, GroupID, NonnullOwnPtrVector<KString> arguments, NonnullOwnPtrVector<KString> environment, TTY*);
    static void register_new(Process&);

    ~Process();
    LockRefPtr<Thread> create_kernel_thread(void (*entry)(void*), void* entry_data, u32 priority, NonnullOwnPtr<KString> name, u32 affinity = THREAD_AFFINITY_DEFAULT, bool joinable = true);

    // Profiling / coredump toggles.
    bool is_profiling() const { return m_profiling; }
    void set_profiling(bool profiling) { m_profiling = profiling; }
    bool should_generate_coredump() const { return m_should_generate_coredump; }
    void set_should_generate_coredump(bool b) { m_should_generate_coredump = b; }

    // Lifecycle queries; m_state is loaded with acquire ordering.
    bool is_dying() const { return m_state.load(AK::MemoryOrder::memory_order_acquire) != State::Running; }
    bool is_dead() const { return m_state.load(AK::MemoryOrder::memory_order_acquire) == State::Dead; }

    bool is_stopped() const { return m_is_stopped; }
    // Atomically sets the stopped flag; returns the previous value.
    bool set_stopped(bool stopped) { return m_is_stopped.exchange(stopped); }

    bool is_kernel_process() const { return m_is_kernel_process; }
    bool is_user_process() const { return !m_is_kernel_process; }

    // Process lookup; the jail-aware variant presumably only finds processes
    // visible from the caller's jail -- TODO confirm in implementation.
    static LockRefPtr<Process> from_pid_in_same_jail(ProcessID);
    static LockRefPtr<Process> from_pid_ignoring_jails(ProcessID);
    static SessionID get_sid_from_pgid(ProcessGroupID pgid);

    StringView name() const { return m_name->view(); }

    // Identity accessors; pid/sid/ppid live in the protected data region.
    ProcessID pid() const
    {
        return with_protected_data([](auto& protected_data) { return protected_data.pid; });
    }
    SessionID sid() const
    {
        return with_protected_data([](auto& protected_data) { return protected_data.sid; });
    }
    bool is_session_leader() const { return sid().value() == pid().value(); }
    ProcessGroupID pgid() const { return m_pg ? m_pg->pgid() : 0; }
    bool is_group_leader() const { return pgid().value() == pid().value(); }
    ProcessID ppid() const
    {
        return with_protected_data([](auto& protected_data) { return protected_data.ppid; });
    }

    SpinlockProtected<RefPtr<Jail>, LockRank::Process>& jail() { return m_attached_jail; }
    bool is_currently_in_jail() const
    {
        return m_attached_jail.with([&](auto& jail) -> bool { return !jail.is_null(); });
    }

    NonnullRefPtr<Credentials> credentials() const;

    bool is_dumpable() const
    {
        return with_protected_data([](auto& protected_data) { return protected_data.dumpable; });
    }
    void set_dumpable(bool);

    mode_t umask() const
    {
        return with_protected_data([](auto& protected_data) { return protected_data.umask; });
    }
    // Breakable iteration functions: the callback may stop iteration early
    // (IteratorFunction returns an IterationDecision; the Function variants
    // propagate the first error).
    template<IteratorFunction<Process&> Callback>
    static void for_each_ignoring_jails(Callback);

    static ErrorOr<void> for_each_in_same_jail(Function<ErrorOr<void>(Process&)>);
    ErrorOr<void> for_each_in_pgrp_in_same_jail(ProcessGroupID, Function<ErrorOr<void>(Process&)>);
    ErrorOr<void> for_each_child_in_same_jail(Function<ErrorOr<void>(Process&)>);

    template<IteratorFunction<Thread&> Callback>
    IterationDecision for_each_thread(Callback);
    template<IteratorFunction<Thread&> Callback>
    IterationDecision for_each_thread(Callback callback) const;
    ErrorOr<void> try_for_each_thread(Function<ErrorOr<void>(Thread const&)>) const;

    // Non-breakable iteration functions (void callbacks always visit all).
    template<VoidFunction<Process&> Callback>
    static void for_each_ignoring_jails(Callback);
    template<VoidFunction<Thread&> Callback>
    IterationDecision for_each_thread(Callback);
    template<VoidFunction<Thread&> Callback>
    IterationDecision for_each_thread(Callback callback) const;

    // Lifecycle.
    void die();
    void finalize();

    // ptrace support.
    ThreadTracer* tracer() { return m_tracer.ptr(); }
    bool is_traced() const { return !!m_tracer; }
    ErrorOr<void> start_tracing_from(ProcessID tracer);
    void stop_tracing();
    void tracer_trap(Thread&, RegisterState const&);
    // ==== Syscall implementations ====
    // Each sys$xyz() is the in-kernel entry point for the xyz() system call.
    // All return ErrorOr<FlatPtr> except the [[noreturn]] ones.

    ErrorOr<FlatPtr> sys$emuctl();
    ErrorOr<FlatPtr> sys$yield();
    ErrorOr<FlatPtr> sys$sync();
    ErrorOr<FlatPtr> sys$beep();
    ErrorOr<FlatPtr> sys$get_process_name(Userspace<char*> buffer, size_t buffer_size);
    ErrorOr<FlatPtr> sys$set_process_name(Userspace<char const*> user_name, size_t user_name_length);
    ErrorOr<FlatPtr> sys$create_inode_watcher(u32 flags);
    ErrorOr<FlatPtr> sys$inode_watcher_add_watch(Userspace<Syscall::SC_inode_watcher_add_watch_params const*> user_params);
    ErrorOr<FlatPtr> sys$inode_watcher_remove_watch(int fd, int wd);
    ErrorOr<FlatPtr> sys$dbgputstr(Userspace<char const*>, size_t);
    ErrorOr<FlatPtr> sys$dump_backtrace();
    ErrorOr<FlatPtr> sys$gettid();

    // Sessions, process groups, and identity.
    ErrorOr<FlatPtr> sys$setsid();
    ErrorOr<FlatPtr> sys$getsid(pid_t);
    ErrorOr<FlatPtr> sys$setpgid(pid_t pid, pid_t pgid);
    ErrorOr<FlatPtr> sys$getpgrp();
    ErrorOr<FlatPtr> sys$getpgid(pid_t);
    ErrorOr<FlatPtr> sys$getuid();
    ErrorOr<FlatPtr> sys$getgid();
    ErrorOr<FlatPtr> sys$geteuid();
    ErrorOr<FlatPtr> sys$getegid();
    ErrorOr<FlatPtr> sys$getpid();
    ErrorOr<FlatPtr> sys$getppid();
    ErrorOr<FlatPtr> sys$getresuid(Userspace<UserID*>, Userspace<UserID*>, Userspace<UserID*>);
    ErrorOr<FlatPtr> sys$getresgid(Userspace<GroupID*>, Userspace<GroupID*>, Userspace<GroupID*>);
    ErrorOr<FlatPtr> sys$getrusage(int, Userspace<rusage*>);
    ErrorOr<FlatPtr> sys$umask(mode_t);

    // File descriptors and filesystem I/O.
    ErrorOr<FlatPtr> sys$open(Userspace<Syscall::SC_open_params const*>);
    ErrorOr<FlatPtr> sys$close(int fd);
    ErrorOr<FlatPtr> sys$read(int fd, Userspace<u8*>, size_t);
    ErrorOr<FlatPtr> sys$pread(int fd, Userspace<u8*>, size_t, Userspace<off_t const*>);
    ErrorOr<FlatPtr> sys$readv(int fd, Userspace<const struct iovec*> iov, int iov_count);
    ErrorOr<FlatPtr> sys$write(int fd, Userspace<u8 const*>, size_t);
    ErrorOr<FlatPtr> sys$pwritev(int fd, Userspace<const struct iovec*> iov, int iov_count, Userspace<off_t const*>);
    ErrorOr<FlatPtr> sys$fstat(int fd, Userspace<stat*>);
    ErrorOr<FlatPtr> sys$stat(Userspace<Syscall::SC_stat_params const*>);
    ErrorOr<FlatPtr> sys$annotate_mapping(Userspace<void*>, int flags);
    ErrorOr<FlatPtr> sys$lseek(int fd, Userspace<off_t*>, int whence);
    ErrorOr<FlatPtr> sys$ftruncate(int fd, Userspace<off_t const*>);
    ErrorOr<FlatPtr> sys$posix_fallocate(int fd, Userspace<off_t const*>, Userspace<off_t const*>);

    // Signals and process termination.
    ErrorOr<FlatPtr> sys$kill(pid_t pid_or_pgid, int sig);
    [[noreturn]] void sys$exit(int status);
    ErrorOr<FlatPtr> sys$sigreturn(RegisterState& registers);
    ErrorOr<FlatPtr> sys$waitid(Userspace<Syscall::SC_waitid_params const*>);

    // Memory management.
    ErrorOr<FlatPtr> sys$mmap(Userspace<Syscall::SC_mmap_params const*>);
    ErrorOr<FlatPtr> sys$mremap(Userspace<Syscall::SC_mremap_params const*>);
    ErrorOr<FlatPtr> sys$munmap(Userspace<void*>, size_t);
    ErrorOr<FlatPtr> sys$set_mmap_name(Userspace<Syscall::SC_set_mmap_name_params const*>);
    ErrorOr<FlatPtr> sys$mprotect(Userspace<void*>, size_t, int prot);
    ErrorOr<FlatPtr> sys$madvise(Userspace<void*>, size_t, int advice);
    ErrorOr<FlatPtr> sys$msync(Userspace<void*>, size_t, int flags);
    ErrorOr<FlatPtr> sys$purge(int mode);

    ErrorOr<FlatPtr> sys$poll(Userspace<Syscall::SC_poll_params const*>);
    ErrorOr<FlatPtr> sys$get_dir_entries(int fd, Userspace<void*>, size_t);
    ErrorOr<FlatPtr> sys$getcwd(Userspace<char*>, size_t);
    ErrorOr<FlatPtr> sys$chdir(Userspace<char const*>, size_t);
    ErrorOr<FlatPtr> sys$fchdir(int fd);

    // Clocks and time.
    ErrorOr<FlatPtr> sys$adjtime(Userspace<timeval const*>, Userspace<timeval*>);
    ErrorOr<FlatPtr> sys$clock_gettime(clockid_t, Userspace<timespec*>);
    ErrorOr<FlatPtr> sys$clock_settime(clockid_t, Userspace<timespec const*>);
    ErrorOr<FlatPtr> sys$clock_nanosleep(Userspace<Syscall::SC_clock_nanosleep_params const*>);
    ErrorOr<FlatPtr> sys$clock_getres(Userspace<Syscall::SC_clock_getres_params const*>);

    ErrorOr<FlatPtr> sys$gethostname(Userspace<char*>, size_t);
    ErrorOr<FlatPtr> sys$sethostname(Userspace<char const*>, size_t);
    ErrorOr<FlatPtr> sys$uname(Userspace<utsname*>);
    ErrorOr<FlatPtr> sys$readlink(Userspace<Syscall::SC_readlink_params const*>);

    // Process creation and program replacement.
    ErrorOr<FlatPtr> sys$fork(RegisterState&);
    ErrorOr<FlatPtr> sys$execve(Userspace<Syscall::SC_execve_params const*>);
    ErrorOr<FlatPtr> sys$dup2(int old_fd, int new_fd);

    // Signal handler management.
    ErrorOr<FlatPtr> sys$sigaction(int signum, Userspace<sigaction const*> act, Userspace<sigaction*> old_act);
    ErrorOr<FlatPtr> sys$sigaltstack(Userspace<stack_t const*> ss, Userspace<stack_t*> old_ss);
    ErrorOr<FlatPtr> sys$sigprocmask(int how, Userspace<sigset_t const*> set, Userspace<sigset_t*> old_set);
    ErrorOr<FlatPtr> sys$sigpending(Userspace<sigset_t*>);
    ErrorOr<FlatPtr> sys$sigsuspend(Userspace<sigset_t const*>);
    ErrorOr<FlatPtr> sys$sigtimedwait(Userspace<sigset_t const*>, Userspace<siginfo_t*>, Userspace<timespec const*>);

    // Credentials.
    ErrorOr<FlatPtr> sys$getgroups(size_t, Userspace<GroupID*>);
    ErrorOr<FlatPtr> sys$setgroups(size_t, Userspace<GroupID const*>);
    ErrorOr<FlatPtr> sys$pipe(Userspace<int*>, int flags);
    ErrorOr<FlatPtr> sys$killpg(pid_t pgrp, int sig);
    ErrorOr<FlatPtr> sys$seteuid(UserID);
    ErrorOr<FlatPtr> sys$setegid(GroupID);
    ErrorOr<FlatPtr> sys$setuid(UserID);
    ErrorOr<FlatPtr> sys$setgid(GroupID);
    ErrorOr<FlatPtr> sys$setreuid(UserID, UserID);
    ErrorOr<FlatPtr> sys$setresuid(UserID, UserID, UserID);
    ErrorOr<FlatPtr> sys$setregid(GroupID, GroupID);
    ErrorOr<FlatPtr> sys$setresgid(GroupID, GroupID, GroupID);
    ErrorOr<FlatPtr> sys$alarm(unsigned seconds);
    ErrorOr<FlatPtr> sys$faccessat(Userspace<Syscall::SC_faccessat_params const*>);
    ErrorOr<FlatPtr> sys$fcntl(int fd, int cmd, uintptr_t extra_arg);
    ErrorOr<FlatPtr> sys$ioctl(int fd, unsigned request, FlatPtr arg);

    // Filesystem metadata and manipulation.
    ErrorOr<FlatPtr> sys$mkdir(int dirfd, Userspace<char const*> pathname, size_t path_length, mode_t mode);
    ErrorOr<FlatPtr> sys$times(Userspace<tms*>);
    ErrorOr<FlatPtr> sys$utime(Userspace<char const*> pathname, size_t path_length, Userspace<const struct utimbuf*>);
    ErrorOr<FlatPtr> sys$utimensat(Userspace<Syscall::SC_utimensat_params const*>);
    ErrorOr<FlatPtr> sys$link(Userspace<Syscall::SC_link_params const*>);
    ErrorOr<FlatPtr> sys$unlink(int dirfd, Userspace<char const*> pathname, size_t path_length, int flags);
    ErrorOr<FlatPtr> sys$symlink(Userspace<Syscall::SC_symlink_params const*>);
    ErrorOr<FlatPtr> sys$rmdir(Userspace<char const*> pathname, size_t path_length);
    ErrorOr<FlatPtr> sys$mount(Userspace<Syscall::SC_mount_params const*>);
    ErrorOr<FlatPtr> sys$umount(Userspace<char const*> mountpoint, size_t mountpoint_length);
    ErrorOr<FlatPtr> sys$chmod(Userspace<Syscall::SC_chmod_params const*>);
    ErrorOr<FlatPtr> sys$fchmod(int fd, mode_t);
    ErrorOr<FlatPtr> sys$chown(Userspace<Syscall::SC_chown_params const*>);
    ErrorOr<FlatPtr> sys$fchown(int fd, UserID, GroupID);
    ErrorOr<FlatPtr> sys$fsync(int fd);

    // Sockets.
    ErrorOr<FlatPtr> sys$socket(int domain, int type, int protocol);
    ErrorOr<FlatPtr> sys$bind(int sockfd, Userspace<sockaddr const*> addr, socklen_t);
    ErrorOr<FlatPtr> sys$listen(int sockfd, int backlog);
    ErrorOr<FlatPtr> sys$accept4(Userspace<Syscall::SC_accept4_params const*>);
    ErrorOr<FlatPtr> sys$connect(int sockfd, Userspace<sockaddr const*>, socklen_t);
    ErrorOr<FlatPtr> sys$shutdown(int sockfd, int how);
    ErrorOr<FlatPtr> sys$sendmsg(int sockfd, Userspace<const struct msghdr*>, int flags);
    ErrorOr<FlatPtr> sys$recvmsg(int sockfd, Userspace<struct msghdr*>, int flags);
    ErrorOr<FlatPtr> sys$getsockopt(Userspace<Syscall::SC_getsockopt_params const*>);
    ErrorOr<FlatPtr> sys$setsockopt(Userspace<Syscall::SC_setsockopt_params const*>);
    ErrorOr<FlatPtr> sys$getsockname(Userspace<Syscall::SC_getsockname_params const*>);
    ErrorOr<FlatPtr> sys$getpeername(Userspace<Syscall::SC_getpeername_params const*>);
    ErrorOr<FlatPtr> sys$socketpair(Userspace<Syscall::SC_socketpair_params const*>);

    // Scheduler parameters.
    ErrorOr<FlatPtr> sys$scheduler_set_parameters(Userspace<Syscall::SC_scheduler_parameters_params const*>);
    ErrorOr<FlatPtr> sys$scheduler_get_parameters(Userspace<Syscall::SC_scheduler_parameters_params*>);

    // Threads.
    ErrorOr<FlatPtr> sys$create_thread(void* (*)(void*), Userspace<Syscall::SC_create_thread_params const*>);
    [[noreturn]] void sys$exit_thread(Userspace<void*>, Userspace<void*>, size_t);
    ErrorOr<FlatPtr> sys$join_thread(pid_t tid, Userspace<void**> exit_value);
    ErrorOr<FlatPtr> sys$detach_thread(pid_t tid);
    ErrorOr<FlatPtr> sys$set_thread_name(pid_t tid, Userspace<char const*> buffer, size_t buffer_size);
    ErrorOr<FlatPtr> sys$get_thread_name(pid_t tid, Userspace<char*> buffer, size_t buffer_size);
    ErrorOr<FlatPtr> sys$kill_thread(pid_t tid, int signal);

    ErrorOr<FlatPtr> sys$rename(Userspace<Syscall::SC_rename_params const*>);
    ErrorOr<FlatPtr> sys$mknod(Userspace<Syscall::SC_mknod_params const*>);
    ErrorOr<FlatPtr> sys$realpath(Userspace<Syscall::SC_realpath_params const*>);
    ErrorOr<FlatPtr> sys$getrandom(Userspace<void*>, size_t, unsigned int);
    ErrorOr<FlatPtr> sys$getkeymap(Userspace<Syscall::SC_getkeymap_params const*>);
    ErrorOr<FlatPtr> sys$setkeymap(Userspace<Syscall::SC_setkeymap_params const*>);

    // Profiling; the non-sys$ overload is the shared implementation helper.
    ErrorOr<FlatPtr> sys$profiling_enable(pid_t, Userspace<u64 const*>);
    ErrorOr<FlatPtr> profiling_enable(pid_t, u64 event_mask);
    ErrorOr<FlatPtr> sys$profiling_disable(pid_t);
    ErrorOr<FlatPtr> sys$profiling_free_buffer(pid_t);

    ErrorOr<FlatPtr> sys$futex(Userspace<Syscall::SC_futex_params const*>);
    ErrorOr<FlatPtr> sys$pledge(Userspace<Syscall::SC_pledge_params const*>);
    ErrorOr<FlatPtr> sys$unveil(Userspace<Syscall::SC_unveil_params const*>);
    ErrorOr<FlatPtr> sys$perf_event(int type, FlatPtr arg1, FlatPtr arg2);
    ErrorOr<FlatPtr> sys$perf_register_string(Userspace<char const*>, size_t);
    ErrorOr<FlatPtr> sys$get_stack_bounds(Userspace<FlatPtr*> stack_base, Userspace<size_t*> stack_size);
    ErrorOr<FlatPtr> sys$ptrace(Userspace<Syscall::SC_ptrace_params const*>);
    ErrorOr<FlatPtr> sys$sendfd(int sockfd, int fd);
    ErrorOr<FlatPtr> sys$recvfd(int sockfd, int options);
    ErrorOr<FlatPtr> sys$sysconf(int name);
    ErrorOr<FlatPtr> sys$disown(ProcessID);
    ErrorOr<FlatPtr> sys$allocate_tls(Userspace<char const*> initial_data, size_t);
    ErrorOr<FlatPtr> sys$prctl(int option, FlatPtr arg1, FlatPtr arg2);
    ErrorOr<FlatPtr> sys$set_coredump_metadata(Userspace<Syscall::SC_set_coredump_metadata_params const*>);
    ErrorOr<FlatPtr> sys$anon_create(size_t, int options);
    ErrorOr<FlatPtr> sys$statvfs(Userspace<Syscall::SC_statvfs_params const*> user_params);
    ErrorOr<FlatPtr> sys$fstatvfs(int fd, statvfs* buf);
    ErrorOr<FlatPtr> sys$map_time_page();
    ErrorOr<FlatPtr> sys$jail_create(Userspace<Syscall::SC_jail_create_params*> user_params);
    ErrorOr<FlatPtr> sys$jail_attach(Userspace<Syscall::SC_jail_attach_params const*> user_params);

    // Shared implementation of sys$getsockname/sys$getpeername, selected by
    // the `sockname` template flag.
    template<bool sockname, typename Params>
    ErrorOr<void> get_sock_or_peer_name(Params const&);

    static void initialize();

    // Kills the process in response to a fatal condition at instruction `ip`.
    [[noreturn]] void crash(int signal, FlatPtr ip, bool out_of_memory = false);
    [[nodiscard]] siginfo_t wait_info() const;

    const TTY* tty() const { return m_tty; }
    void set_tty(TTY*);

    // Accounting tick counters (user/kernel, plus totals inherited from
    // reaped children).
    u32 m_ticks_in_user { 0 };
    u32 m_ticks_in_kernel { 0 };
    u32 m_ticks_in_user_for_dead_children { 0 };
    u32 m_ticks_in_kernel_for_dead_children { 0 };

    NonnullRefPtr<Custody> current_directory();
    RefPtr<Custody> executable();
    RefPtr<Custody const> executable() const;

    // Size caps for execve() argument/environment/aux vectors, derived from
    // the default userspace stack size.
    static constexpr size_t max_arguments_size = Thread::default_userspace_stack_size / 8;
    static constexpr size_t max_environment_size = Thread::default_userspace_stack_size / 8;
    static constexpr size_t max_auxiliary_size = Thread::default_userspace_stack_size / 8;
    NonnullOwnPtrVector<KString> const& arguments() const { return m_arguments; };
    NonnullOwnPtrVector<KString> const& environment() const { return m_environment; };

    ErrorOr<void> exec(NonnullOwnPtr<KString> path, NonnullOwnPtrVector<KString> arguments, NonnullOwnPtrVector<KString> environment, Thread*& new_main_thread, u32& prev_flags, int recursion_depth = 0);

    ErrorOr<LoadResult> load(NonnullLockRefPtr<OpenFileDescription> main_program_description, LockRefPtr<OpenFileDescription> interpreter_description, const ElfW(Ehdr) & main_program_header);

    void terminate_due_to_signal(u8 signal);
    ErrorOr<void> send_signal(u8 signal, Process* sender);
    u8 termination_signal() const
    {
        return with_protected_data([](auto& protected_data) -> u8 {
            return protected_data.termination_signal;
        });
    }
    u8 termination_status() const
    {
        return with_protected_data([](auto& protected_data) { return protected_data.termination_status; });
    }

    // NOTE: the stored count is an Atomic<u32>; the return narrows to u16.
    u16 thread_count() const
    {
        return with_protected_data([](auto& protected_data) {
            return protected_data.thread_count.load(AK::MemoryOrder::memory_order_relaxed);
        });
    }

    Mutex& big_lock() { return m_big_lock; }
    Mutex& ptrace_lock() { return m_ptrace_lock; }

    bool has_promises() const
    {
        return with_protected_data([](auto& protected_data) { return protected_data.has_promises.load(); });
    }
    // True if `pledge` is set in the promises bitmask; the Pledge enumerator
    // ordinal is the bit index.
    bool has_promised(Pledge pledge) const
    {
        return with_protected_data([&](auto& protected_data) {
            return (protected_data.promises & (1U << (u32)pledge)) != 0;
        });
    }
    VeilState veil_state() const
    {
        return m_unveil_data.with([&](auto const& unveil_data) { return unveil_data.state; });
    }

    // Unveil tree plus its lock state; `paths` is the root UnveilNode.
    struct UnveilData {
        explicit UnveilData(UnveilNode&& p)
            : paths(move(p))
        {
        }
        VeilState state { VeilState::None };
        UnveilNode paths;
    };
    auto& unveil_data() { return m_unveil_data; }
    auto const& unveil_data() const { return m_unveil_data; }
    auto& exec_unveil_data() { return m_exec_unveil_data; }
    auto const& exec_unveil_data() const { return m_exec_unveil_data; }

    // When set, the process waits for its tracer at the next execve()
    // (presumably for PT_TRACE_ME-style debugging) -- TODO confirm against
    // the sys$execve()/sys$ptrace() implementations.
    bool wait_for_tracer_at_next_execve() const
    {
        return m_wait_for_tracer_at_next_execve;
    }
    void set_wait_for_tracer_at_next_execve(bool val)
    {
        m_wait_for_tracer_at_next_execve = val;
    }
    // ptrace helpers for reading/writing another process's memory.
    ErrorOr<void> peek_user_data(Span<u8> destination, Userspace<u8 const*> address);
    ErrorOr<FlatPtr> peek_user_data(Userspace<FlatPtr const*> address);
    ErrorOr<void> poke_user_data(Userspace<FlatPtr*> address, FlatPtr data);

    void disowned_by_waiter(Process& process);
    void unblock_waiters(Thread::WaitBlocker::UnblockFlags, u8 signal = 0);
    Thread::WaitBlockerSet& wait_blocker_set() { return m_wait_blocker_set; }

    // Invokes `callback(key, value)` for each coredump property that has both
    // a key and a value; stops at and propagates the first error.
    template<typename Callback>
    ErrorOr<void> for_each_coredump_property(Callback callback) const
    {
        return m_coredump_properties.with([&](auto const& coredump_properties) -> ErrorOr<void> {
            for (auto const& property : coredump_properties) {
                if (property.key && property.value)
                    TRY(callback(*property.key, *property.value));
            }
            return {};
        });
    }
    ErrorOr<void> set_coredump_property(NonnullOwnPtr<KString> key, NonnullOwnPtr<KString> value);
    ErrorOr<void> try_set_coredump_property(StringView key, StringView value);
    NonnullLockRefPtrVector<Thread> const& threads_for_coredump(Badge<Coredump>) const { return m_threads_for_coredump; }

    // Performance event buffer; may be null if profiling is not active.
    PerformanceEventBuffer* perf_events() { return m_perf_event_buffer; }
    PerformanceEventBuffer const* perf_events() const { return m_perf_event_buffer; }

    // Spinlock-protected address space; null presumably once the process is
    // finalized -- TODO confirm.
    SpinlockProtected<OwnPtr<Memory::AddressSpace>, LockRank::None>& address_space() { return m_space; }
    SpinlockProtected<OwnPtr<Memory::AddressSpace>, LockRank::None> const& address_space() const { return m_space; }

    VirtualAddress signal_trampoline() const
    {
        return with_protected_data([](auto& protected_data) { return protected_data.signal_trampoline; });
    }
  508. ErrorOr<void> require_promise(Pledge);
  509. ErrorOr<void> require_no_promises() const;
  510. ErrorOr<void> validate_mmap_prot(int prot, bool map_stack, bool map_anonymous, Memory::Region const* region = nullptr) const;
  511. ErrorOr<void> validate_inode_mmap_prot(int prot, bool description_readable, bool description_writable, bool map_shared) const;
private:
// Kernel subsystems that access process internals directly.
friend class MemoryManager;
friend class Scheduler;
friend class Region;
friend class PerformanceManager;

// Add/remove a thread from this process's thread list.
// NOTE(review): the meaning of the bool return is not visible in this header
// (presumably "process was not already dying") — confirm at the definitions.
bool add_thread(Thread&);
bool remove_thread(Thread&);

Process(NonnullOwnPtr<KString> name, NonnullRefPtr<Credentials>, ProcessID ppid, bool is_kernel_process, RefPtr<Custody> current_directory, RefPtr<Custody> executable, TTY* tty, UnveilNode unveil_tree, UnveilNode exec_unveil_tree);

// Creates a Process together with its first thread (returned via first_thread).
// fork_parent is non-null when the process is being created by fork().
static ErrorOr<NonnullLockRefPtr<Process>> try_create(LockRefPtr<Thread>& first_thread, NonnullOwnPtr<KString> name, UserID, GroupID, ProcessID ppid, bool is_kernel_process, RefPtr<Custody> current_directory = nullptr, RefPtr<Custody> executable = nullptr, TTY* = nullptr, Process* fork_parent = nullptr);
// Second-phase initialization: attaches the address space and first thread.
ErrorOr<void> attach_resources(NonnullOwnPtr<Memory::AddressSpace>&&, LockRefPtr<Thread>& first_thread, Process* fork_parent);
static ProcessID allocate_pid();

// Thread teardown helpers used on the exec()/exit() paths.
void kill_threads_except_self();
void kill_all_threads();

// Coredump / perfcore file emission and perf-buffer lifecycle.
ErrorOr<void> dump_core();
ErrorOr<void> dump_perfcore();
bool create_perf_events_buffer_if_needed();
void delete_perf_events_buffer();

// The guts of execve(): replaces this process's image with main_program_description,
// optionally loading it through an ELF interpreter (dynamic loader).
ErrorOr<void> do_exec(NonnullLockRefPtr<OpenFileDescription> main_program_description, NonnullOwnPtrVector<KString> arguments, NonnullOwnPtrVector<KString> environment, LockRefPtr<OpenFileDescription> interpreter_description, Thread*& new_main_thread, u32& prev_flags, const ElfW(Ehdr) & main_program_header);
ErrorOr<FlatPtr> do_write(OpenFileDescription&, UserOrKernelBuffer const&, size_t, Optional<off_t> = {});
// NOTE(review): the first parameter is named `path` but is actually a
// FileSystem reference — consider renaming at the definition as well.
ErrorOr<FlatPtr> do_statvfs(FileSystem const& path, Custody const*, statvfs* buf);
// Resolves the PT_INTERP interpreter (if any) for an executable being exec'd.
ErrorOr<LockRefPtr<OpenFileDescription>> find_elf_interpreter_for_executable(StringView path, ElfW(Ehdr) const& main_executable_header, size_t main_executable_header_size, size_t file_size);

// kill() helpers for the various target scopes (single process, process
// group, everyone, self).
ErrorOr<void> do_kill(Process&, int signal);
ErrorOr<void> do_killpg(ProcessGroupID pgrp, int signal);
ErrorOr<void> do_killall(int signal);
ErrorOr<void> do_killself(int signal);
// waitid() worker: waitee selects a specific process, a process group, or any child.
ErrorOr<siginfo_t> do_waitid(Variant<Empty, NonnullLockRefPtr<Process>, NonnullLockRefPtr<ProcessGroup>> waitee, int options);

// Copies a syscall path argument out of userspace into a kernel KString.
static ErrorOr<NonnullOwnPtr<KString>> get_syscall_path_argument(Userspace<char const*> user_path, size_t path_length);
static ErrorOr<NonnullOwnPtr<KString>> get_syscall_path_argument(Syscall::StringArgument const&);

bool has_tracee_thread(ProcessID tracer_pid);

// exec()-time cleanup of signal handlers and futex queues.
void clear_signal_handlers_for_exec();
void clear_futex_queues_on_exec();
// Computes the global (cross-process) key for a futex word; `shared`
// distinguishes process-private from shared mappings.
ErrorOr<GlobalFutexKey> get_futex_key(FlatPtr user_address, bool shared);
ErrorOr<void> remap_range_as_stack(FlatPtr address, size_t size);
ErrorOr<FlatPtr> read_impl(int fd, Userspace<u8*> buffer, size_t size);
public:
NonnullLockRefPtr<ProcessProcFSTraits> procfs_traits() const { return *m_procfs_traits; }

// ProcFS content generators: each serializes one per-process file
// (fds, perf events, unveil/pledge state, vm stats, cmdline, ...) into the
// given KBufferBuilder.
ErrorOr<void> procfs_get_fds_stats(KBufferBuilder& builder) const;
ErrorOr<void> procfs_get_perf_events(KBufferBuilder& builder) const;
ErrorOr<void> procfs_get_unveil_stats(KBufferBuilder& builder) const;
ErrorOr<void> procfs_get_pledge_stats(KBufferBuilder& builder) const;
ErrorOr<void> procfs_get_virtual_memory_stats(KBufferBuilder& builder) const;
ErrorOr<void> procfs_get_binary_link(KBufferBuilder& builder) const;
ErrorOr<void> procfs_get_current_work_directory_link(KBufferBuilder& builder) const;
ErrorOr<void> procfs_get_command_line(KBufferBuilder& builder) const;
mode_t binary_link_required_mode() const;
ErrorOr<void> procfs_get_thread_stack(ThreadID thread_id, KBufferBuilder& builder) const;

// ProcFS directory traversal/lookup for the per-process "stacks",
// file-description and children directories.
ErrorOr<void> traverse_stacks_directory(FileSystemID, Function<ErrorOr<void>(FileSystem::DirectoryEntryView const&)> callback) const;
ErrorOr<NonnullLockRefPtr<Inode>> lookup_stacks_directory(ProcFS const&, StringView name) const;
ErrorOr<size_t> procfs_get_file_description_link(unsigned fd, KBufferBuilder& builder) const;
ErrorOr<void> traverse_file_descriptions_directory(FileSystemID, Function<ErrorOr<void>(FileSystem::DirectoryEntryView const&)> callback) const;
ErrorOr<NonnullLockRefPtr<Inode>> lookup_file_descriptions_directory(ProcFS const&, StringView name) const;
ErrorOr<NonnullLockRefPtr<Inode>> lookup_children_directory(ProcFS const&, StringView name) const;
ErrorOr<void> traverse_children_directory(FileSystemID, Function<ErrorOr<void>(FileSystem::DirectoryEntryView const&)> callback) const;
// FIXME: "proccess" is a typo, but renaming must be done together with the
// definition and all callers, not in this header alone.
ErrorOr<size_t> procfs_get_child_proccess_link(ProcessID child_pid, KBufferBuilder& builder) const;
private:
// Returns the buffer perf events should currently be recorded into:
// the global buffer while system-wide profiling is active, this process's
// own buffer while per-process profiling is enabled, otherwise nullptr.
inline PerformanceEventBuffer* current_perf_events_buffer()
{
    if (g_profiling_all_threads)
        return g_global_perf_events;
    if (m_profiling)
        return m_perf_event_buffer.ptr();
    return nullptr;
}

// Hook for the global process list (see Process::List below).
IntrusiveListNode<Process> m_list_node;
NonnullOwnPtr<KString> m_name;

SpinlockProtected<OwnPtr<Memory::AddressSpace>, LockRank::None> m_space;

LockRefPtr<ProcessGroup> m_pg;

// Guards access to the write-protected data page; recursive because
// protect/unprotect sections can nest. See protect_data()/unprotect_data().
RecursiveSpinlock<LockRank::None> mutable m_protected_data_lock;
// Nesting counter for outstanding unprotect requests (AtomicEdgeAction fires
// only on the 0->1 / 1->0 transitions).
AtomicEdgeAction<u32> m_protected_data_refs;
void protect_data();
void unprotect_data();

// Non-null while a tracer (ptrace) is attached to this process.
OwnPtr<ThreadTracer> m_tracer;

public:
// One slot of the per-process file descriptor table: the description itself,
// the per-fd flags, and an "allocated" marker used while an fd number has
// been reserved but not yet populated with a description.
class OpenFileDescriptionAndFlags {
public:
    // A slot is "valid" once it actually holds a description.
    bool is_valid() const { return !m_description.is_null(); }
    // A slot is "allocated" from the moment its fd number is reserved.
    bool is_allocated() const { return m_is_allocated; }
    // Reserve this slot; it must be neither allocated nor populated yet.
    void allocate()
    {
        VERIFY(!m_is_allocated);
        VERIFY(!is_valid());
        m_is_allocated = true;
    }
    // Release a reservation that was never populated with a description.
    void deallocate()
    {
        VERIFY(m_is_allocated);
        VERIFY(!is_valid());
        m_is_allocated = false;
    }

    OpenFileDescription* description() { return m_description; }
    OpenFileDescription const* description() const { return m_description; }
    // Per-fd flags (presumably FD_CLOEXEC-style fd flags — confirm at call sites).
    u32 flags() const { return m_flags; }
    void set_flags(u32 flags) { m_flags = flags; }

    void clear();
    void set(NonnullLockRefPtr<OpenFileDescription>&&, u32 flags = 0);

private:
    LockRefPtr<OpenFileDescription> m_description;
    bool m_is_allocated { false };
    u32 m_flags { 0 };
};
class ScopedDescriptionAllocation;

// The per-process open file description table. Instances are only accessed
// through MutexProtected (see m_fds), so the table itself does no locking.
class OpenFileDescriptions {
    AK_MAKE_NONCOPYABLE(OpenFileDescriptions);
    AK_MAKE_NONMOVABLE(OpenFileDescriptions);
    friend class Process;

public:
    OpenFileDescriptions() { }
    ALWAYS_INLINE OpenFileDescriptionAndFlags const& operator[](size_t i) const { return at(i); }
    ALWAYS_INLINE OpenFileDescriptionAndFlags& operator[](size_t i) { return at(i); }

    // Copies every slot of `other` into this table (e.g. when duplicating a
    // process's fds); fails only if resizing the backing vector fails.
    ErrorOr<void> try_clone(Kernel::Process::OpenFileDescriptions const& other)
    {
        TRY(try_resize(other.m_fds_metadatas.size()));
        for (size_t i = 0; i < other.m_fds_metadatas.size(); ++i) {
            m_fds_metadatas[i] = other.m_fds_metadatas[i];
        }
        return {};
    }

    OpenFileDescriptionAndFlags const& at(size_t i) const;
    OpenFileDescriptionAndFlags& at(size_t i);
    // Like at(), but returns nullptr for out-of-range or unpopulated slots.
    OpenFileDescriptionAndFlags const* get_if_valid(size_t i) const;
    OpenFileDescriptionAndFlags* get_if_valid(size_t i);

    void enumerate(Function<void(OpenFileDescriptionAndFlags const&)>) const;
    ErrorOr<void> try_enumerate(Function<ErrorOr<void>(OpenFileDescriptionAndFlags const&)>) const;
    void change_each(Function<void(OpenFileDescriptionAndFlags&)>);

    // Reserves the first free fd slot at or above first_candidate_fd.
    ErrorOr<ScopedDescriptionAllocation> allocate(int first_candidate_fd = 0);
    size_t open_count() const;

    ErrorOr<void> try_resize(size_t size) { return m_fds_metadatas.try_resize(size); }

    static constexpr size_t max_open()
    {
        return s_max_open_file_descriptors;
    }

    void clear()
    {
        m_fds_metadatas.clear();
    }

    ErrorOr<NonnullLockRefPtr<OpenFileDescription>> open_file_description(int fd) const;

private:
    // Hard cap on fds per process, tied to FD_SETSIZE so select() bitmaps fit.
    static constexpr size_t s_max_open_file_descriptors { FD_SETSIZE };
    Vector<OpenFileDescriptionAndFlags> m_fds_metadatas;
};
  652. class ScopedDescriptionAllocation {
  653. AK_MAKE_NONCOPYABLE(ScopedDescriptionAllocation);
  654. public:
  655. ScopedDescriptionAllocation() = default;
  656. ScopedDescriptionAllocation(int tracked_fd, OpenFileDescriptionAndFlags* description)
  657. : fd(tracked_fd)
  658. , m_description(description)
  659. {
  660. }
  661. ScopedDescriptionAllocation(ScopedDescriptionAllocation&& other)
  662. : fd(other.fd)
  663. {
  664. // Take over the responsibility of tracking to deallocation.
  665. swap(m_description, other.m_description);
  666. }
  667. ScopedDescriptionAllocation& operator=(ScopedDescriptionAllocation&& other)
  668. {
  669. if (this != &other) {
  670. m_description = exchange(other.m_description, nullptr);
  671. fd = exchange(other.fd, -1);
  672. }
  673. return *this;
  674. }
  675. ~ScopedDescriptionAllocation()
  676. {
  677. if (m_description && m_description->is_allocated() && !m_description->is_valid()) {
  678. m_description->deallocate();
  679. }
  680. }
  681. int fd { -1 };
  682. private:
  683. OpenFileDescriptionAndFlags* m_description { nullptr };
  684. };
// Glue between a Process and ProcFS: exposes this process's directory as a
// ProcFS component (inode index, traversal, ownership and permissions).
class ProcessProcFSTraits : public ProcFSExposedComponent {
public:
    static ErrorOr<NonnullLockRefPtr<ProcessProcFSTraits>> try_create(Badge<Process>, LockWeakPtr<Process> process)
    {
        return adopt_nonnull_lock_ref_or_enomem(new (nothrow) ProcessProcFSTraits(move(process)));
    }

    virtual InodeIndex component_index() const override;
    virtual ErrorOr<NonnullLockRefPtr<Inode>> to_inode(ProcFS const& procfs_instance) const override;
    virtual ErrorOr<void> traverse_as_directory(FileSystemID, Function<ErrorOr<void>(FileSystem::DirectoryEntryView const&)>) const override;
    // r-x for everyone: the process directory itself is world-listable.
    virtual mode_t required_mode() const override { return 0555; }
    virtual UserID owner_user() const override;
    virtual GroupID owner_group() const override;

private:
    explicit ProcessProcFSTraits(LockWeakPtr<Process> process)
        : m_process(move(process))
    {
    }

    // NOTE: We need to weakly hold on to the process, because otherwise
    // we would be creating a reference cycle.
    LockWeakPtr<Process> m_process;
};
  706. MutexProtected<OpenFileDescriptions>& fds() { return m_fds; }
  707. MutexProtected<OpenFileDescriptions> const& fds() const { return m_fds; }
  708. ErrorOr<NonnullLockRefPtr<OpenFileDescription>> open_file_description(int fd)
  709. {
  710. return m_fds.with_shared([fd](auto& fds) { return fds.open_file_description(fd); });
  711. }
  712. ErrorOr<NonnullLockRefPtr<OpenFileDescription>> open_file_description(int fd) const
  713. {
  714. return m_fds.with_shared([fd](auto& fds) { return fds.open_file_description(fd); });
  715. }
  716. ErrorOr<ScopedDescriptionAllocation> allocate_fd()
  717. {
  718. return m_fds.with_exclusive([](auto& fds) { return fds.allocate(); });
  719. }
private:
ErrorOr<NonnullRefPtr<Custody>> custody_for_dirfd(int dirfd);

SpinlockProtected<Thread::ListInProcess, LockRank::None>& thread_list() { return m_thread_list; }
SpinlockProtected<Thread::ListInProcess, LockRank::None> const& thread_list() const { return m_thread_list; }

ErrorOr<NonnullRefPtr<Thread>> get_thread_from_pid_or_tid(pid_t pid_or_tid, Syscall::SchedulerParametersMode mode);

// NOTE: Member order below matters — the whole object must stay within the
// two-page layout asserted after the class definition.

SpinlockProtected<Thread::ListInProcess, LockRank::None> m_thread_list {};

// Open fd table: shared lock for lookups, exclusive for allocation
// (see open_file_description()/allocate_fd() above).
MutexProtected<OpenFileDescriptions> m_fds;

bool const m_is_kernel_process;
Atomic<State> m_state { State::Running };
bool m_profiling { false };
Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_stopped { false };
bool m_should_generate_coredump { false };

// The executable being run and the current working directory.
SpinlockProtected<RefPtr<Custody>, LockRank::None> m_executable;
SpinlockProtected<RefPtr<Custody>, LockRank::None> m_current_directory;

// argv/envp as captured at exec time.
NonnullOwnPtrVector<KString> m_arguments;
NonnullOwnPtrVector<KString> m_environment;

LockRefPtr<TTY> m_tty;

// Master TLS template region/size/alignment used when creating new threads.
LockWeakPtr<Memory::Region> m_master_tls_region;

IntrusiveListNode<Process> m_jail_list_node;
SpinlockProtected<RefPtr<Jail>, LockRank::Process> m_attached_jail {};

size_t m_master_tls_size { 0 };
size_t m_master_tls_alignment { 0 };

// The per-process "big lock" serializing most syscalls, and the ptrace lock.
Mutex m_big_lock { "Process"sv, Mutex::MutexBehavior::BigLock };
Mutex m_ptrace_lock { "ptrace"sv };

// Timer backing alarm().
LockRefPtr<Timer> m_alarm_timer;

// unveil() state; m_exec_unveil_data holds the tree that applies after exec.
SpinlockProtected<UnveilData, LockRank::None> m_unveil_data;
SpinlockProtected<UnveilData, LockRank::None> m_exec_unveil_data;

OwnPtr<PerformanceEventBuffer> m_perf_event_buffer;

// This member is used in the implementation of ptrace's PT_TRACEME flag.
// If it is set to true, the process will stop at the next execve syscall
// and wait for a tracer to attach.
bool m_wait_for_tracer_at_next_execve { false };

Thread::WaitBlockerSet m_wait_blocker_set;

// Up to four key/value metadata pairs attached to this process's coredump.
struct CoredumpProperty {
    OwnPtr<KString> key;
    OwnPtr<KString> value;
};
SpinlockProtected<Array<CoredumpProperty, 4>, LockRank::None> m_coredump_properties {};
NonnullLockRefPtrVector<Thread> m_threads_for_coredump;

mutable LockRefPtr<ProcessProcFSTraits> m_procfs_traits;

// Per-signal disposition as configured via sigaction().
struct SignalActionData {
    VirtualAddress handler_or_sigaction;
    int flags { 0 };
    u32 mask { 0 };
};
Array<SignalActionData, NSIG> m_signal_action_data;

// The protected values plus padding fill exactly one page, aligned on a page
// boundary, so that page can be write-protected (see protect_data() /
// unprotect_data()); never touch the member directly — go through
// with_protected_data().
static_assert(sizeof(ProtectedValues) < (PAGE_SIZE));
alignas(4096) ProtectedValues m_protected_values_do_not_access_directly;
u8 m_protected_values_padding[PAGE_SIZE - sizeof(ProtectedValues)];

public:
using List = IntrusiveListRelaxedConst<&Process::m_list_node>;
static SpinlockProtected<Process::List, LockRank::None>& all_instances();
  772. };
// Note: Process object should be 2 pages of 4096 bytes each.
// It's not expected that the Process object will expand further because the first
// page is used for all unprotected values (which should be plenty of space for them).
// The second page is being used exclusively for write-protected values.
// This assertion keeps the layout honest: adding members that push the object
// past two pages breaks the write-protection scheme, so it must fail loudly.
static_assert(AssertSize<Process, (PAGE_SIZE * 2)>());

extern RecursiveSpinlock<LockRank::None> g_profiling_lock;
  779. template<IteratorFunction<Thread&> Callback>
  780. inline IterationDecision Process::for_each_thread(Callback callback)
  781. {
  782. return thread_list().with([&](auto& thread_list) -> IterationDecision {
  783. for (auto& thread : thread_list) {
  784. IterationDecision decision = callback(thread);
  785. if (decision != IterationDecision::Continue)
  786. return decision;
  787. }
  788. return IterationDecision::Continue;
  789. });
  790. }
  791. template<IteratorFunction<Process&> Callback>
  792. inline void Process::for_each_ignoring_jails(Callback callback)
  793. {
  794. Process::all_instances().with([&](auto const& list) {
  795. for (auto it = list.begin(); it != list.end();) {
  796. auto& process = *it;
  797. ++it;
  798. if (callback(process) == IterationDecision::Break)
  799. break;
  800. }
  801. });
  802. }
  803. template<IteratorFunction<Thread&> Callback>
  804. inline IterationDecision Process::for_each_thread(Callback callback) const
  805. {
  806. return thread_list().with([&](auto& thread_list) -> IterationDecision {
  807. for (auto& thread : thread_list) {
  808. IterationDecision decision = callback(thread);
  809. if (decision != IterationDecision::Continue)
  810. return decision;
  811. }
  812. return IterationDecision::Continue;
  813. });
  814. }
  815. template<VoidFunction<Thread&> Callback>
  816. inline IterationDecision Process::for_each_thread(Callback callback) const
  817. {
  818. thread_list().with([&](auto& thread_list) {
  819. for (auto& thread : thread_list)
  820. callback(thread);
  821. });
  822. return IterationDecision::Continue;
  823. }
  824. inline ErrorOr<void> Process::try_for_each_thread(Function<ErrorOr<void>(Thread const&)> callback) const
  825. {
  826. return thread_list().with([&](auto& thread_list) -> ErrorOr<void> {
  827. for (auto& thread : thread_list)
  828. TRY(callback(thread));
  829. return {};
  830. });
  831. }
  832. template<VoidFunction<Thread&> Callback>
  833. inline IterationDecision Process::for_each_thread(Callback callback)
  834. {
  835. thread_list().with([&](auto& thread_list) {
  836. for (auto& thread : thread_list)
  837. callback(thread);
  838. });
  839. return IterationDecision::Continue;
  840. }
// Defined here rather than in Thread's own header so that the complete
// Process type is available for the m_process dereference.
inline ProcessID Thread::pid() const
{
    return m_process->pid();
}
  845. }
// Asserts that the current thread exclusively holds the given process's big lock.
#define VERIFY_PROCESS_BIG_LOCK_ACQUIRED(process) \
    VERIFY(process->big_lock().is_exclusively_locked_by_current_thread())

// Asserts that the current thread does NOT exclusively hold the given
// process's big lock.
#define VERIFY_NO_PROCESS_BIG_LOCK(process) \
    VERIFY(!process->big_lock().is_exclusively_locked_by_current_thread())
  850. inline ErrorOr<NonnullOwnPtr<KString>> try_copy_kstring_from_user(Kernel::Syscall::StringArgument const& string)
  851. {
  852. Userspace<char const*> characters((FlatPtr)string.characters);
  853. return try_copy_kstring_from_user(characters, string.length);
  854. }
// Formats a Process as "name(pid)" for AK::format()/dbgln().
template<>
struct AK::Formatter<Kernel::Process> : AK::Formatter<FormatString> {
    ErrorOr<void> format(FormatBuilder& builder, Kernel::Process const& value)
    {
        return AK::Formatter<FormatString>::format(builder, "{}({})"sv, value.name(), value.pid().value());
    }
};
  862. namespace AK {
  863. template<>
  864. struct Traits<Kernel::GlobalFutexKey> : public GenericTraits<Kernel::GlobalFutexKey> {
  865. static unsigned hash(Kernel::GlobalFutexKey const& futex_key) { return pair_int_hash(ptr_hash(futex_key.raw.parent), ptr_hash(futex_key.raw.offset)); }
  866. static bool equals(Kernel::GlobalFutexKey const& a, Kernel::GlobalFutexKey const& b) { return a.raw.parent == b.raw.parent && a.raw.offset == b.raw.offset; }
  867. };
  868. };