/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Concepts.h>
#include <AK/EnumBits.h>
#include <AK/Error.h>
#include <AK/IntrusiveList.h>
#include <AK/Optional.h>
#include <AK/OwnPtr.h>
#include <AK/Time.h>
#include <AK/Variant.h>
#include <AK/Vector.h>
#include <Kernel/API/POSIX/sched.h>
#include <Kernel/API/POSIX/select.h>
#include <Kernel/API/POSIX/signal_numbers.h>
#include <Kernel/Arch/RegisterState.h>
#include <Kernel/Arch/ThreadRegisters.h>
#include <Kernel/Debug.h>
#include <Kernel/Forward.h>
#include <Kernel/Library/KString.h>
#include <Kernel/Library/ListedRefCounted.h>
#include <Kernel/Library/LockWeakPtr.h>
#include <Kernel/Library/LockWeakable.h>
#include <Kernel/Locking/LockLocation.h>
#include <Kernel/Locking/LockMode.h>
#include <Kernel/Locking/LockRank.h>
#include <Kernel/Locking/SpinlockProtected.h>
#include <Kernel/Memory/VirtualRange.h>
#include <Kernel/UnixTypes.h>

namespace Kernel {

class Timer;

enum class DispatchSignalResult {
    Deferred = 0,
    Yield,
    Terminate,
    Continue
};

struct ThreadSpecificData {
    ThreadSpecificData* self;
};

#define THREAD_AFFINITY_DEFAULT 0xffffffff
class Thread
    : public ListedRefCounted<Thread, LockType::Spinlock>
    , public LockWeakable<Thread> {
    AK_MAKE_NONCOPYABLE(Thread);
    AK_MAKE_NONMOVABLE(Thread);

    friend class Mutex;
    friend class Process;
    friend class Scheduler;
    friend struct ThreadReadyQueue;

public:
    static Thread* current()
    {
        return Processor::current_thread();
    }

    static ErrorOr<NonnullRefPtr<Thread>> create(NonnullRefPtr<Process>);
    ~Thread();

    static RefPtr<Thread> from_tid(ThreadID);
    static void finalize_dying_threads();

    ThreadID tid() const { return m_tid; }
    ProcessID pid() const;

    void set_priority(u32 p) { m_priority = p; }
    u32 priority() const { return m_priority; }

    void detach()
    {
        SpinlockLocker lock(m_lock);
        m_is_joinable = false;
    }

    [[nodiscard]] bool is_joinable() const
    {
        SpinlockLocker lock(m_lock);
        return m_is_joinable;
    }

    Process& process() { return m_process; }
    Process const& process() const { return m_process; }

    SpinlockProtected<NonnullOwnPtr<KString>, LockRank::None> const& name() const
    {
        return m_name;
    }

    void set_name(NonnullOwnPtr<KString> name);

    void finalize();

    enum class State : u8 {
        Invalid = 0,
        Runnable,
        Running,
        Dying,
        Dead,
        Stopped,
        Blocked,
    };
    class [[nodiscard]] BlockResult {
    public:
        enum Type {
            WokeNormally,
            NotBlocked,
            InterruptedBySignal,
            InterruptedByDeath,
            InterruptedByTimeout,
        };

        BlockResult() = delete;

        BlockResult(Type type)
            : m_type(type)
        {
        }

        bool operator==(Type type) const
        {
            return m_type == type;
        }
        bool operator!=(Type type) const
        {
            return m_type != type;
        }

        [[nodiscard]] bool was_interrupted() const
        {
            switch (m_type) {
            case InterruptedBySignal:
            case InterruptedByDeath:
                return true;
            default:
                return false;
            }
        }

    private:
        Type m_type;
    };
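
    // Illustrative sketch (not itself part of this header's API): callers of the
    // blocking primitives below typically translate a BlockResult into an
    // errno-style return. `duration` stands in for any AK::Duration value:
    //
    //     auto result = Thread::current()->sleep(duration);
    //     if (result.was_interrupted())
    //         return EINTR; // interrupted by a signal or by impending death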
    class BlockTimeout {
    public:
        BlockTimeout()
            : m_infinite(true)
        {
        }
        explicit BlockTimeout(bool is_absolute, Duration const* time, Duration const* start_time = nullptr, clockid_t clock_id = CLOCK_MONOTONIC_COARSE);

        Duration const& absolute_time() const { return m_time; }
        Duration const* start_time() const { return !m_infinite ? &m_start_time : nullptr; }
        clockid_t clock_id() const { return m_clock_id; }
        bool is_infinite() const { return m_infinite; }

    private:
        Duration m_time {};
        Duration m_start_time {};
        clockid_t m_clock_id { CLOCK_MONOTONIC_COARSE };
        bool m_infinite { false };
    };
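
    // Illustrative sketch: a default-constructed BlockTimeout means "block forever",
    // while a finite timeout wraps a Duration. The variable names are assumptions
    // made for the example:
    //
    //     Duration timeout_duration = Duration::from_seconds(5);
    //     Thread::BlockTimeout timeout(false /* relative, not absolute */, &timeout_duration);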
    class BlockerSet;

    class Blocker {
        AK_MAKE_NONMOVABLE(Blocker);
        AK_MAKE_NONCOPYABLE(Blocker);

    public:
        enum class Type {
            Unknown = 0,
            File,
            Futex,
            Plan9FS,
            Join,
            Queue,
            Routing,
            Sleep,
            Signal,
            Wait,
            Flock
        };
        virtual ~Blocker();
        virtual StringView state_string() const = 0;
        virtual Type blocker_type() const = 0;
        virtual BlockTimeout const& override_timeout(BlockTimeout const& timeout) { return timeout; }
        virtual bool can_be_interrupted() const { return true; }
        virtual bool setup_blocker();
        virtual void finalize();

        Thread& thread() { return m_thread; }

        enum class UnblockImmediatelyReason {
            UnblockConditionAlreadyMet,
            TimeoutInThePast,
        };

        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) = 0;

        virtual void was_unblocked(bool did_timeout)
        {
            if (did_timeout) {
                SpinlockLocker lock(m_lock);
                m_did_timeout = true;
            }
        }
        void set_interrupted_by_death()
        {
            SpinlockLocker lock(m_lock);
            do_set_interrupted_by_death();
        }
        void set_interrupted_by_signal(u8 signal)
        {
            SpinlockLocker lock(m_lock);
            do_set_interrupted_by_signal(signal);
        }
        u8 was_interrupted_by_signal() const
        {
            SpinlockLocker lock(m_lock);
            return do_get_interrupted_by_signal();
        }
        virtual Thread::BlockResult block_result()
        {
            SpinlockLocker lock(m_lock);
            if (m_was_interrupted_by_death)
                return Thread::BlockResult::InterruptedByDeath;
            if (m_was_interrupted_by_signal != 0)
                return Thread::BlockResult::InterruptedBySignal;
            if (m_did_timeout)
                return Thread::BlockResult::InterruptedByTimeout;
            return Thread::BlockResult::WokeNormally;
        }

        void begin_blocking(Badge<Thread>);
        BlockResult end_blocking(Badge<Thread>, bool);

    protected:
        Blocker()
            : m_thread(*Thread::current())
        {
        }

        void do_set_interrupted_by_death()
        {
            m_was_interrupted_by_death = true;
        }
        void do_set_interrupted_by_signal(u8 signal)
        {
            VERIFY(signal != 0);
            m_was_interrupted_by_signal = signal;
        }
        void do_clear_interrupted_by_signal()
        {
            m_was_interrupted_by_signal = 0;
        }
        u8 do_get_interrupted_by_signal() const
        {
            return m_was_interrupted_by_signal;
        }
        [[nodiscard]] bool was_interrupted() const
        {
            return m_was_interrupted_by_death || m_was_interrupted_by_signal != 0;
        }
        void unblock_from_blocker()
        {
            {
                SpinlockLocker lock(m_lock);
                if (!m_is_blocking)
                    return;
                m_is_blocking = false;
            }
            m_thread->unblock_from_blocker(*this);
        }

        bool add_to_blocker_set(BlockerSet&, void* = nullptr);
        void set_blocker_set_raw_locked(BlockerSet* blocker_set) { m_blocker_set = blocker_set; }

        // FIXME: Figure out whether this can be Thread.
        mutable RecursiveSpinlock<LockRank::None> m_lock {};

    private:
        BlockerSet* m_blocker_set { nullptr };
        NonnullRefPtr<Thread> const m_thread;
        u8 m_was_interrupted_by_signal { 0 };
        bool m_is_blocking { false };
        bool m_was_interrupted_by_death { false };
        bool m_did_timeout { false };
    };
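
    // Lifecycle sketch (illustrative only; the authoritative sequence lives in
    // Thread::block_impl() in Thread.cpp):
    //
    //     BlockerType blocker(args...);          // lives on the blocking thread's stack
    //     if (!blocker.setup_blocker())          // condition already met, or registration failed
    //         blocker.will_unblock_immediately_without_blocking(...);
    //     blocker.begin_blocking({});            // thread enters State::Blocked
    //     // ...the scheduler runs other threads until some BlockerSet unblocks us...
    //     auto result = blocker.end_blocking({}, did_timeout);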
    class BlockerSet {
        AK_MAKE_NONCOPYABLE(BlockerSet);
        AK_MAKE_NONMOVABLE(BlockerSet);

    public:
        BlockerSet() = default;

        virtual ~BlockerSet()
        {
            VERIFY(!m_lock.is_locked());
            VERIFY(m_blockers.is_empty());
        }

        bool add_blocker(Blocker& blocker, void* data)
        {
            SpinlockLocker lock(m_lock);
            if (!should_add_blocker(blocker, data))
                return false;
            m_blockers.append({ &blocker, data });
            return true;
        }

        void remove_blocker(Blocker& blocker)
        {
            SpinlockLocker lock(m_lock);
            // NOTE: it's possible that the blocker is no longer present
            m_blockers.remove_all_matching([&](auto& info) {
                return info.blocker == &blocker;
            });
        }

        bool is_empty() const
        {
            SpinlockLocker lock(m_lock);
            return is_empty_locked();
        }

    protected:
        template<typename Callback>
        bool unblock_all_blockers_whose_conditions_are_met(Callback try_to_unblock_one)
        {
            SpinlockLocker lock(m_lock);
            return unblock_all_blockers_whose_conditions_are_met_locked(try_to_unblock_one);
        }

        template<typename Callback>
        bool unblock_all_blockers_whose_conditions_are_met_locked(Callback try_to_unblock_one)
        {
            VERIFY(m_lock.is_locked());
            bool stop_iterating = false;
            bool did_unblock_any = false;
            for (size_t i = 0; i < m_blockers.size() && !stop_iterating;) {
                auto& info = m_blockers[i];
                if (bool did_unblock = try_to_unblock_one(*info.blocker, info.data, stop_iterating)) {
                    m_blockers.remove(i);
                    did_unblock_any = true;
                    continue;
                }
                i++;
            }
            return did_unblock_any;
        }

        bool is_empty_locked() const
        {
            VERIFY(m_lock.is_locked());
            return m_blockers.is_empty();
        }

        virtual bool should_add_blocker(Blocker&, void*) { return true; }

        struct BlockerInfo {
            Blocker* blocker;
            void* data;
        };

        Vector<BlockerInfo, 4> do_take_blockers(size_t count)
        {
            if (m_blockers.size() <= count)
                return move(m_blockers);

            size_t move_count = (count <= m_blockers.size()) ? count : m_blockers.size();
            VERIFY(move_count > 0);

            Vector<BlockerInfo, 4> taken_blockers;
            taken_blockers.ensure_capacity(move_count);
            for (size_t i = 0; i < move_count; i++)
                taken_blockers.append(m_blockers.take(i));
            m_blockers.remove(0, move_count);
            return taken_blockers;
        }

        void do_append_blockers(Vector<BlockerInfo, 4>&& blockers_to_append)
        {
            if (blockers_to_append.is_empty())
                return;

            if (m_blockers.is_empty()) {
                m_blockers = move(blockers_to_append);
                return;
            }

            m_blockers.ensure_capacity(m_blockers.size() + blockers_to_append.size());
            for (size_t i = 0; i < blockers_to_append.size(); i++)
                m_blockers.append(blockers_to_append.take(i));
            blockers_to_append.clear();
        }

        // FIXME: Check whether this can be Thread.
        mutable Spinlock<LockRank::None> m_lock {};

    private:
        Vector<BlockerInfo, 4> m_blockers;
    };
    friend class JoinBlocker;
    class JoinBlocker final : public Blocker {
    public:
        explicit JoinBlocker(Thread& joinee, ErrorOr<void>& try_join_result, void*& joinee_exit_value);
        virtual Type blocker_type() const override { return Type::Join; }
        virtual StringView state_string() const override { return "Joining"sv; }
        virtual bool can_be_interrupted() const override { return false; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual bool setup_blocker() override;

        bool unblock(void*, bool);

    private:
        NonnullRefPtr<Thread> const m_joinee;
        void*& m_joinee_exit_value;
        ErrorOr<void>& m_try_join_result;
        bool m_did_unblock { false };
    };

    class WaitQueueBlocker final : public Blocker {
    public:
        explicit WaitQueueBlocker(WaitQueue&, StringView block_reason = {});
        virtual ~WaitQueueBlocker();

        virtual Type blocker_type() const override { return Type::Queue; }
        virtual StringView state_string() const override { return m_block_reason.is_null() ? "Queue"sv : m_block_reason; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override { }
        virtual bool setup_blocker() override;

        bool unblock();

    protected:
        WaitQueue& m_wait_queue;
        StringView m_block_reason;
        bool m_did_unblock { false };
    };

    class FutexBlocker final : public Blocker {
    public:
        explicit FutexBlocker(FutexQueue&, u32);
        virtual ~FutexBlocker();

        virtual Type blocker_type() const override { return Type::Futex; }
        virtual StringView state_string() const override { return "Futex"sv; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override { }
        virtual bool setup_blocker() override;

        u32 bitset() const { return m_bitset; }

        void begin_requeue()
        {
            // We need to hold the lock until we moved it over
            m_previous_interrupts_state = m_lock.lock();
        }
        void finish_requeue(FutexQueue&);

        bool unblock_bitset(u32 bitset);
        bool unblock(bool force = false);

    protected:
        FutexQueue& m_futex_queue;
        u32 m_bitset { 0 };
        InterruptsState m_previous_interrupts_state { InterruptsState::Disabled };
        bool m_did_unblock { false };
    };

    class FileBlocker : public Blocker {
    public:
        enum class BlockFlags : u16 {
            None = 0,

            Read = 1 << 0,
            Write = 1 << 1,
            ReadPriority = 1 << 2,
            WritePriority = 1 << 3,

            Accept = 1 << 4,
            Connect = 1 << 5,
            SocketFlags = Accept | Connect,

            WriteError = 1 << 6,
            WriteHangUp = 1 << 7,
            ReadHangUp = 1 << 8,
            Exception = WriteError | WriteHangUp | ReadHangUp,
        };

        virtual Type blocker_type() const override { return Type::File; }

        virtual bool unblock_if_conditions_are_met(bool, void*) = 0;
    };
    class OpenFileDescriptionBlocker : public FileBlocker {
    public:
        OpenFileDescription const& blocked_description() const;

        virtual bool unblock_if_conditions_are_met(bool, void*) override;
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual bool setup_blocker() override;

    protected:
        explicit OpenFileDescriptionBlocker(OpenFileDescription&, BlockFlags, BlockFlags&);

    private:
        NonnullRefPtr<OpenFileDescription> m_blocked_description;
        const BlockFlags m_flags;
        BlockFlags& m_unblocked_flags;
        bool m_did_unblock { false };
    };

    class AcceptBlocker final : public OpenFileDescriptionBlocker {
    public:
        explicit AcceptBlocker(OpenFileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Accepting"sv; }
    };

    class ConnectBlocker final : public OpenFileDescriptionBlocker {
    public:
        explicit ConnectBlocker(OpenFileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Connecting"sv; }
    };

    class WriteBlocker final : public OpenFileDescriptionBlocker {
    public:
        explicit WriteBlocker(OpenFileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Writing"sv; }
        virtual BlockTimeout const& override_timeout(BlockTimeout const&) override;

    private:
        BlockTimeout m_timeout;
    };

    class ReadBlocker final : public OpenFileDescriptionBlocker {
    public:
        explicit ReadBlocker(OpenFileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Reading"sv; }
        virtual BlockTimeout const& override_timeout(BlockTimeout const&) override;

    private:
        BlockTimeout m_timeout;
    };

    class SleepBlocker final : public Blocker {
    public:
        explicit SleepBlocker(BlockTimeout const&, Duration* = nullptr);
        virtual StringView state_string() const override { return "Sleeping"sv; }
        virtual Type blocker_type() const override { return Type::Sleep; }
        virtual BlockTimeout const& override_timeout(BlockTimeout const&) override;
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual void was_unblocked(bool) override;
        virtual Thread::BlockResult block_result() override;

    private:
        void calculate_remaining();

        BlockTimeout m_deadline;
        Duration* m_remaining;
    };

    class SelectBlocker final : public FileBlocker {
    public:
        struct FDInfo {
            RefPtr<OpenFileDescription> description;
            BlockFlags block_flags { BlockFlags::None };
            BlockFlags unblocked_flags { BlockFlags::None };
        };

        using FDVector = Vector<FDInfo, FD_SETSIZE>;
        explicit SelectBlocker(FDVector&);
        virtual ~SelectBlocker();

        virtual bool unblock_if_conditions_are_met(bool, void*) override;
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual void was_unblocked(bool) override;
        virtual StringView state_string() const override { return "Selecting"sv; }
        virtual bool setup_blocker() override;
        virtual void finalize() override;

    private:
        size_t collect_unblocked_flags();

        FDVector& m_fds;
        bool m_did_unblock { false };
    };
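
    // Illustrative sketch of how select-style syscall code might drive a
    // SelectBlocker (the real call sites live in the syscall implementations,
    // not in this header; `description` and `timeout` are assumptions):
    //
    //     Thread::SelectBlocker::FDVector fds;
    //     fds.append({ description, Thread::FileBlocker::BlockFlags::Read, Thread::FileBlocker::BlockFlags::None });
    //     if (Thread::current()->block<Thread::SelectBlocker>(timeout, fds).was_interrupted())
    //         return EINTR;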
    class SignalBlocker final : public Blocker {
    public:
        explicit SignalBlocker(sigset_t pending_set, siginfo_t& result);
        virtual StringView state_string() const override { return "Pending Signal"sv; }
        virtual Type blocker_type() const override { return Type::Signal; }
        void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual bool setup_blocker() override;
        bool check_pending_signals(bool from_add_blocker);

    private:
        sigset_t m_pending_set { 0 };
        siginfo_t& m_result;
        bool m_did_unblock { false };
    };

    class SignalBlockerSet final : public BlockerSet {
    public:
        void unblock_all_blockers_whose_conditions_are_met()
        {
            BlockerSet::unblock_all_blockers_whose_conditions_are_met([&](auto& b, void*, bool&) {
                VERIFY(b.blocker_type() == Blocker::Type::Signal);
                auto& blocker = static_cast<Thread::SignalBlocker&>(b);
                return blocker.check_pending_signals(false);
            });
        }

    private:
        bool should_add_blocker(Blocker& b, void*) override
        {
            VERIFY(b.blocker_type() == Blocker::Type::Signal);
            auto& blocker = static_cast<Thread::SignalBlocker&>(b);
            return !blocker.check_pending_signals(true);
        }
    };
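
    // Illustrative call shape for SignalBlocker, as sigtimedwait-style code might
    // use it (an assumed sketch, not a quote of the actual syscall code):
    //
    //     siginfo_t info {};
    //     auto result = Thread::current()->block<Thread::SignalBlocker>(timeout, pending_set, info);
    //     if (result == Thread::BlockResult::InterruptedByTimeout)
    //         return EAGAIN;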
    class WaitBlocker final : public Blocker {
    public:
        enum class UnblockFlags {
            Terminated,
            Stopped,
            Continued,
            Disowned
        };

        WaitBlocker(int wait_options, Variant<Empty, NonnullRefPtr<Process>, NonnullRefPtr<ProcessGroup>> waitee, ErrorOr<siginfo_t>& result);
        virtual StringView state_string() const override { return "Waiting"sv; }
        virtual Type blocker_type() const override { return Type::Wait; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual void was_unblocked(bool) override;
        virtual bool setup_blocker() override;

        bool unblock(Process& process, UnblockFlags flags, u8 signal, bool from_add_blocker);
        bool is_wait() const { return (m_wait_options & WNOWAIT) != WNOWAIT; }

    private:
        void do_was_disowned();
        void do_set_result(siginfo_t const&);

        int const m_wait_options;
        ErrorOr<siginfo_t>& m_result;
        Variant<Empty, NonnullRefPtr<Process>, NonnullRefPtr<ProcessGroup>> const m_waitee;
        bool m_did_unblock { false };
        bool m_got_sigchild { false };
    };

    class WaitBlockerSet final : public BlockerSet {
        friend class WaitBlocker;

    public:
        explicit WaitBlockerSet(Process& process)
            : m_process(process)
        {
        }

        void disowned_by_waiter(Process&);
        bool unblock(Process&, WaitBlocker::UnblockFlags, u8);
        void try_unblock(WaitBlocker&);
        void finalize();

    protected:
        virtual bool should_add_blocker(Blocker&, void*) override;

    private:
        struct ProcessBlockInfo {
            NonnullRefPtr<Process> const process;
            WaitBlocker::UnblockFlags flags;
            u8 signal;
            bool was_waited { false };

            explicit ProcessBlockInfo(NonnullRefPtr<Process>&&, WaitBlocker::UnblockFlags, u8);
            ~ProcessBlockInfo();
        };

        Process& m_process;
        Vector<ProcessBlockInfo, 2> m_processes;
        bool m_finalized { false };
    };

    class FlockBlocker final : public Blocker {
    public:
        FlockBlocker(NonnullRefPtr<Inode>, flock const&);
        virtual StringView state_string() const override { return "Locking File"sv; }
        virtual Type blocker_type() const override { return Type::Flock; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual bool setup_blocker() override;
        bool try_unblock(bool from_add_blocker);

    private:
        NonnullRefPtr<Inode> m_inode;
        flock const& m_flock;
        bool m_did_unblock { false };
    };

    class FlockBlockerSet final : public BlockerSet {
    public:
        void unblock_all_blockers_whose_conditions_are_met()
        {
            BlockerSet::unblock_all_blockers_whose_conditions_are_met([&](auto& b, void*, bool&) {
                VERIFY(b.blocker_type() == Blocker::Type::Flock);
                auto& blocker = static_cast<Thread::FlockBlocker&>(b);
                return blocker.try_unblock(false);
            });
        }

    private:
        bool should_add_blocker(Blocker& b, void*) override
        {
            VERIFY(b.blocker_type() == Blocker::Type::Flock);
            auto& blocker = static_cast<Thread::FlockBlocker&>(b);
            return !blocker.try_unblock(true);
        }
    };
    template<typename AddBlockerHandler>
    ErrorOr<void> try_join(AddBlockerHandler add_blocker)
    {
        if (Thread::current() == this)
            return EDEADLK;

        SpinlockLocker lock(m_lock);

        // Joining dead threads is allowed for two main reasons:
        // - Thread join behavior should not be racy when a thread is joined and exiting at roughly the same time.
        //   This is common behavior when threads are given a signal to end (meaning they are going to exit ASAP) and then joined.
        // - POSIX requires that exited threads are joinable (at least, there is no language in the specification forbidding it).
        if (!m_is_joinable || state() == Thread::State::Invalid)
            return EINVAL;

        add_blocker();

        // From this point on the thread is no longer joinable by anyone
        // else. It also means that if the join is timed, it becomes
        // detached when a timeout happens.
        m_is_joinable = false;
        return {};
    }
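
    // Illustrative call shape (a sketch; JoinBlocker::setup_blocker() is the real
    // caller of try_join):
    //
    //     ErrorOr<void> join_result = joinee->try_join([&]() {
    //         // Runs with the joinee's m_lock held; registers this JoinBlocker so
    //         // the joinee can hand over its exit value when it dies.
    //         add_to_blocker_set(joinee->m_join_blocker_set);
    //     });
    //     if (join_result.is_error())
    //         return join_result; // EDEADLK (self-join) or EINVAL (not joinable)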
    void did_schedule() { ++m_times_scheduled; }
    u32 times_scheduled() const { return m_times_scheduled; }

    void resume_from_stopped();

    [[nodiscard]] bool should_be_stopped() const;
    [[nodiscard]] bool is_stopped() const { return m_state == Thread::State::Stopped; }
    [[nodiscard]] bool is_blocked() const { return m_state == Thread::State::Blocked; }

    u32 cpu() const { return m_cpu.load(AK::MemoryOrder::memory_order_consume); }
    void set_cpu(u32 cpu) { m_cpu.store(cpu, AK::MemoryOrder::memory_order_release); }

    u32 affinity() const { return m_cpu_affinity; }
    void set_affinity(u32 affinity) { m_cpu_affinity = affinity; }

    RegisterState& get_register_dump_from_stack();
    RegisterState const& get_register_dump_from_stack() const { return const_cast<Thread*>(this)->get_register_dump_from_stack(); }

    DebugRegisterState& debug_register_state() { return m_debug_register_state; }
    DebugRegisterState const& debug_register_state() const { return m_debug_register_state; }

    ThreadRegisters& regs() { return m_regs; }
    ThreadRegisters const& regs() const { return m_regs; }

    State state() const { return m_state; }
    StringView state_string() const;

    VirtualAddress thread_specific_data() const { return m_thread_specific_data; }
    size_t thread_specific_region_size() const;
    size_t thread_specific_region_alignment() const;

    ALWAYS_INLINE void yield_if_stopped()
    {
        // If some thread stopped us, we need to yield to someone else.
        // We check this when entering/exiting a system call. A thread
        // may continue to execute in user land until the next timer
        // tick or until it enters the next system call; if it's in
        // kernel mode, we intercept it prior to returning back to
        // user mode.
        SpinlockLocker lock(m_lock);
        while (state() == Thread::State::Stopped) {
            lock.unlock();
            // We shouldn't be holding the big lock here
            yield_without_releasing_big_lock();
            lock.lock();
        }
    }
    void block(Kernel::Mutex&, SpinlockLocker<Spinlock<LockRank::None>>&, u32);

    template<typename BlockerType, class... Args>
    BlockResult block(BlockTimeout const& timeout, Args&&... args)
    {
        BlockerType blocker(forward<Args>(args)...);
        return block_impl(timeout, blocker);
    }

    u32 unblock_from_mutex(Kernel::Mutex&);
    void unblock_from_blocker(Blocker&);
    void unblock(u8 signal = 0);

    template<class... Args>
    Thread::BlockResult wait_on(WaitQueue& wait_queue, Thread::BlockTimeout const& timeout, Args&&... args)
    {
        VERIFY(this == Thread::current());
        return block<Thread::WaitQueueBlocker>(timeout, wait_queue, forward<Args>(args)...);
    }

    BlockResult sleep(clockid_t, Duration const&, Duration* = nullptr);
    BlockResult sleep(Duration const& duration, Duration* remaining_time = nullptr)
    {
        return sleep(CLOCK_MONOTONIC_COARSE, duration, remaining_time);
    }
    BlockResult sleep_until(clockid_t, Duration const&);
    BlockResult sleep_until(Duration const& duration)
    {
        return sleep_until(CLOCK_MONOTONIC_COARSE, duration);
    }
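
    // Illustrative sketch: the templated block<BlockerType>() above is the single
    // entry point for all blocking. For example, a driver waiting on a queue might
    // write the following (my_wait_queue and the "MyDriver" reason are assumptions
    // made for the example):
    //
    //     Duration timeout_duration = Duration::from_milliseconds(100);
    //     auto result = Thread::current()->wait_on(my_wait_queue, Thread::BlockTimeout(false, &timeout_duration), "MyDriver"sv);
    //     if (result.was_interrupted())
    //         return EINTR;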
    // Tell this thread to unblock if needed,
    // gracefully unwind the stack and die.
    void set_should_die();
    [[nodiscard]] bool should_die() const { return m_should_die; }
    void die_if_needed();

    void exit(void* = nullptr);

    void update_time_scheduled(u64, bool, bool);
    bool tick();
    void set_ticks_left(u32 t) { m_ticks_left = t; }
    u32 ticks_left() const { return m_ticks_left; }

    FlatPtr kernel_stack_base() const { return m_kernel_stack_base; }
    FlatPtr kernel_stack_top() const { return m_kernel_stack_top; }

    void set_state(State, u8 = 0);

    [[nodiscard]] bool is_initialized() const { return m_initialized; }
    void set_initialized(bool initialized) { m_initialized = initialized; }

    void send_urgent_signal_to_self(u8 signal);
    void send_signal(u8 signal, Process* sender);

    u32 update_signal_mask(u32 signal_mask);
    u32 signal_mask_block(sigset_t signal_set, bool block);
    u32 signal_mask() const;
    void reset_signals_for_exec();

    ErrorOr<FlatPtr> peek_debug_register(u32 register_index);
    ErrorOr<void> poke_debug_register(u32 register_index, FlatPtr data);

    void set_dump_backtrace_on_finalization() { m_dump_backtrace_on_finalization = true; }

    DispatchSignalResult dispatch_one_pending_signal();
    DispatchSignalResult try_dispatch_one_pending_signal(u8 signal);
    DispatchSignalResult dispatch_signal(u8 signal);
    void check_dispatch_pending_signal();
    [[nodiscard]] bool has_unmasked_pending_signals() const { return m_have_any_unmasked_pending_signals.load(AK::memory_order_consume); }
    [[nodiscard]] bool should_ignore_signal(u8 signal) const;
    [[nodiscard]] bool has_signal_handler(u8 signal) const;
    [[nodiscard]] bool is_signal_masked(u8 signal) const;
    u32 pending_signals() const;
    u32 pending_signals_for_state() const;

    [[nodiscard]] bool has_alternative_signal_stack() const;
    [[nodiscard]] bool is_in_alternative_signal_stack() const;

    FPUState& fpu_state() { return m_fpu_state; }

    ErrorOr<void> make_thread_specific_region(Badge<Process>);

    unsigned syscall_count() const { return m_syscall_count; }
    void did_syscall() { ++m_syscall_count; }
    unsigned inode_faults() const { return m_inode_faults; }
    void did_inode_fault() { ++m_inode_faults; }
    unsigned zero_faults() const { return m_zero_faults; }
    void did_zero_fault() { ++m_zero_faults; }
    unsigned cow_faults() const { return m_cow_faults; }
    void did_cow_fault() { ++m_cow_faults; }

    u64 file_read_bytes() const { return m_file_read_bytes; }
    u64 file_write_bytes() const { return m_file_write_bytes; }

    void did_file_read(u64 bytes)
    {
        m_file_read_bytes += bytes;
    }

    void did_file_write(u64 bytes)
    {
        m_file_write_bytes += bytes;
    }
    u64 unix_socket_read_bytes() const { return m_unix_socket_read_bytes; }
    u64 unix_socket_write_bytes() const { return m_unix_socket_write_bytes; }

    void did_unix_socket_read(u64 bytes)
    {
        m_unix_socket_read_bytes += bytes;
    }

    void did_unix_socket_write(u64 bytes)
    {
        m_unix_socket_write_bytes += bytes;
    }

    u64 ipv4_socket_read_bytes() const { return m_ipv4_socket_read_bytes; }
    u64 ipv4_socket_write_bytes() const { return m_ipv4_socket_write_bytes; }

    void did_ipv4_socket_read(u64 bytes)
    {
        m_ipv4_socket_read_bytes += bytes;
    }

    void did_ipv4_socket_write(u64 bytes)
    {
        m_ipv4_socket_write_bytes += bytes;
    }

    void set_active(bool active) { m_is_active = active; }

    u32 saved_critical() const { return m_saved_critical; }
    void save_critical(u32 critical) { m_saved_critical = critical; }

    void track_lock_acquire(LockRank rank);
    void track_lock_release(LockRank rank);

    [[nodiscard]] bool is_active() const { return m_is_active; }

    [[nodiscard]] bool is_finalizable() const
    {
        // We can't finalize as long as this thread is still running.
        // Note that checking for Running state here isn't sufficient,
        // as the thread may not be in Running state but switching out.
        // m_is_active is set to false once the context switch is
        // complete and the thread is not executing on any processor.
        if (m_is_active.load(AK::memory_order_acquire))
            return false;

        // We can't finalize until the thread is either detached or
        // a join has started. We can't make m_is_joinable atomic
        // because that would introduce a race in try_join.
        SpinlockLocker lock(m_lock);
        return !m_is_joinable;
    }

    ErrorOr<NonnullRefPtr<Thread>> clone(NonnullRefPtr<Process>);

    template<IteratorFunction<Thread&> Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<IteratorFunction<Thread&> Callback>
    static IterationDecision for_each(Callback);
    template<VoidFunction<Thread&> Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<VoidFunction<Thread&> Callback>
    static IterationDecision for_each(Callback);

    static constexpr u32 default_kernel_stack_size = 65536;
    static constexpr u32 default_userspace_stack_size = 1 * MiB;

    u64 time_in_user() const { return m_total_time_scheduled_user.load(AK::MemoryOrder::memory_order_relaxed); }
    u64 time_in_kernel() const { return m_total_time_scheduled_kernel.load(AK::MemoryOrder::memory_order_relaxed); }

    ExecutionMode previous_mode() const { return m_previous_mode; }
    bool set_previous_mode(ExecutionMode mode)
    {
        if (m_previous_mode == mode)
            return false;
        m_previous_mode = mode;
        return true;
    }

    TrapFrame*& current_trap() { return m_current_trap; }
    TrapFrame const* const& current_trap() const { return m_current_trap; }

    RecursiveSpinlock<LockRank::Thread>& get_lock() const { return m_lock; }
#if LOCK_DEBUG
    void holding_lock(Mutex& lock, int refs_delta, LockLocation const& location)
    {
        VERIFY(refs_delta != 0);
        m_holding_locks.fetch_add(refs_delta, AK::MemoryOrder::memory_order_relaxed);
        SpinlockLocker list_lock(m_holding_locks_lock);
        if (refs_delta > 0) {
            bool have_existing = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    have_existing = true;
                    info.count += refs_delta;
                    break;
                }
            }
            if (!have_existing)
                m_holding_locks_list.append({ &lock, location, 1 });
        } else {
            VERIFY(refs_delta < 0);
            bool found = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    VERIFY(info.count >= (unsigned)-refs_delta);
                    info.count -= (unsigned)-refs_delta;
                    if (info.count == 0)
                        m_holding_locks_list.remove(i);
                    found = true;
                    break;
                }
            }
            VERIFY(found);
        }
    }
    u32 lock_count() const
    {
        return m_holding_locks.load(AK::MemoryOrder::memory_order_relaxed);
    }
#endif
    bool is_handling_page_fault() const
    {
        return m_handling_page_fault;
    }
    void set_handling_page_fault(bool b) { m_handling_page_fault = b; }
    void set_idle_thread() { m_is_idle_thread = true; }
    bool is_idle_thread() const { return m_is_idle_thread; }

    void set_crashing() { m_is_crashing = true; }
    [[nodiscard]] bool is_crashing() const { return m_is_crashing; }

    ALWAYS_INLINE u32 enter_profiler()
    {
        return m_nested_profiler_calls.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);
    }

    ALWAYS_INLINE u32 leave_profiler()
    {
        return m_nested_profiler_calls.fetch_sub(1, AK::MemoryOrder::memory_order_acquire);
    }

    bool is_profiling_suppressed() const { return m_is_profiling_suppressed; }
    void set_profiling_suppressed() { m_is_profiling_suppressed = true; }

    bool is_promise_violation_pending() const { return m_is_promise_violation_pending; }
    void set_promise_violation_pending(bool value) { m_is_promise_violation_pending = value; }

    bool is_allocation_enabled() const { return m_allocation_enabled; }
    void set_allocation_enabled(bool value) { m_allocation_enabled = value; }

    ErrorOr<NonnullOwnPtr<KString>> backtrace();
    Blocker const* blocker() const { return m_blocker; }
    Kernel::Mutex const* blocking_mutex() const { return m_blocking_mutex; }

#if LOCK_DEBUG
    struct HoldingLockInfo {
        Mutex* lock;
        LockLocation lock_location;
        unsigned count;
    };

    template<IteratorFunction<HoldingLockInfo const&> Callback>
    void for_each_held_lock(Callback);
    template<VoidFunction<HoldingLockInfo const&> Callback>
    void for_each_held_lock(Callback);
#endif
private:
    Thread(NonnullRefPtr<Process>, NonnullOwnPtr<Memory::Region>, NonnullRefPtr<Timer>, NonnullOwnPtr<KString>);

    BlockResult block_impl(BlockTimeout const&, Blocker&);

    IntrusiveListNode<Thread> m_process_thread_list_node;
    int m_runnable_priority { -1 };

    friend class WaitQueue;

    class JoinBlockerSet final : public BlockerSet {
    public:
        void thread_did_exit(void* exit_value)
        {
            SpinlockLocker lock(m_lock);
            VERIFY(!m_thread_did_exit);
            m_thread_did_exit = true;
            m_exit_value.store(exit_value, AK::MemoryOrder::memory_order_release);
            do_unblock_joiner();
        }
        void thread_finalizing()
        {
            SpinlockLocker lock(m_lock);
            do_unblock_joiner();
        }
        void* exit_value() const
        {
            VERIFY(m_thread_did_exit);
            return m_exit_value.load(AK::MemoryOrder::memory_order_acquire);
        }

        void try_unblock(JoinBlocker& blocker)
        {
            SpinlockLocker lock(m_lock);
            if (m_thread_did_exit)
                blocker.unblock(exit_value(), false);
        }

    protected:
        virtual bool should_add_blocker(Blocker& b, void*) override
        {
            VERIFY(b.blocker_type() == Blocker::Type::Join);
            auto& blocker = static_cast<JoinBlocker&>(b);

            // NOTE: m_lock is held already!
            if (m_thread_did_exit) {
                blocker.unblock(exit_value(), true);
                return false;
            }
            return true;
        }

    private:
        void do_unblock_joiner()
        {
            unblock_all_blockers_whose_conditions_are_met_locked([&](Blocker& b, void*, bool&) {
                VERIFY(b.blocker_type() == Blocker::Type::Join);
                auto& blocker = static_cast<JoinBlocker&>(b);
                return blocker.unblock(exit_value(), false);
            });
        }

        Atomic<void*> m_exit_value { nullptr };
        bool m_thread_did_exit { false };
    };

    LockMode unlock_process_if_locked(u32&);
    void relock_process(LockMode, u32);
    void reset_fpu_state();
    mutable RecursiveSpinlock<LockRank::Thread> m_lock {};
    mutable RecursiveSpinlock<LockRank::None> m_block_lock {};
    NonnullRefPtr<Process> const m_process;
    ThreadID m_tid { -1 };
    ThreadRegisters m_regs {};
    DebugRegisterState m_debug_register_state {};
    TrapFrame* m_current_trap { nullptr };
    u32 m_saved_critical { 1 };
    IntrusiveListNode<Thread> m_ready_queue_node;
    Atomic<u32> m_cpu { 0 };
    u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
    Optional<u64> m_last_time_scheduled;
    Atomic<u64> m_total_time_scheduled_user { 0 };
    Atomic<u64> m_total_time_scheduled_kernel { 0 };
    u32 m_ticks_left { 0 };
    u32 m_times_scheduled { 0 };
    u32 m_ticks_in_user { 0 };
    u32 m_ticks_in_kernel { 0 };
    u32 m_pending_signals { 0 };
    u8 m_currently_handled_signal { 0 };
    u32 m_signal_mask { 0 };
    FlatPtr m_alternative_signal_stack { 0 };
    FlatPtr m_alternative_signal_stack_size { 0 };
    SignalBlockerSet m_signal_blocker_set;
    FlatPtr m_kernel_stack_base { 0 };
    FlatPtr m_kernel_stack_top { 0 };
    NonnullOwnPtr<Memory::Region> m_kernel_stack_region;
    VirtualAddress m_thread_specific_data;
    Optional<Memory::VirtualRange> m_thread_specific_range;
    Array<Optional<u32>, NSIG> m_signal_action_masks;
    Array<ProcessID, NSIG> m_signal_senders;
    Blocker* m_blocker { nullptr };
    Kernel::Mutex* m_blocking_mutex { nullptr };
    u32 m_lock_requested_count { 0 };
    IntrusiveListNode<Thread> m_blocked_threads_list_node;
    LockRank m_lock_rank_mask {};
    bool m_allocation_enabled { true };

    // FIXME: remove this after annihilating Process::m_big_lock
    IntrusiveListNode<Thread> m_big_lock_blocked_threads_list_node;

#if LOCK_DEBUG
    Atomic<u32> m_holding_locks { 0 };
    Spinlock<LockRank::None> m_holding_locks_lock {};
    Vector<HoldingLockInfo> m_holding_locks_list;
#endif

    JoinBlockerSet m_join_blocker_set;

    Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_active { false };
    bool m_is_joinable { true };
    bool m_handling_page_fault { false };
    ExecutionMode m_previous_mode { ExecutionMode::Kernel }; // We always start out in kernel mode

    unsigned m_syscall_count { 0 };
    unsigned m_inode_faults { 0 };
    unsigned m_zero_faults { 0 };
    unsigned m_cow_faults { 0 };

    u64 m_file_read_bytes { 0 };
    u64 m_file_write_bytes { 0 };

    u64 m_unix_socket_read_bytes { 0 };
    u64 m_unix_socket_write_bytes { 0 };

    u64 m_ipv4_socket_read_bytes { 0 };
    u64 m_ipv4_socket_write_bytes { 0 };

    FPUState m_fpu_state {};
    State m_state { Thread::State::Invalid };
    SpinlockProtected<NonnullOwnPtr<KString>, LockRank::None> m_name;
    u32 m_priority { THREAD_PRIORITY_NORMAL };

    State m_stop_state { Thread::State::Invalid };

    bool m_dump_backtrace_on_finalization { false };
    bool m_should_die { false };
    bool m_initialized { false };
    bool m_is_idle_thread { false };
    bool m_is_crashing { false };
    bool m_is_promise_violation_pending { false };
    Atomic<bool> m_have_any_unmasked_pending_signals { false };
    Atomic<u32> m_nested_profiler_calls { 0 };

    NonnullRefPtr<Timer> const m_block_timer;

    bool m_is_profiling_suppressed { false };

    void yield_and_release_relock_big_lock();

    enum class VerifyLockNotHeld {
        Yes,
        No
    };

    void yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_held = VerifyLockNotHeld::Yes);
    void drop_thread_count();

    mutable IntrusiveListNode<Thread> m_global_thread_list_node;

public:
    using ListInProcess = IntrusiveList<&Thread::m_process_thread_list_node>;
    using GlobalList = IntrusiveList<&Thread::m_global_thread_list_node>;

    static SpinlockProtected<GlobalList, LockRank::None>& all_instances();
};
AK_ENUM_BITWISE_OPERATORS(Thread::FileBlocker::BlockFlags);

template<IteratorFunction<Thread&> Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    return Thread::all_instances().with([&](auto& list) -> IterationDecision {
        for (auto& thread : list) {
            IterationDecision decision = callback(thread);
            if (decision != IterationDecision::Continue)
                return decision;
        }
        return IterationDecision::Continue;
    });
}
template<IteratorFunction<Thread&> Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    return Thread::all_instances().with([&](auto& list) -> IterationDecision {
        for (auto& thread : list) {
            if (thread.state() != state)
                continue;
            IterationDecision decision = callback(thread);
            if (decision != IterationDecision::Continue)
                return decision;
        }
        return IterationDecision::Continue;
    });
}
template<VoidFunction<Thread&> Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    // NOTE: A VoidFunction callback returns nothing, so there is no decision
    // to check here; the comparison against IterationDecision::Break that
    // crept in would not even compile against a void expression.
    return Thread::all_instances().with([&](auto& list) {
        for (auto& thread : list)
            callback(thread);
        return IterationDecision::Continue;
    });
}
template<VoidFunction<Thread&> Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    return for_each_in_state(state, [&](auto& thread) {
        callback(thread);
        return IterationDecision::Continue;
    });
}
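
// Illustrative sketch: iterating all threads in a given state (with the
// IteratorFunction overloads, the callback's return value controls early exit):
//
//     Thread::for_each_in_state(Thread::State::Runnable, [](Thread& thread) {
//         dbgln("runnable: {}", thread.tid().value());
//         return IterationDecision::Continue;
//     });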
#if LOCK_DEBUG
template<IteratorFunction<Thread::HoldingLockInfo const&> Callback>
inline void Thread::for_each_held_lock(Callback callback)
{
    SpinlockLocker list_lock(m_holding_locks_lock);

    for (auto const& lock_info : m_holding_locks_list) {
        if (callback(lock_info) == IterationDecision::Break)
            break;
    }
}

template<VoidFunction<Thread::HoldingLockInfo const&> Callback>
inline void Thread::for_each_held_lock(Callback callback)
{
    for_each_held_lock([&](auto const& lock_info) {
        callback(lock_info);
        return IterationDecision::Continue;
    });
}
#endif
}

template<>
struct AK::Formatter<Kernel::Thread> : AK::Formatter<FormatString> {
    ErrorOr<void> format(FormatBuilder&, Kernel::Thread const&);
};
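
// Illustrative usage of the formatter above (a sketch, not part of this header):
//
//     dbgln("Scheduling {}", *Thread::current());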