/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Concepts.h>
#include <AK/EnumBits.h>
#include <AK/Error.h>
#include <AK/IntrusiveList.h>
#include <AK/Optional.h>
#include <AK/OwnPtr.h>
#include <AK/Time.h>
#include <AK/Variant.h>
#include <AK/Vector.h>
#include <AK/WeakPtr.h>
#include <AK/Weakable.h>
#include <Kernel/Arch/RegisterState.h>
#include <Kernel/Debug.h>
#include <Kernel/Forward.h>
#include <Kernel/KString.h>
#include <Kernel/Library/ListedRefCounted.h>
#include <Kernel/Locking/LockLocation.h>
#include <Kernel/Locking/LockMode.h>
#include <Kernel/Locking/LockRank.h>
#include <Kernel/Locking/SpinlockProtected.h>
#include <Kernel/Memory/VirtualRange.h>
#include <Kernel/UnixTypes.h>
#include <LibC/fd_set.h>
#include <LibC/signal_numbers.h>

namespace Kernel {

class Timer;

namespace Memory {
extern RecursiveSpinlock s_mm_lock;
}

enum class DispatchSignalResult {
    Deferred = 0,
    Yield,
    Terminate,
    Continue
};

struct ThreadSpecificData {
    ThreadSpecificData* self;
};

#define THREAD_PRIORITY_MIN 1
#define THREAD_PRIORITY_LOW 10
#define THREAD_PRIORITY_NORMAL 30
#define THREAD_PRIORITY_HIGH 50
#define THREAD_PRIORITY_MAX 99

#define THREAD_AFFINITY_DEFAULT 0xffffffff

struct ThreadRegisters {
#if ARCH(I386)
    FlatPtr ss;
    FlatPtr gs;
    FlatPtr fs;
    FlatPtr es;
    FlatPtr ds;
    FlatPtr edi;
    FlatPtr esi;
    FlatPtr ebp;
    FlatPtr esp;
    FlatPtr ebx;
    FlatPtr edx;
    FlatPtr ecx;
    FlatPtr eax;
    FlatPtr eip;
    FlatPtr esp0;
    FlatPtr ss0;
#else
    FlatPtr rdi;
    FlatPtr rsi;
    FlatPtr rbp;
    FlatPtr rsp;
    FlatPtr rbx;
    FlatPtr rdx;
    FlatPtr rcx;
    FlatPtr rax;
    FlatPtr r8;
    FlatPtr r9;
    FlatPtr r10;
    FlatPtr r11;
    FlatPtr r12;
    FlatPtr r13;
    FlatPtr r14;
    FlatPtr r15;
    FlatPtr rip;
    FlatPtr rsp0;
#endif
    FlatPtr cs;

#if ARCH(I386)
    FlatPtr eflags;
    FlatPtr flags() const { return eflags; }
    void set_flags(FlatPtr value) { eflags = value; }
    void set_sp(FlatPtr value) { esp = value; }
    void set_sp0(FlatPtr value) { esp0 = value; }
    void set_ip(FlatPtr value) { eip = value; }
#else
    FlatPtr rflags;
    FlatPtr flags() const { return rflags; }
    void set_flags(FlatPtr value) { rflags = value; }
    void set_sp(FlatPtr value) { rsp = value; }
    void set_sp0(FlatPtr value) { rsp0 = value; }
    void set_ip(FlatPtr value) { rip = value; }
#endif

    FlatPtr cr3;

    FlatPtr ip() const
    {
#if ARCH(I386)
        return eip;
#else
        return rip;
#endif
    }

    FlatPtr sp() const
    {
#if ARCH(I386)
        return esp;
#else
        return rsp;
#endif
    }
};
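
// The accessors above let architecture-independent code fill in a new
// thread's context without spelling out per-architecture register names.
// A minimal sketch of the intended use (hypothetical entry point and stack
// values, not part of this header):
//
//     ThreadRegisters regs {};
//     regs.set_ip(entry_point);    // eip on i386, rip on x86_64
//     regs.set_sp(user_stack_top); // esp on i386, rsp on x86_64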

class Thread
    : public ListedRefCounted<Thread, LockType::Spinlock>
    , public Weakable<Thread> {
    AK_MAKE_NONCOPYABLE(Thread);
    AK_MAKE_NONMOVABLE(Thread);

    friend class Mutex;
    friend class Process;
    friend class Scheduler;
    friend struct ThreadReadyQueue;

public:
    inline static Thread* current()
    {
        return Processor::current_thread();
    }

    static ErrorOr<NonnullRefPtr<Thread>> try_create(NonnullRefPtr<Process>);
    ~Thread();

    static RefPtr<Thread> from_tid(ThreadID);
    static void finalize_dying_threads();

    ThreadID tid() const { return m_tid; }
    ProcessID pid() const;

    void set_priority(u32 p) { m_priority = p; }
    u32 priority() const { return m_priority; }

    void detach()
    {
        SpinlockLocker lock(m_lock);
        m_is_joinable = false;
    }

    [[nodiscard]] bool is_joinable() const
    {
        SpinlockLocker lock(m_lock);
        return m_is_joinable;
    }

    Process& process() { return m_process; }
    const Process& process() const { return m_process; }

    // NOTE: This returns a null-terminated string.
    StringView name() const
    {
        // NOTE: Whoever is calling this needs to be holding our lock while reading the name.
        VERIFY(m_lock.is_locked_by_current_processor());
        return m_name->view();
    }

    void set_name(NonnullOwnPtr<KString> name)
    {
        SpinlockLocker lock(m_lock);
        m_name = move(name);
    }

    void finalize();

    enum class State : u8 {
        Invalid = 0,
        Runnable,
        Running,
        Dying,
        Dead,
        Stopped,
        Blocked,
    };

    class [[nodiscard]] BlockResult {
    public:
        enum Type {
            WokeNormally,
            NotBlocked,
            InterruptedBySignal,
            InterruptedByDeath,
            InterruptedByTimeout,
        };

        BlockResult() = delete;

        BlockResult(Type type)
            : m_type(type)
        {
        }

        bool operator==(Type type) const
        {
            return m_type == type;
        }
        bool operator!=(Type type) const
        {
            return m_type != type;
        }

        [[nodiscard]] bool was_interrupted() const
        {
            switch (m_type) {
            case InterruptedBySignal:
            case InterruptedByDeath:
                return true;
            default:
                return false;
            }
        }

    private:
        Type m_type;
    };
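
    // A sketch of how callers typically inspect a BlockResult (hypothetical
    // syscall-style code, not part of this header):
    //
    //     auto result = Thread::current()->wait_on(queue, timeout);
    //     if (result.was_interrupted())
    //         return EINTR; // InterruptedBySignal or InterruptedByDeath
    //     if (result == Thread::BlockResult::InterruptedByTimeout)
    //         return ETIMEDOUT; // note: a timeout does not count as "interrupted"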

    class BlockTimeout {
    public:
        BlockTimeout()
            : m_infinite(true)
        {
        }
        explicit BlockTimeout(bool is_absolute, const Time* time, const Time* start_time = nullptr, clockid_t clock_id = CLOCK_MONOTONIC_COARSE);

        const Time& absolute_time() const { return m_time; }
        const Time* start_time() const { return !m_infinite ? &m_start_time : nullptr; }
        clockid_t clock_id() const { return m_clock_id; }
        bool is_infinite() const { return m_infinite; }

    private:
        Time m_time {};
        Time m_start_time {};
        clockid_t m_clock_id { CLOCK_MONOTONIC_COARSE };
        bool m_infinite { false };
    };
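
    // Construction sketch (illustrative values): a default-constructed
    // BlockTimeout is infinite, while the explicit constructor takes a
    // deadline that is either absolute or relative to start_time (or "now")
    // on the given clock.
    //
    //     Thread::BlockTimeout infinite;                       // never fires
    //     auto duration = Time::from_seconds(2);
    //     Thread::BlockTimeout relative(false, &duration);     // ~2s from now
    //     Thread::BlockTimeout absolute(true, &some_deadline); // at some_deadline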

    class BlockerSet;

    class Blocker {
        AK_MAKE_NONMOVABLE(Blocker);
        AK_MAKE_NONCOPYABLE(Blocker);

    public:
        enum class Type {
            Unknown = 0,
            File,
            Futex,
            Plan9FS,
            Join,
            Queue,
            Routing,
            Sleep,
            Signal,
            Wait
        };
        virtual ~Blocker();
        virtual StringView state_string() const = 0;
        virtual Type blocker_type() const = 0;
        virtual const BlockTimeout& override_timeout(const BlockTimeout& timeout) { return timeout; }
        virtual bool can_be_interrupted() const { return true; }
        virtual bool setup_blocker();
        virtual void finalize();

        Thread& thread() { return m_thread; }

        enum class UnblockImmediatelyReason {
            UnblockConditionAlreadyMet,
            TimeoutInThePast,
        };

        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) = 0;

        virtual void was_unblocked(bool did_timeout)
        {
            if (did_timeout) {
                SpinlockLocker lock(m_lock);
                m_did_timeout = true;
            }
        }
        void set_interrupted_by_death()
        {
            SpinlockLocker lock(m_lock);
            do_set_interrupted_by_death();
        }
        void set_interrupted_by_signal(u8 signal)
        {
            SpinlockLocker lock(m_lock);
            do_set_interrupted_by_signal(signal);
        }
        u8 was_interrupted_by_signal() const
        {
            SpinlockLocker lock(m_lock);
            return do_get_interrupted_by_signal();
        }
        virtual Thread::BlockResult block_result()
        {
            SpinlockLocker lock(m_lock);
            if (m_was_interrupted_by_death)
                return Thread::BlockResult::InterruptedByDeath;
            if (m_was_interrupted_by_signal != 0)
                return Thread::BlockResult::InterruptedBySignal;
            if (m_did_timeout)
                return Thread::BlockResult::InterruptedByTimeout;
            return Thread::BlockResult::WokeNormally;
        }

        void begin_blocking(Badge<Thread>);
        BlockResult end_blocking(Badge<Thread>, bool);

    protected:
        Blocker()
            : m_thread(*Thread::current())
        {
        }

        void do_set_interrupted_by_death()
        {
            m_was_interrupted_by_death = true;
        }
        void do_set_interrupted_by_signal(u8 signal)
        {
            VERIFY(signal != 0);
            m_was_interrupted_by_signal = signal;
        }
        void do_clear_interrupted_by_signal()
        {
            m_was_interrupted_by_signal = 0;
        }
        u8 do_get_interrupted_by_signal() const
        {
            return m_was_interrupted_by_signal;
        }
        [[nodiscard]] bool was_interrupted() const
        {
            return m_was_interrupted_by_death || m_was_interrupted_by_signal != 0;
        }
        void unblock_from_blocker()
        {
            {
                SpinlockLocker lock(m_lock);
                if (!m_is_blocking)
                    return;
                m_is_blocking = false;
            }
            m_thread->unblock_from_blocker(*this);
        }

        bool add_to_blocker_set(BlockerSet&, void* = nullptr);
        void set_blocker_set_raw_locked(BlockerSet* blocker_set) { m_blocker_set = blocker_set; }

        mutable RecursiveSpinlock m_lock;

    private:
        BlockerSet* m_blocker_set { nullptr };
        NonnullRefPtr<Thread> m_thread;
        u8 m_was_interrupted_by_signal { 0 };
        bool m_is_blocking { false };
        bool m_was_interrupted_by_death { false };
        bool m_did_timeout { false };
    };
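
    // The smallest viable Blocker subclass only has to name itself and handle
    // the "condition already met" fast path; a purely illustrative sketch
    // (not a real kernel blocker):
    //
    //     class ExampleBlocker final : public Thread::Blocker {
    //     public:
    //         virtual Type blocker_type() const override { return Type::Unknown; }
    //         virtual StringView state_string() const override { return "Example"sv; }
    //         virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override { }
    //     };
    //
    // Real blockers additionally override setup_blocker() to register with a
    // BlockerSet, as the subclasses below do.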

    class BlockerSet {
        AK_MAKE_NONCOPYABLE(BlockerSet);
        AK_MAKE_NONMOVABLE(BlockerSet);

    public:
        BlockerSet() = default;

        virtual ~BlockerSet()
        {
            VERIFY(!m_lock.is_locked());
            VERIFY(m_blockers.is_empty());
        }

        bool add_blocker(Blocker& blocker, void* data)
        {
            SpinlockLocker lock(m_lock);
            if (!should_add_blocker(blocker, data))
                return false;
            m_blockers.append({ &blocker, data });
            return true;
        }

        void remove_blocker(Blocker& blocker)
        {
            SpinlockLocker lock(m_lock);
            // NOTE: it's possible that the blocker is no longer present
            m_blockers.remove_all_matching([&](auto& info) {
                return info.blocker == &blocker;
            });
        }

        bool is_empty() const
        {
            SpinlockLocker lock(m_lock);
            return is_empty_locked();
        }

    protected:
        template<typename Callback>
        bool unblock_all_blockers_whose_conditions_are_met(Callback try_to_unblock_one)
        {
            SpinlockLocker lock(m_lock);
            return unblock_all_blockers_whose_conditions_are_met_locked(try_to_unblock_one);
        }

        template<typename Callback>
        bool unblock_all_blockers_whose_conditions_are_met_locked(Callback try_to_unblock_one)
        {
            VERIFY(m_lock.is_locked());
            bool stop_iterating = false;
            bool did_unblock_any = false;
            for (size_t i = 0; i < m_blockers.size() && !stop_iterating;) {
                auto& info = m_blockers[i];
                if (bool did_unblock = try_to_unblock_one(*info.blocker, info.data, stop_iterating)) {
                    m_blockers.remove(i);
                    did_unblock_any = true;
                    continue;
                }
                i++;
            }
            return did_unblock_any;
        }

        bool is_empty_locked() const
        {
            VERIFY(m_lock.is_locked());
            return m_blockers.is_empty();
        }

        virtual bool should_add_blocker(Blocker&, void*) { return true; }

        struct BlockerInfo {
            Blocker* blocker;
            void* data;
        };

        Vector<BlockerInfo, 4> do_take_blockers(size_t count)
        {
            if (m_blockers.size() <= count)
                return move(m_blockers);

            size_t move_count = (count <= m_blockers.size()) ? count : m_blockers.size();
            VERIFY(move_count > 0);

            Vector<BlockerInfo, 4> taken_blockers;
            taken_blockers.ensure_capacity(move_count);
            for (size_t i = 0; i < move_count; i++)
                taken_blockers.append(m_blockers.take(i));
            m_blockers.remove(0, move_count);
            return taken_blockers;
        }

        void do_append_blockers(Vector<BlockerInfo, 4>&& blockers_to_append)
        {
            if (blockers_to_append.is_empty())
                return;
            if (m_blockers.is_empty()) {
                m_blockers = move(blockers_to_append);
                return;
            }
            m_blockers.ensure_capacity(m_blockers.size() + blockers_to_append.size());
            for (size_t i = 0; i < blockers_to_append.size(); i++)
                m_blockers.append(blockers_to_append.take(i));
            blockers_to_append.clear();
        }

        mutable Spinlock m_lock;

    private:
        Vector<BlockerInfo, 4> m_blockers;
    };
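
    // Subclasses wake waiters via unblock_all_blockers_whose_conditions_are_met()
    // with a callback that tries one blocker at a time; a sketch of the pattern
    // (condition_is_met_for() is a hypothetical stand-in; SignalBlockerSet and
    // JoinBlockerSet below are the real examples):
    //
    //     bool did_unblock_any = unblock_all_blockers_whose_conditions_are_met(
    //         [&](Blocker& b, void* data, bool& stop_iterating) {
    //             // Return true to unblock and remove b; set stop_iterating
    //             // to end the scan early.
    //             return condition_is_met_for(b, data);
    //         });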

    friend class JoinBlocker;
    class JoinBlocker final : public Blocker {
    public:
        explicit JoinBlocker(Thread& joinee, ErrorOr<void>& try_join_result, void*& joinee_exit_value);
        virtual Type blocker_type() const override { return Type::Join; }
        virtual StringView state_string() const override { return "Joining"sv; }
        virtual bool can_be_interrupted() const override { return false; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual bool setup_blocker() override;

        bool unblock(void*, bool);

    private:
        NonnullRefPtr<Thread> m_joinee;
        void*& m_joinee_exit_value;
        ErrorOr<void>& m_try_join_result;
        bool m_did_unblock { false };
    };

    class WaitQueueBlocker final : public Blocker {
    public:
        explicit WaitQueueBlocker(WaitQueue&, StringView block_reason = {});
        virtual ~WaitQueueBlocker();

        virtual Type blocker_type() const override { return Type::Queue; }
        virtual StringView state_string() const override { return m_block_reason.is_null() ? "Queue"sv : m_block_reason; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override { }
        virtual bool setup_blocker() override;

        bool unblock();

    protected:
        WaitQueue& m_wait_queue;
        StringView m_block_reason;
        bool m_did_unblock { false };
    };

    class FutexBlocker final : public Blocker {
    public:
        explicit FutexBlocker(FutexQueue&, u32);
        virtual ~FutexBlocker();

        virtual Type blocker_type() const override { return Type::Futex; }
        virtual StringView state_string() const override { return "Futex"sv; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override { }
        virtual bool setup_blocker() override;

        u32 bitset() const { return m_bitset; }

        void begin_requeue()
        {
            // We need to hold the lock until we moved it over
            m_relock_flags = m_lock.lock();
        }
        void finish_requeue(FutexQueue&);

        bool unblock_bitset(u32 bitset);
        bool unblock(bool force = false);

    protected:
        FutexQueue& m_futex_queue;
        u32 m_bitset { 0 };
        u32 m_relock_flags { 0 };
        bool m_did_unblock { false };
    };

    class FileBlocker : public Blocker {
    public:
        enum class BlockFlags : u16 {
            None = 0,

            Read = 1 << 0,
            Write = 1 << 1,
            ReadPriority = 1 << 2,
            WritePriority = 1 << 3,

            Accept = 1 << 4,
            Connect = 1 << 5,
            SocketFlags = Accept | Connect,

            WriteNotOpen = 1 << 6,
            WriteError = 1 << 7,
            WriteHangUp = 1 << 8,
            ReadHangUp = 1 << 9,
            Exception = WriteNotOpen | WriteError | WriteHangUp | ReadHangUp,
        };

        virtual Type blocker_type() const override { return Type::File; }

        virtual bool unblock_if_conditions_are_met(bool, void*) = 0;
    };

    class OpenFileDescriptionBlocker : public FileBlocker {
    public:
        const OpenFileDescription& blocked_description() const;

        virtual bool unblock_if_conditions_are_met(bool, void*) override;
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual bool setup_blocker() override;

    protected:
        explicit OpenFileDescriptionBlocker(OpenFileDescription&, BlockFlags, BlockFlags&);

    private:
        NonnullRefPtr<OpenFileDescription> m_blocked_description;
        const BlockFlags m_flags;
        BlockFlags& m_unblocked_flags;
        bool m_did_unblock { false };
    };

    class AcceptBlocker final : public OpenFileDescriptionBlocker {
    public:
        explicit AcceptBlocker(OpenFileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Accepting"sv; }
    };

    class ConnectBlocker final : public OpenFileDescriptionBlocker {
    public:
        explicit ConnectBlocker(OpenFileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Connecting"sv; }
    };

    class WriteBlocker final : public OpenFileDescriptionBlocker {
    public:
        explicit WriteBlocker(OpenFileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Writing"sv; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;

    private:
        BlockTimeout m_timeout;
    };

    class ReadBlocker final : public OpenFileDescriptionBlocker {
    public:
        explicit ReadBlocker(OpenFileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Reading"sv; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;

    private:
        BlockTimeout m_timeout;
    };

    class SleepBlocker final : public Blocker {
    public:
        explicit SleepBlocker(const BlockTimeout&, Time* = nullptr);
        virtual StringView state_string() const override { return "Sleeping"sv; }
        virtual Type blocker_type() const override { return Type::Sleep; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual void was_unblocked(bool) override;
        virtual Thread::BlockResult block_result() override;

    private:
        void calculate_remaining();

        BlockTimeout m_deadline;
        Time* m_remaining;
    };

    class SelectBlocker final : public FileBlocker {
    public:
        struct FDInfo {
            NonnullRefPtr<OpenFileDescription> description;
            BlockFlags block_flags { BlockFlags::None };
            BlockFlags unblocked_flags { BlockFlags::None };
        };

        using FDVector = Vector<FDInfo, FD_SETSIZE>;

        explicit SelectBlocker(FDVector&);
        virtual ~SelectBlocker();

        virtual bool unblock_if_conditions_are_met(bool, void*) override;
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual void was_unblocked(bool) override;
        virtual StringView state_string() const override { return "Selecting"sv; }
        virtual bool setup_blocker() override;
        virtual void finalize() override;

    private:
        size_t collect_unblocked_flags();

        FDVector& m_fds;
        bool m_did_unblock { false };
    };

    class SignalBlocker final : public Blocker {
    public:
        explicit SignalBlocker(sigset_t pending_set, siginfo_t& result);
        virtual StringView state_string() const override { return "Pending Signal"sv; }
        virtual Type blocker_type() const override { return Type::Signal; }
        void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual bool setup_blocker() override;
        bool check_pending_signals(bool from_add_blocker);

    private:
        sigset_t m_pending_set { 0 };
        siginfo_t& m_result;
        bool m_did_unblock { false };
    };

    class SignalBlockerSet final : public BlockerSet {
    public:
        void unblock_all_blockers_whose_conditions_are_met()
        {
            BlockerSet::unblock_all_blockers_whose_conditions_are_met([&](auto& b, void*, bool&) {
                VERIFY(b.blocker_type() == Blocker::Type::Signal);
                auto& blocker = static_cast<Thread::SignalBlocker&>(b);
                return blocker.check_pending_signals(false);
            });
        }

    private:
        bool should_add_blocker(Blocker& b, void*) override
        {
            VERIFY(b.blocker_type() == Blocker::Type::Signal);
            auto& blocker = static_cast<Thread::SignalBlocker&>(b);
            return !blocker.check_pending_signals(true);
        }
    };

    class WaitBlocker final : public Blocker {
    public:
        enum class UnblockFlags {
            Terminated,
            Stopped,
            Continued,
            Disowned
        };

        WaitBlocker(int wait_options, Variant<Empty, NonnullRefPtr<Process>, NonnullRefPtr<ProcessGroup>> waitee, ErrorOr<siginfo_t>& result);
        virtual StringView state_string() const override { return "Waiting"sv; }
        virtual Type blocker_type() const override { return Type::Wait; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual void was_unblocked(bool) override;
        virtual bool setup_blocker() override;

        bool unblock(Process& process, UnblockFlags flags, u8 signal, bool from_add_blocker);
        bool is_wait() const { return (m_wait_options & WNOWAIT) != WNOWAIT; }

    private:
        void do_was_disowned();
        void do_set_result(const siginfo_t&);

        const int m_wait_options;
        ErrorOr<siginfo_t>& m_result;
        Variant<Empty, NonnullRefPtr<Process>, NonnullRefPtr<ProcessGroup>> m_waitee;
        bool m_did_unblock { false };
        bool m_got_sigchild { false };
    };

    class WaitBlockerSet final : public BlockerSet {
        friend class WaitBlocker;

    public:
        explicit WaitBlockerSet(Process& process)
            : m_process(process)
        {
        }

        void disowned_by_waiter(Process&);
        bool unblock(Process&, WaitBlocker::UnblockFlags, u8);
        void try_unblock(WaitBlocker&);
        void finalize();

    protected:
        virtual bool should_add_blocker(Blocker&, void*) override;

    private:
        struct ProcessBlockInfo {
            NonnullRefPtr<Process> process;
            WaitBlocker::UnblockFlags flags;
            u8 signal;
            bool was_waited { false };

            explicit ProcessBlockInfo(NonnullRefPtr<Process>&&, WaitBlocker::UnblockFlags, u8);
            ~ProcessBlockInfo();
        };

        Process& m_process;
        Vector<ProcessBlockInfo, 2> m_processes;
        bool m_finalized { false };
    };

    template<typename AddBlockerHandler>
    ErrorOr<void> try_join(AddBlockerHandler add_blocker)
    {
        if (Thread::current() == this)
            return EDEADLK;

        SpinlockLocker lock(m_lock);
        if (!m_is_joinable || state() == Thread::State::Dead)
            return EINVAL;

        add_blocker();

        // From this point on the thread is no longer joinable by anyone
        // else. It also means that if the join is timed, it becomes
        // detached when a timeout happens.
        m_is_joinable = false;
        return {};
    }
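
    // try_join() is driven by JoinBlocker::setup_blocker(), which calls it with
    // a handler that registers the blocker in the joinee's JoinBlockerSet while
    // the joinee's lock is held. A hypothetical caller therefore only needs
    // something like:
    //
    //     void* joinee_exit_value = nullptr;
    //     ErrorOr<void> try_join_result;
    //     auto result = Thread::current()->block<Thread::JoinBlocker>(
    //         Thread::BlockTimeout {}, *joinee, try_join_result, joinee_exit_value);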

    void did_schedule() { ++m_times_scheduled; }
    u32 times_scheduled() const { return m_times_scheduled; }

    void resume_from_stopped();

    [[nodiscard]] bool should_be_stopped() const;
    [[nodiscard]] bool is_stopped() const { return m_state == Thread::State::Stopped; }
    [[nodiscard]] bool is_blocked() const { return m_state == Thread::State::Blocked; }

    u32 cpu() const { return m_cpu.load(AK::MemoryOrder::memory_order_consume); }
    void set_cpu(u32 cpu) { m_cpu.store(cpu, AK::MemoryOrder::memory_order_release); }

    u32 affinity() const { return m_cpu_affinity; }
    void set_affinity(u32 affinity) { m_cpu_affinity = affinity; }

    RegisterState& get_register_dump_from_stack();
    const RegisterState& get_register_dump_from_stack() const { return const_cast<Thread*>(this)->get_register_dump_from_stack(); }

    DebugRegisterState& debug_register_state() { return m_debug_register_state; }
    const DebugRegisterState& debug_register_state() const { return m_debug_register_state; }

    ThreadRegisters& regs() { return m_regs; }
    ThreadRegisters const& regs() const { return m_regs; }

    State state() const { return m_state; }
    StringView state_string() const;

    VirtualAddress thread_specific_data() const { return m_thread_specific_data; }
    size_t thread_specific_region_size() const;
    size_t thread_specific_region_alignment() const;

    ALWAYS_INLINE void yield_if_stopped()
    {
        // If some thread stopped us, we need to yield to someone else
        // We check this when entering/exiting a system call. A thread
        // may continue to execute in user land until the next timer
        // tick or entering the next system call, or if it's in kernel
        // mode then we will intercept prior to returning back to user
        // mode.
        SpinlockLocker lock(m_lock);
        while (state() == Thread::State::Stopped) {
            lock.unlock();
            // We shouldn't be holding the big lock here
            yield_without_releasing_big_lock();
            lock.lock();
        }
    }

    void block(Kernel::Mutex&, SpinlockLocker<Spinlock>&, u32);

    template<typename BlockerType, class... Args>
    BlockResult block(BlockTimeout const& timeout, Args&&... args)
    {
        BlockerType blocker(forward<Args>(args)...);
        return block_impl(timeout, blocker);
    }

    u32 unblock_from_mutex(Kernel::Mutex&);
    void unblock_from_blocker(Blocker&);
    void unblock(u8 signal = 0);

    template<class... Args>
    Thread::BlockResult wait_on(WaitQueue& wait_queue, const Thread::BlockTimeout& timeout, Args&&... args)
    {
        VERIFY(this == Thread::current());
        return block<Thread::WaitQueueBlocker>(timeout, wait_queue, forward<Args>(args)...);
    }

    BlockResult sleep(clockid_t, const Time&, Time* = nullptr);
    BlockResult sleep(const Time& duration, Time* remaining_time = nullptr)
    {
        return sleep(CLOCK_MONOTONIC_COARSE, duration, remaining_time);
    }
    BlockResult sleep_until(clockid_t, const Time&);
    BlockResult sleep_until(const Time& duration)
    {
        return sleep_until(CLOCK_MONOTONIC_COARSE, duration);
    }
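
    // Usage sketch: block<BlockerType>() constructs the blocker in place and
    // parks the thread until its condition, a timeout, a signal, or death
    // unblocks it (illustrative values, not part of this header):
    //
    //     // Sleep for 100 ms on the coarse monotonic clock:
    //     auto duration = Time::from_milliseconds(100);
    //     (void)Thread::current()->sleep(duration);
    //
    //     // Wait on a queue with an infinite timeout and a debug reason:
    //     auto result = Thread::current()->wait_on(some_queue, Thread::BlockTimeout {}, "Example"sv);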

    // Tell this thread to unblock if needed,
    // gracefully unwind the stack and die.
    void set_should_die();
    [[nodiscard]] bool should_die() const { return m_should_die; }
    void die_if_needed();

    void exit(void* = nullptr);

    void update_time_scheduled(u64, bool, bool);
    bool tick();
    void set_ticks_left(u32 t) { m_ticks_left = t; }
    u32 ticks_left() const { return m_ticks_left; }

    FlatPtr kernel_stack_base() const { return m_kernel_stack_base; }
    FlatPtr kernel_stack_top() const { return m_kernel_stack_top; }

    void set_state(State, u8 = 0);

    [[nodiscard]] bool is_initialized() const { return m_initialized; }
    void set_initialized(bool initialized) { m_initialized = initialized; }

    void send_urgent_signal_to_self(u8 signal);
    void send_signal(u8 signal, Process* sender);

    u32 update_signal_mask(u32 signal_mask);
    u32 signal_mask_block(sigset_t signal_set, bool block);
    u32 signal_mask() const;
    void reset_signals_for_exec();

    ErrorOr<FlatPtr> peek_debug_register(u32 register_index);
    ErrorOr<void> poke_debug_register(u32 register_index, FlatPtr data);

    void set_dump_backtrace_on_finalization() { m_dump_backtrace_on_finalization = true; }

    DispatchSignalResult dispatch_one_pending_signal();
    DispatchSignalResult try_dispatch_one_pending_signal(u8 signal);
    DispatchSignalResult dispatch_signal(u8 signal);
    void check_dispatch_pending_signal();
    [[nodiscard]] bool has_unmasked_pending_signals() const { return m_have_any_unmasked_pending_signals.load(AK::memory_order_consume); }
    [[nodiscard]] bool should_ignore_signal(u8 signal) const;
    [[nodiscard]] bool has_signal_handler(u8 signal) const;
    [[nodiscard]] bool is_signal_masked(u8 signal) const;
    u32 pending_signals() const;
    u32 pending_signals_for_state() const;

    [[nodiscard]] bool has_alternative_signal_stack() const;
    [[nodiscard]] bool is_in_alternative_signal_stack() const;

    FPUState& fpu_state() { return m_fpu_state; }

    ErrorOr<void> make_thread_specific_region(Badge<Process>);

    unsigned syscall_count() const { return m_syscall_count; }
    void did_syscall() { ++m_syscall_count; }
    unsigned inode_faults() const { return m_inode_faults; }
    void did_inode_fault() { ++m_inode_faults; }
    unsigned zero_faults() const { return m_zero_faults; }
    void did_zero_fault() { ++m_zero_faults; }
    unsigned cow_faults() const { return m_cow_faults; }
    void did_cow_fault() { ++m_cow_faults; }

    unsigned file_read_bytes() const { return m_file_read_bytes; }
    unsigned file_write_bytes() const { return m_file_write_bytes; }

    void did_file_read(unsigned bytes)
    {
        m_file_read_bytes += bytes;
    }
    void did_file_write(unsigned bytes)
    {
        m_file_write_bytes += bytes;
    }

    unsigned unix_socket_read_bytes() const { return m_unix_socket_read_bytes; }
    unsigned unix_socket_write_bytes() const { return m_unix_socket_write_bytes; }

    void did_unix_socket_read(unsigned bytes)
    {
        m_unix_socket_read_bytes += bytes;
    }
    void did_unix_socket_write(unsigned bytes)
    {
        m_unix_socket_write_bytes += bytes;
    }

    unsigned ipv4_socket_read_bytes() const { return m_ipv4_socket_read_bytes; }
    unsigned ipv4_socket_write_bytes() const { return m_ipv4_socket_write_bytes; }

    void did_ipv4_socket_read(unsigned bytes)
    {
        m_ipv4_socket_read_bytes += bytes;
    }
    void did_ipv4_socket_write(unsigned bytes)
    {
        m_ipv4_socket_write_bytes += bytes;
    }

    void set_active(bool active) { m_is_active = active; }

    u32 saved_critical() const { return m_saved_critical; }
    void save_critical(u32 critical) { m_saved_critical = critical; }

    void track_lock_acquire(LockRank rank);
    void track_lock_release(LockRank rank);

    [[nodiscard]] bool is_active() const { return m_is_active; }

    [[nodiscard]] bool is_finalizable() const
    {
        // We can't finalize as long as this thread is still running
        // Note that checking for Running state here isn't sufficient
        // as the thread may not be in Running state but switching out.
        // m_is_active is set to false once the context switch is
        // complete and the thread is not executing on any processor.
        if (m_is_active.load(AK::memory_order_acquire))
            return false;

        // We can't finalize until the thread is either detached or
        // a join has started. We can't make m_is_joinable atomic
        // because that would introduce a race in try_join.
        SpinlockLocker lock(m_lock);
        return !m_is_joinable;
    }

    ErrorOr<NonnullRefPtr<Thread>> try_clone(Process&);

    template<IteratorFunction<Thread&> Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<IteratorFunction<Thread&> Callback>
    static IterationDecision for_each(Callback);

    template<VoidFunction<Thread&> Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<VoidFunction<Thread&> Callback>
    static IterationDecision for_each(Callback);

    static constexpr u32 default_kernel_stack_size = 65536;
    static constexpr u32 default_userspace_stack_size = 1 * MiB;

    u64 time_in_user() const { return m_total_time_scheduled_user; }
    u64 time_in_kernel() const { return m_total_time_scheduled_kernel; }

    enum class PreviousMode : u8 {
        KernelMode = 0,
        UserMode
    };
    PreviousMode previous_mode() const { return m_previous_mode; }
    bool set_previous_mode(PreviousMode mode)
    {
        if (m_previous_mode == mode)
            return false;
        m_previous_mode = mode;
        return true;
    }

    TrapFrame*& current_trap() { return m_current_trap; }
    TrapFrame const* const& current_trap() const { return m_current_trap; }

    RecursiveSpinlock& get_lock() const { return m_lock; }

#if LOCK_DEBUG
    void holding_lock(Mutex& lock, int refs_delta, LockLocation const& location)
    {
        VERIFY(refs_delta != 0);
        m_holding_locks.fetch_add(refs_delta, AK::MemoryOrder::memory_order_relaxed);
        SpinlockLocker list_lock(m_holding_locks_lock);
        if (refs_delta > 0) {
            bool have_existing = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    have_existing = true;
                    info.count += refs_delta;
                    break;
                }
            }
            if (!have_existing)
                m_holding_locks_list.append({ &lock, location, 1 });
        } else {
            VERIFY(refs_delta < 0);
            bool found = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    VERIFY(info.count >= (unsigned)-refs_delta);
                    info.count -= (unsigned)-refs_delta;
                    if (info.count == 0)
                        m_holding_locks_list.remove(i);
                    found = true;
                    break;
                }
            }
            VERIFY(found);
        }
    }
    u32 lock_count() const
    {
        return m_holding_locks.load(AK::MemoryOrder::memory_order_relaxed);
    }
#endif

    bool is_handling_page_fault() const
    {
        return m_handling_page_fault;
    }
    void set_handling_page_fault(bool b) { m_handling_page_fault = b; }
    void set_idle_thread() { m_is_idle_thread = true; }
    bool is_idle_thread() const { return m_is_idle_thread; }

    void set_crashing() { m_is_crashing = true; }
    [[nodiscard]] bool is_crashing() const { return m_is_crashing; }

    ALWAYS_INLINE u32 enter_profiler()
    {
        return m_nested_profiler_calls.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);
    }

    ALWAYS_INLINE u32 leave_profiler()
    {
        return m_nested_profiler_calls.fetch_sub(1, AK::MemoryOrder::memory_order_acquire);
    }

    bool is_profiling_suppressed() const { return m_is_profiling_suppressed; }
    void set_profiling_suppressed() { m_is_profiling_suppressed = true; }

    bool is_promise_violation_pending() const { return m_is_promise_violation_pending; }
    void set_promise_violation_pending(bool value) { m_is_promise_violation_pending = value; }

    bool is_allocation_enabled() const { return m_allocation_enabled; }
    void set_allocation_enabled(bool value) { m_allocation_enabled = value; }

    ErrorOr<NonnullOwnPtr<KString>> backtrace();

private:
    Thread(NonnullRefPtr<Process>, NonnullOwnPtr<Memory::Region>, NonnullRefPtr<Timer>, NonnullOwnPtr<KString>);

    BlockResult block_impl(BlockTimeout const&, Blocker&);

    IntrusiveListNode<Thread> m_process_thread_list_node;
    int m_runnable_priority { -1 };

    friend class WaitQueue;

    class JoinBlockerSet final : public BlockerSet {
    public:
        void thread_did_exit(void* exit_value)
        {
            SpinlockLocker lock(m_lock);
            VERIFY(!m_thread_did_exit);
            m_thread_did_exit = true;
            m_exit_value.store(exit_value, AK::MemoryOrder::memory_order_release);
            do_unblock_joiner();
        }
        void thread_finalizing()
        {
            SpinlockLocker lock(m_lock);
            do_unblock_joiner();
        }
        void* exit_value() const
        {
            VERIFY(m_thread_did_exit);
            return m_exit_value.load(AK::MemoryOrder::memory_order_acquire);
        }

        void try_unblock(JoinBlocker& blocker)
        {
            SpinlockLocker lock(m_lock);
            if (m_thread_did_exit)
                blocker.unblock(exit_value(), false);
        }

    protected:
        virtual bool should_add_blocker(Blocker& b, void*) override
        {
            VERIFY(b.blocker_type() == Blocker::Type::Join);
            auto& blocker = static_cast<JoinBlocker&>(b);

            // NOTE: m_lock is held already!
            if (m_thread_did_exit) {
                blocker.unblock(exit_value(), true);
                return false;
            }
            return true;
        }

    private:
        void do_unblock_joiner()
        {
            unblock_all_blockers_whose_conditions_are_met_locked([&](Blocker& b, void*, bool&) {
                VERIFY(b.blocker_type() == Blocker::Type::Join);
                auto& blocker = static_cast<JoinBlocker&>(b);
                return blocker.unblock(exit_value(), false);
            });
        }

        Atomic<void*> m_exit_value { nullptr };
        bool m_thread_did_exit { false };
    };

    LockMode unlock_process_if_locked(u32&);
    void relock_process(LockMode, u32);

    void reset_fpu_state();

    mutable RecursiveSpinlock m_lock { LockRank::Thread };
    mutable RecursiveSpinlock m_block_lock;
    NonnullRefPtr<Process> m_process;
    ThreadID m_tid { -1 };
    ThreadRegisters m_regs {};
    DebugRegisterState m_debug_register_state {};
    TrapFrame* m_current_trap { nullptr };
    u32 m_saved_critical { 1 };
    IntrusiveListNode<Thread> m_ready_queue_node;
    Atomic<u32> m_cpu { 0 };
    u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
    Optional<u64> m_last_time_scheduled;
    u64 m_total_time_scheduled_user { 0 };
    u64 m_total_time_scheduled_kernel { 0 };
    u32 m_ticks_left { 0 };
    u32 m_times_scheduled { 0 };
    u32 m_ticks_in_user { 0 };
    u32 m_ticks_in_kernel { 0 };
    u32 m_pending_signals { 0 };
    u32 m_signal_mask { 0 };
    FlatPtr m_alternative_signal_stack { 0 };
    FlatPtr m_alternative_signal_stack_size { 0 };
    SignalBlockerSet m_signal_blocker_set;
    FlatPtr m_kernel_stack_base { 0 };
    FlatPtr m_kernel_stack_top { 0 };
    NonnullOwnPtr<Memory::Region> m_kernel_stack_region;
    VirtualAddress m_thread_specific_data;
    Optional<Memory::VirtualRange> m_thread_specific_range;
    Array<Optional<u32>, NSIG> m_signal_action_masks;
    Blocker* m_blocker { nullptr };
    Kernel::Mutex* m_blocking_mutex { nullptr };
    u32 m_lock_requested_count { 0 };
    IntrusiveListNode<Thread> m_blocked_threads_list_node;
    LockRank m_lock_rank_mask { LockRank::None };
    bool m_allocation_enabled { true };

#if LOCK_DEBUG
    struct HoldingLockInfo {
        Mutex* lock;
        LockLocation lock_location;
        unsigned count;
    };

    Atomic<u32> m_holding_locks { 0 };
    Spinlock m_holding_locks_lock;
    Vector<HoldingLockInfo> m_holding_locks_list;
#endif

    JoinBlockerSet m_join_blocker_set;

    Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_active { false };
    bool m_is_joinable { true };
    bool m_handling_page_fault { false };
    PreviousMode m_previous_mode { PreviousMode::KernelMode }; // We always start out in kernel mode

    unsigned m_syscall_count { 0 };
    unsigned m_inode_faults { 0 };
    unsigned m_zero_faults { 0 };
    unsigned m_cow_faults { 0 };

    unsigned m_file_read_bytes { 0 };
    unsigned m_file_write_bytes { 0 };

    unsigned m_unix_socket_read_bytes { 0 };
    unsigned m_unix_socket_write_bytes { 0 };

    unsigned m_ipv4_socket_read_bytes { 0 };
    unsigned m_ipv4_socket_write_bytes { 0 };

    FPUState m_fpu_state {};
    State m_state { Thread::State::Invalid };
    NonnullOwnPtr<KString> m_name;
    u32 m_priority { THREAD_PRIORITY_NORMAL };

    State m_stop_state { Thread::State::Invalid };

    bool m_dump_backtrace_on_finalization { false };
    bool m_should_die { false };
    bool m_initialized { false };
    bool m_is_idle_thread { false };
    bool m_is_crashing { false };
    bool m_is_promise_violation_pending { false };
    Atomic<bool> m_have_any_unmasked_pending_signals { false };
    Atomic<u32> m_nested_profiler_calls { 0 };

    NonnullRefPtr<Timer> m_block_timer;

    bool m_is_profiling_suppressed { false };

    void yield_and_release_relock_big_lock();

    enum class VerifyLockNotHeld {
        Yes,
        No
    };

    void yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_held = VerifyLockNotHeld::Yes);
    void drop_thread_count();

    mutable IntrusiveListNode<Thread> m_global_thread_list_node;

public:
    using ListInProcess = IntrusiveList<&Thread::m_process_thread_list_node>;
    using GlobalList = IntrusiveList<&Thread::m_global_thread_list_node>;

    static SpinlockProtected<GlobalList>& all_instances();
};

AK_ENUM_BITWISE_OPERATORS(Thread::FileBlocker::BlockFlags);
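
// AK_ENUM_BITWISE_OPERATORS generates the bitwise operators (and helpers like
// has_flag) for the flags enum, so callers can combine and test flags directly;
// a small sketch with illustrative values:
//
//     using BlockFlags = Thread::FileBlocker::BlockFlags;
//     auto flags = BlockFlags::Read | BlockFlags::Exception;
//     if (has_flag(flags, BlockFlags::Read)) {
//         // interested in readability
//     }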

template<IteratorFunction<Thread&> Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    return Thread::all_instances().with([&](auto& list) -> IterationDecision {
        for (auto& thread : list) {
            IterationDecision decision = callback(thread);
            if (decision != IterationDecision::Continue)
                return decision;
        }
        return IterationDecision::Continue;
    });
}

template<IteratorFunction<Thread&> Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    return Thread::all_instances().with([&](auto& list) -> IterationDecision {
        for (auto& thread : list) {
            if (thread.state() != state)
                continue;
            IterationDecision decision = callback(thread);
            if (decision != IterationDecision::Continue)
                return decision;
        }
        return IterationDecision::Continue;
    });
}

template<VoidFunction<Thread&> Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    return Thread::all_instances().with([&](auto& list) {
        // A VoidFunction callback returns nothing, so there is no decision
        // to inspect; visit every thread and always continue.
        for (auto& thread : list)
            callback(thread);
        return IterationDecision::Continue;
    });
}

template<VoidFunction<Thread&> Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    return for_each_in_state(state, [&](auto& thread) {
        callback(thread);
        return IterationDecision::Continue;
    });
}
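
// Iteration sketch: the IteratorFunction overloads let the callback end the
// walk early by returning IterationDecision::Break, while the VoidFunction
// overloads always visit every thread. An illustrative example:
//
//     Thread::for_each_in_state(Thread::State::Blocked, [](Thread& thread) {
//         dbgln("blocked thread {}", thread.tid().value());
//     });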

}

template<>
struct AK::Formatter<Kernel::Thread> : AK::Formatter<FormatString> {
    ErrorOr<void> format(FormatBuilder&, Kernel::Thread const&);
};
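
// With this Formatter specialization, a Thread can be handed directly to the
// kernel's format-based logging; a minimal sketch:
//
//     dbgln("current thread: {}", *Kernel::Thread::current());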