/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Concepts.h>
#include <AK/EnumBits.h>
#include <AK/Error.h>
#include <AK/IntrusiveList.h>
#include <AK/Optional.h>
#include <AK/OwnPtr.h>
#include <AK/Time.h>
#include <AK/Variant.h>
#include <AK/Vector.h>
#include <AK/WeakPtr.h>
#include <AK/Weakable.h>
#include <Kernel/Arch/RegisterState.h>
#include <Kernel/Debug.h>
#include <Kernel/Forward.h>
#include <Kernel/KString.h>
#include <Kernel/Library/ListedRefCounted.h>
#include <Kernel/Locking/LockLocation.h>
#include <Kernel/Locking/LockMode.h>
#include <Kernel/Locking/LockRank.h>
#include <Kernel/Locking/SpinlockProtected.h>
#include <Kernel/Memory/VirtualRange.h>
#include <Kernel/UnixTypes.h>
#include <LibC/fd_set.h>
#include <LibC/signal_numbers.h>

namespace Kernel {

class Timer;

namespace Memory {
extern RecursiveSpinlock s_mm_lock;
}

enum class DispatchSignalResult {
    Deferred = 0,
    Yield,
    Terminate,
    Continue
};

struct ThreadSpecificData {
    ThreadSpecificData* self;
};

#define THREAD_PRIORITY_MIN 1
#define THREAD_PRIORITY_LOW 10
#define THREAD_PRIORITY_NORMAL 30
#define THREAD_PRIORITY_HIGH 50
#define THREAD_PRIORITY_MAX 99

#define THREAD_AFFINITY_DEFAULT 0xffffffff

struct ThreadRegisters {
#if ARCH(I386)
    FlatPtr ss;
    FlatPtr gs;
    FlatPtr fs;
    FlatPtr es;
    FlatPtr ds;
    FlatPtr edi;
    FlatPtr esi;
    FlatPtr ebp;
    FlatPtr esp;
    FlatPtr ebx;
    FlatPtr edx;
    FlatPtr ecx;
    FlatPtr eax;
    FlatPtr eip;
    FlatPtr esp0;
    FlatPtr ss0;
#else
    FlatPtr rdi;
    FlatPtr rsi;
    FlatPtr rbp;
    FlatPtr rsp;
    FlatPtr rbx;
    FlatPtr rdx;
    FlatPtr rcx;
    FlatPtr rax;
    FlatPtr r8;
    FlatPtr r9;
    FlatPtr r10;
    FlatPtr r11;
    FlatPtr r12;
    FlatPtr r13;
    FlatPtr r14;
    FlatPtr r15;
    FlatPtr rip;
    FlatPtr rsp0;
#endif
    FlatPtr cs;

#if ARCH(I386)
    FlatPtr eflags;
    FlatPtr flags() const { return eflags; }
    void set_flags(FlatPtr value) { eflags = value; }
    void set_sp(FlatPtr value) { esp = value; }
    void set_sp0(FlatPtr value) { esp0 = value; }
    void set_ip(FlatPtr value) { eip = value; }
#else
    FlatPtr rflags;
    FlatPtr flags() const { return rflags; }
    void set_flags(FlatPtr value) { rflags = value; }
    void set_sp(FlatPtr value) { rsp = value; }
    void set_sp0(FlatPtr value) { rsp0 = value; }
    void set_ip(FlatPtr value) { rip = value; }
#endif

    FlatPtr cr3;

    FlatPtr ip() const
    {
#if ARCH(I386)
        return eip;
#else
        return rip;
#endif
    }

    FlatPtr sp() const
    {
#if ARCH(I386)
        return esp;
#else
        return rsp;
#endif
    }
};
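
// Illustrative sketch (not part of the original header): code that sets up a
// new thread would seed these registers roughly like so, where `entry` and
// `stack_top` are hypothetical values supplied by the caller:
//
//     ThreadRegisters regs {};
//     regs.set_ip(entry);     // first instruction the thread will execute
//     regs.set_sp(stack_top); // top of the thread's stack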

class Thread
    : public ListedRefCounted<Thread, LockType::Spinlock>
    , public Weakable<Thread> {
    AK_MAKE_NONCOPYABLE(Thread);
    AK_MAKE_NONMOVABLE(Thread);

    friend class Mutex;
    friend class Process;
    friend class Scheduler;
    friend struct ThreadReadyQueue;

public:
    inline static Thread* current()
    {
        return Processor::current_thread();
    }

    static ErrorOr<NonnullRefPtr<Thread>> try_create(NonnullRefPtr<Process>);
    ~Thread();

    static RefPtr<Thread> from_tid(ThreadID);
    static void finalize_dying_threads();

    ThreadID tid() const { return m_tid; }
    ProcessID pid() const;

    void set_priority(u32 p) { m_priority = p; }
    u32 priority() const { return m_priority; }

    void detach()
    {
        SpinlockLocker lock(m_lock);
        m_is_joinable = false;
    }

    [[nodiscard]] bool is_joinable() const
    {
        SpinlockLocker lock(m_lock);
        return m_is_joinable;
    }

    Process& process() { return m_process; }
    Process const& process() const { return m_process; }

    // NOTE: This returns a null-terminated string.
    StringView name() const
    {
        // NOTE: Whoever is calling this needs to be holding our lock while reading the name.
        VERIFY(m_lock.is_locked_by_current_processor());
        return m_name->view();
    }

    void set_name(NonnullOwnPtr<KString> name)
    {
        SpinlockLocker lock(m_lock);
        m_name = move(name);
    }

    void finalize();

    enum class State : u8 {
        Invalid = 0,
        Runnable,
        Running,
        Dying,
        Dead,
        Stopped,
        Blocked,
    };

    class [[nodiscard]] BlockResult {
    public:
        enum Type {
            WokeNormally,
            NotBlocked,
            InterruptedBySignal,
            InterruptedByDeath,
            InterruptedByTimeout,
        };

        BlockResult() = delete;

        BlockResult(Type type)
            : m_type(type)
        {
        }

        bool operator==(Type type) const
        {
            return m_type == type;
        }
        bool operator!=(Type type) const
        {
            return m_type != type;
        }

        [[nodiscard]] bool was_interrupted() const
        {
            switch (m_type) {
            case InterruptedBySignal:
            case InterruptedByDeath:
                return true;
            default:
                return false;
            }
        }

    private:
        Type m_type;
    };
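
    // Usage sketch (illustrative only): callers typically compare a BlockResult
    // against the Type enumerators or ask whether the block was interrupted:
    //
    //     auto result = Thread::current()->sleep(duration);
    //     if (result.was_interrupted())
    //         return EINTR; // woken by a signal or impending death, not by the timer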

    class BlockTimeout {
    public:
        BlockTimeout()
            : m_infinite(true)
        {
        }
        explicit BlockTimeout(bool is_absolute, Time const* time, Time const* start_time = nullptr, clockid_t clock_id = CLOCK_MONOTONIC_COARSE);

        Time const& absolute_time() const { return m_time; }
        Time const* start_time() const { return !m_infinite ? &m_start_time : nullptr; }
        clockid_t clock_id() const { return m_clock_id; }
        bool is_infinite() const { return m_infinite; }

    private:
        Time m_time {};
        Time m_start_time {};
        clockid_t m_clock_id { CLOCK_MONOTONIC_COARSE };
        bool m_infinite { false };
    };
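
    // Illustrative sketch: the default constructor means "block forever"; the
    // explicit constructor takes a relative duration or an absolute deadline
    // (Time::from_seconds() is provided by AK/Time.h):
    //
    //     Thread::BlockTimeout forever;                    // infinite timeout
    //     auto duration = Time::from_seconds(2);
    //     Thread::BlockTimeout relative(false, &duration); // 2 seconds from now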

    class BlockerSet;

    class Blocker {
        AK_MAKE_NONMOVABLE(Blocker);
        AK_MAKE_NONCOPYABLE(Blocker);

    public:
        enum class Type {
            Unknown = 0,
            File,
            Futex,
            Plan9FS,
            Join,
            Queue,
            Routing,
            Sleep,
            Signal,
            Wait,
            Flock
        };
        virtual ~Blocker();
        virtual StringView state_string() const = 0;
        virtual Type blocker_type() const = 0;
        virtual BlockTimeout const& override_timeout(BlockTimeout const& timeout) { return timeout; }
        virtual bool can_be_interrupted() const { return true; }
        virtual bool setup_blocker();
        virtual void finalize();

        Thread& thread() { return m_thread; }

        enum class UnblockImmediatelyReason {
            UnblockConditionAlreadyMet,
            TimeoutInThePast,
        };

        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) = 0;

        virtual void was_unblocked(bool did_timeout)
        {
            if (did_timeout) {
                SpinlockLocker lock(m_lock);
                m_did_timeout = true;
            }
        }
        void set_interrupted_by_death()
        {
            SpinlockLocker lock(m_lock);
            do_set_interrupted_by_death();
        }
        void set_interrupted_by_signal(u8 signal)
        {
            SpinlockLocker lock(m_lock);
            do_set_interrupted_by_signal(signal);
        }
        u8 was_interrupted_by_signal() const
        {
            SpinlockLocker lock(m_lock);
            return do_get_interrupted_by_signal();
        }
        virtual Thread::BlockResult block_result()
        {
            SpinlockLocker lock(m_lock);
            if (m_was_interrupted_by_death)
                return Thread::BlockResult::InterruptedByDeath;
            if (m_was_interrupted_by_signal != 0)
                return Thread::BlockResult::InterruptedBySignal;
            if (m_did_timeout)
                return Thread::BlockResult::InterruptedByTimeout;
            return Thread::BlockResult::WokeNormally;
        }

        void begin_blocking(Badge<Thread>);
        BlockResult end_blocking(Badge<Thread>, bool);

    protected:
        Blocker()
            : m_thread(*Thread::current())
        {
        }

        void do_set_interrupted_by_death()
        {
            m_was_interrupted_by_death = true;
        }
        void do_set_interrupted_by_signal(u8 signal)
        {
            VERIFY(signal != 0);
            m_was_interrupted_by_signal = signal;
        }
        void do_clear_interrupted_by_signal()
        {
            m_was_interrupted_by_signal = 0;
        }
        u8 do_get_interrupted_by_signal() const
        {
            return m_was_interrupted_by_signal;
        }
        [[nodiscard]] bool was_interrupted() const
        {
            return m_was_interrupted_by_death || m_was_interrupted_by_signal != 0;
        }
        void unblock_from_blocker()
        {
            {
                SpinlockLocker lock(m_lock);
                if (!m_is_blocking)
                    return;
                m_is_blocking = false;
            }
            m_thread->unblock_from_blocker(*this);
        }

        bool add_to_blocker_set(BlockerSet&, void* = nullptr);
        void set_blocker_set_raw_locked(BlockerSet* blocker_set) { m_blocker_set = blocker_set; }

        // FIXME: Figure out whether this can be LockRank::Thread.
        mutable RecursiveSpinlock m_lock { LockRank::None };

    private:
        BlockerSet* m_blocker_set { nullptr };
        NonnullRefPtr<Thread> m_thread;
        u8 m_was_interrupted_by_signal { 0 };
        bool m_is_blocking { false };
        bool m_was_interrupted_by_death { false };
        bool m_did_timeout { false };
    };

    class BlockerSet {
        AK_MAKE_NONCOPYABLE(BlockerSet);
        AK_MAKE_NONMOVABLE(BlockerSet);

    public:
        BlockerSet() = default;

        virtual ~BlockerSet()
        {
            VERIFY(!m_lock.is_locked());
            VERIFY(m_blockers.is_empty());
        }

        bool add_blocker(Blocker& blocker, void* data)
        {
            SpinlockLocker lock(m_lock);
            if (!should_add_blocker(blocker, data))
                return false;
            m_blockers.append({ &blocker, data });
            return true;
        }

        void remove_blocker(Blocker& blocker)
        {
            SpinlockLocker lock(m_lock);
            // NOTE: It's possible that the blocker is no longer present.
            m_blockers.remove_all_matching([&](auto& info) {
                return info.blocker == &blocker;
            });
        }

        bool is_empty() const
        {
            SpinlockLocker lock(m_lock);
            return is_empty_locked();
        }

    protected:
        template<typename Callback>
        bool unblock_all_blockers_whose_conditions_are_met(Callback try_to_unblock_one)
        {
            SpinlockLocker lock(m_lock);
            return unblock_all_blockers_whose_conditions_are_met_locked(try_to_unblock_one);
        }

        template<typename Callback>
        bool unblock_all_blockers_whose_conditions_are_met_locked(Callback try_to_unblock_one)
        {
            VERIFY(m_lock.is_locked());
            bool stop_iterating = false;
            bool did_unblock_any = false;
            for (size_t i = 0; i < m_blockers.size() && !stop_iterating;) {
                auto& info = m_blockers[i];
                if (bool did_unblock = try_to_unblock_one(*info.blocker, info.data, stop_iterating)) {
                    m_blockers.remove(i);
                    did_unblock_any = true;
                    continue;
                }
                i++;
            }
            return did_unblock_any;
        }

        bool is_empty_locked() const
        {
            VERIFY(m_lock.is_locked());
            return m_blockers.is_empty();
        }

        virtual bool should_add_blocker(Blocker&, void*) { return true; }

        struct BlockerInfo {
            Blocker* blocker;
            void* data;
        };

        Vector<BlockerInfo, 4> do_take_blockers(size_t count)
        {
            if (m_blockers.size() <= count)
                return move(m_blockers);

            size_t move_count = (count <= m_blockers.size()) ? count : m_blockers.size();
            VERIFY(move_count > 0);

            Vector<BlockerInfo, 4> taken_blockers;
            taken_blockers.ensure_capacity(move_count);
            for (size_t i = 0; i < move_count; i++)
                taken_blockers.append(m_blockers.take(i));
            m_blockers.remove(0, move_count);
            return taken_blockers;
        }

        void do_append_blockers(Vector<BlockerInfo, 4>&& blockers_to_append)
        {
            if (blockers_to_append.is_empty())
                return;

            if (m_blockers.is_empty()) {
                m_blockers = move(blockers_to_append);
                return;
            }

            m_blockers.ensure_capacity(m_blockers.size() + blockers_to_append.size());
            for (size_t i = 0; i < blockers_to_append.size(); i++)
                m_blockers.append(blockers_to_append.take(i));
            blockers_to_append.clear();
        }

        // FIXME: Check whether this can be LockRank::Thread.
        mutable Spinlock m_lock { LockRank::None };

    private:
        Vector<BlockerInfo, 4> m_blockers;
    };

    friend class JoinBlocker;
    class JoinBlocker final : public Blocker {
    public:
        explicit JoinBlocker(Thread& joinee, ErrorOr<void>& try_join_result, void*& joinee_exit_value);
        virtual Type blocker_type() const override { return Type::Join; }
        virtual StringView state_string() const override { return "Joining"sv; }
        virtual bool can_be_interrupted() const override { return false; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual bool setup_blocker() override;

        bool unblock(void*, bool);

    private:
        NonnullRefPtr<Thread> m_joinee;
        void*& m_joinee_exit_value;
        ErrorOr<void>& m_try_join_result;
        bool m_did_unblock { false };
    };

    class WaitQueueBlocker final : public Blocker {
    public:
        explicit WaitQueueBlocker(WaitQueue&, StringView block_reason = {});
        virtual ~WaitQueueBlocker();

        virtual Type blocker_type() const override { return Type::Queue; }
        virtual StringView state_string() const override { return m_block_reason.is_null() ? "Queue"sv : m_block_reason; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override { }
        virtual bool setup_blocker() override;

        bool unblock();

    protected:
        WaitQueue& m_wait_queue;
        StringView m_block_reason;
        bool m_did_unblock { false };
    };

    class FutexBlocker final : public Blocker {
    public:
        explicit FutexBlocker(FutexQueue&, u32);
        virtual ~FutexBlocker();

        virtual Type blocker_type() const override { return Type::Futex; }
        virtual StringView state_string() const override { return "Futex"sv; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override { }
        virtual bool setup_blocker() override;

        u32 bitset() const { return m_bitset; }

        void begin_requeue()
        {
            // We need to hold the lock until we have moved it over.
            m_relock_flags = m_lock.lock();
        }
        void finish_requeue(FutexQueue&);

        bool unblock_bitset(u32 bitset);
        bool unblock(bool force = false);

    protected:
        FutexQueue& m_futex_queue;
        u32 m_bitset { 0 };
        u32 m_relock_flags { 0 };
        bool m_did_unblock { false };
    };

    class FileBlocker : public Blocker {
    public:
        enum class BlockFlags : u16 {
            None = 0,

            Read = 1 << 0,
            Write = 1 << 1,
            ReadPriority = 1 << 2,
            WritePriority = 1 << 3,

            Accept = 1 << 4,
            Connect = 1 << 5,
            SocketFlags = Accept | Connect,

            WriteError = 1 << 6,
            WriteHangUp = 1 << 7,
            ReadHangUp = 1 << 8,
            Exception = WriteError | WriteHangUp | ReadHangUp,
        };

        virtual Type blocker_type() const override { return Type::File; }

        virtual bool unblock_if_conditions_are_met(bool, void*) = 0;
    };

    class OpenFileDescriptionBlocker : public FileBlocker {
    public:
        OpenFileDescription const& blocked_description() const;

        virtual bool unblock_if_conditions_are_met(bool, void*) override;
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual bool setup_blocker() override;

    protected:
        explicit OpenFileDescriptionBlocker(OpenFileDescription&, BlockFlags, BlockFlags&);

    private:
        NonnullRefPtr<OpenFileDescription> m_blocked_description;
        const BlockFlags m_flags;
        BlockFlags& m_unblocked_flags;
        bool m_did_unblock { false };
    };

    class AcceptBlocker final : public OpenFileDescriptionBlocker {
    public:
        explicit AcceptBlocker(OpenFileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Accepting"sv; }
    };

    class ConnectBlocker final : public OpenFileDescriptionBlocker {
    public:
        explicit ConnectBlocker(OpenFileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Connecting"sv; }
    };

    class WriteBlocker final : public OpenFileDescriptionBlocker {
    public:
        explicit WriteBlocker(OpenFileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Writing"sv; }
        virtual BlockTimeout const& override_timeout(BlockTimeout const&) override;

    private:
        BlockTimeout m_timeout;
    };

    class ReadBlocker final : public OpenFileDescriptionBlocker {
    public:
        explicit ReadBlocker(OpenFileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Reading"sv; }
        virtual BlockTimeout const& override_timeout(BlockTimeout const&) override;

    private:
        BlockTimeout m_timeout;
    };

    class SleepBlocker final : public Blocker {
    public:
        explicit SleepBlocker(BlockTimeout const&, Time* = nullptr);
        virtual StringView state_string() const override { return "Sleeping"sv; }
        virtual Type blocker_type() const override { return Type::Sleep; }
        virtual BlockTimeout const& override_timeout(BlockTimeout const&) override;
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual void was_unblocked(bool) override;
        virtual Thread::BlockResult block_result() override;

    private:
        void calculate_remaining();

        BlockTimeout m_deadline;
        Time* m_remaining;
    };

    class SelectBlocker final : public FileBlocker {
    public:
        struct FDInfo {
            RefPtr<OpenFileDescription> description;
            BlockFlags block_flags { BlockFlags::None };
            BlockFlags unblocked_flags { BlockFlags::None };
        };

        using FDVector = Vector<FDInfo, FD_SETSIZE>;

        explicit SelectBlocker(FDVector&);
        virtual ~SelectBlocker();

        virtual bool unblock_if_conditions_are_met(bool, void*) override;
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual void was_unblocked(bool) override;
        virtual StringView state_string() const override { return "Selecting"sv; }
        virtual bool setup_blocker() override;
        virtual void finalize() override;

    private:
        size_t collect_unblocked_flags();

        FDVector& m_fds;
        bool m_did_unblock { false };
    };

    class SignalBlocker final : public Blocker {
    public:
        explicit SignalBlocker(sigset_t pending_set, siginfo_t& result);
        virtual StringView state_string() const override { return "Pending Signal"sv; }
        virtual Type blocker_type() const override { return Type::Signal; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual bool setup_blocker() override;
        bool check_pending_signals(bool from_add_blocker);

    private:
        sigset_t m_pending_set { 0 };
        siginfo_t& m_result;
        bool m_did_unblock { false };
    };

    class SignalBlockerSet final : public BlockerSet {
    public:
        void unblock_all_blockers_whose_conditions_are_met()
        {
            BlockerSet::unblock_all_blockers_whose_conditions_are_met([&](auto& b, void*, bool&) {
                VERIFY(b.blocker_type() == Blocker::Type::Signal);
                auto& blocker = static_cast<Thread::SignalBlocker&>(b);
                return blocker.check_pending_signals(false);
            });
        }

    private:
        bool should_add_blocker(Blocker& b, void*) override
        {
            VERIFY(b.blocker_type() == Blocker::Type::Signal);
            auto& blocker = static_cast<Thread::SignalBlocker&>(b);
            return !blocker.check_pending_signals(true);
        }
    };

    class WaitBlocker final : public Blocker {
    public:
        enum class UnblockFlags {
            Terminated,
            Stopped,
            Continued,
            Disowned
        };

        WaitBlocker(int wait_options, Variant<Empty, NonnullRefPtr<Process>, NonnullRefPtr<ProcessGroup>> waitee, ErrorOr<siginfo_t>& result);
        virtual StringView state_string() const override { return "Waiting"sv; }
        virtual Type blocker_type() const override { return Type::Wait; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual void was_unblocked(bool) override;
        virtual bool setup_blocker() override;

        bool unblock(Process& process, UnblockFlags flags, u8 signal, bool from_add_blocker);
        bool is_wait() const { return (m_wait_options & WNOWAIT) != WNOWAIT; }

    private:
        void do_was_disowned();
        void do_set_result(siginfo_t const&);

        int const m_wait_options;
        ErrorOr<siginfo_t>& m_result;
        Variant<Empty, NonnullRefPtr<Process>, NonnullRefPtr<ProcessGroup>> m_waitee;
        bool m_did_unblock { false };
        bool m_got_sigchild { false };
    };

    class WaitBlockerSet final : public BlockerSet {
        friend class WaitBlocker;

    public:
        explicit WaitBlockerSet(Process& process)
            : m_process(process)
        {
        }

        void disowned_by_waiter(Process&);
        bool unblock(Process&, WaitBlocker::UnblockFlags, u8);
        void try_unblock(WaitBlocker&);
        void finalize();

    protected:
        virtual bool should_add_blocker(Blocker&, void*) override;

    private:
        struct ProcessBlockInfo {
            NonnullRefPtr<Process> process;
            WaitBlocker::UnblockFlags flags;
            u8 signal;
            bool was_waited { false };

            explicit ProcessBlockInfo(NonnullRefPtr<Process>&&, WaitBlocker::UnblockFlags, u8);
            ~ProcessBlockInfo();
        };

        Process& m_process;
        Vector<ProcessBlockInfo, 2> m_processes;
        bool m_finalized { false };
    };

    class FlockBlocker final : public Blocker {
    public:
        FlockBlocker(NonnullRefPtr<Inode>, flock const&);
        virtual StringView state_string() const override { return "Locking File"sv; }
        virtual Type blocker_type() const override { return Type::Flock; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual bool setup_blocker() override;
        bool try_unblock(bool from_add_blocker);

    private:
        NonnullRefPtr<Inode> m_inode;
        flock const& m_flock;
        bool m_did_unblock { false };
    };

    class FlockBlockerSet final : public BlockerSet {
    public:
        void unblock_all_blockers_whose_conditions_are_met()
        {
            BlockerSet::unblock_all_blockers_whose_conditions_are_met([&](auto& b, void*, bool&) {
                VERIFY(b.blocker_type() == Blocker::Type::Flock);
                auto& blocker = static_cast<Thread::FlockBlocker&>(b);
                return blocker.try_unblock(false);
            });
        }

    private:
        bool should_add_blocker(Blocker& b, void*) override
        {
            VERIFY(b.blocker_type() == Blocker::Type::Flock);
            auto& blocker = static_cast<Thread::FlockBlocker&>(b);
            return !blocker.try_unblock(true);
        }
    };

    template<typename AddBlockerHandler>
    ErrorOr<void> try_join(AddBlockerHandler add_blocker)
    {
        if (Thread::current() == this)
            return EDEADLK;

        SpinlockLocker lock(m_lock);
        if (!m_is_joinable || state() == Thread::State::Dead)
            return EINVAL;

        add_blocker();

        // From this point on the thread is no longer joinable by anyone
        // else. It also means that if the join is timed, it becomes
        // detached when a timeout happens.
        m_is_joinable = false;
        return {};
    }

    void did_schedule() { ++m_times_scheduled; }
    u32 times_scheduled() const { return m_times_scheduled; }

    void resume_from_stopped();

    [[nodiscard]] bool should_be_stopped() const;
    [[nodiscard]] bool is_stopped() const { return m_state == Thread::State::Stopped; }
    [[nodiscard]] bool is_blocked() const { return m_state == Thread::State::Blocked; }

    u32 cpu() const { return m_cpu.load(AK::MemoryOrder::memory_order_consume); }
    void set_cpu(u32 cpu) { m_cpu.store(cpu, AK::MemoryOrder::memory_order_release); }

    u32 affinity() const { return m_cpu_affinity; }
    void set_affinity(u32 affinity) { m_cpu_affinity = affinity; }

    RegisterState& get_register_dump_from_stack();
    RegisterState const& get_register_dump_from_stack() const { return const_cast<Thread*>(this)->get_register_dump_from_stack(); }

    DebugRegisterState& debug_register_state() { return m_debug_register_state; }
    DebugRegisterState const& debug_register_state() const { return m_debug_register_state; }

    ThreadRegisters& regs() { return m_regs; }
    ThreadRegisters const& regs() const { return m_regs; }

    State state() const { return m_state; }
    StringView state_string() const;

    VirtualAddress thread_specific_data() const { return m_thread_specific_data; }
    size_t thread_specific_region_size() const;
    size_t thread_specific_region_alignment() const;

    ALWAYS_INLINE void yield_if_stopped()
    {
        // If some thread stopped us, we need to yield to someone else.
        // We check this when entering and exiting a system call. A thread
        // may continue to execute in userland until the next timer tick
        // or until it enters the next system call; if it's in kernel mode,
        // we intercept it before it returns back to user mode.
        SpinlockLocker lock(m_lock);
        while (state() == Thread::State::Stopped) {
            lock.unlock();
            // We shouldn't be holding the big lock here.
            yield_without_releasing_big_lock();
            lock.lock();
        }
    }

    void block(Kernel::Mutex&, SpinlockLocker<Spinlock>&, u32);

    template<typename BlockerType, class... Args>
    BlockResult block(BlockTimeout const& timeout, Args&&... args)
    {
        BlockerType blocker(forward<Args>(args)...);
        return block_impl(timeout, blocker);
    }
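
    // Usage sketch (illustrative): the variadic arguments construct the blocker,
    // while the timeout is consumed by block_impl(). Joining a hypothetical
    // `joinee` thread could look roughly like this:
    //
    //     ErrorOr<void> try_join_result;
    //     void* joinee_exit_value = nullptr;
    //     auto result = Thread::current()->block<Thread::JoinBlocker>(
    //         Thread::BlockTimeout(), joinee, try_join_result, joinee_exit_value);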

    u32 unblock_from_mutex(Kernel::Mutex&);
    void unblock_from_blocker(Blocker&);
    void unblock(u8 signal = 0);

    template<class... Args>
    Thread::BlockResult wait_on(WaitQueue& wait_queue, Thread::BlockTimeout const& timeout, Args&&... args)
    {
        VERIFY(this == Thread::current());
        return block<Thread::WaitQueueBlocker>(timeout, wait_queue, forward<Args>(args)...);
    }
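
    // Illustrative sketch (not from the original header): a driver might park
    // the current thread on a WaitQueue until an IRQ handler wakes it, e.g.
    //
    //     auto result = Thread::current()->wait_on(m_queue, Thread::BlockTimeout(), "ExampleDriver"sv);
    //     if (result.was_interrupted())
    //         return EINTR;
    //
    // where m_queue is a hypothetical WaitQueue member of the driver.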

    BlockResult sleep(clockid_t, Time const&, Time* = nullptr);
    BlockResult sleep(Time const& duration, Time* remaining_time = nullptr)
    {
        return sleep(CLOCK_MONOTONIC_COARSE, duration, remaining_time);
    }
    BlockResult sleep_until(clockid_t, Time const&);
    BlockResult sleep_until(Time const& deadline)
    {
        return sleep_until(CLOCK_MONOTONIC_COARSE, deadline);
    }
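
    // Usage sketch (illustrative): sleeping the current thread for two seconds
    // on the coarse monotonic clock, assuming Time::from_seconds() and
    // Time::zero() from AK/Time.h:
    //
    //     auto remaining = Time::zero();
    //     auto result = Thread::current()->sleep(Time::from_seconds(2), &remaining);
    //     // If interrupted, `remaining` holds the unslept portion.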

    // Tell this thread to unblock if needed,
    // gracefully unwind the stack and die.
    void set_should_die();
    [[nodiscard]] bool should_die() const { return m_should_die; }
    void die_if_needed();

    void exit(void* = nullptr);

    void update_time_scheduled(u64, bool, bool);
    bool tick();
    void set_ticks_left(u32 t) { m_ticks_left = t; }
    u32 ticks_left() const { return m_ticks_left; }

    FlatPtr kernel_stack_base() const { return m_kernel_stack_base; }
    FlatPtr kernel_stack_top() const { return m_kernel_stack_top; }

    void set_state(State, u8 = 0);

    [[nodiscard]] bool is_initialized() const { return m_initialized; }
    void set_initialized(bool initialized) { m_initialized = initialized; }

    void send_urgent_signal_to_self(u8 signal);
    void send_signal(u8 signal, Process* sender);

    u32 update_signal_mask(u32 signal_mask);
    u32 signal_mask_block(sigset_t signal_set, bool block);
    u32 signal_mask() const;
    void reset_signals_for_exec();

    ErrorOr<FlatPtr> peek_debug_register(u32 register_index);
    ErrorOr<void> poke_debug_register(u32 register_index, FlatPtr data);

    void set_dump_backtrace_on_finalization() { m_dump_backtrace_on_finalization = true; }

    DispatchSignalResult dispatch_one_pending_signal();
    DispatchSignalResult try_dispatch_one_pending_signal(u8 signal);
    DispatchSignalResult dispatch_signal(u8 signal);
    void check_dispatch_pending_signal();
    [[nodiscard]] bool has_unmasked_pending_signals() const { return m_have_any_unmasked_pending_signals.load(AK::memory_order_consume); }
    [[nodiscard]] bool should_ignore_signal(u8 signal) const;
    [[nodiscard]] bool has_signal_handler(u8 signal) const;
    [[nodiscard]] bool is_signal_masked(u8 signal) const;
    u32 pending_signals() const;
    u32 pending_signals_for_state() const;

    [[nodiscard]] bool has_alternative_signal_stack() const;
    [[nodiscard]] bool is_in_alternative_signal_stack() const;

    FPUState& fpu_state() { return m_fpu_state; }

    ErrorOr<void> make_thread_specific_region(Badge<Process>);

    unsigned syscall_count() const { return m_syscall_count; }
    void did_syscall() { ++m_syscall_count; }
    unsigned inode_faults() const { return m_inode_faults; }
    void did_inode_fault() { ++m_inode_faults; }
    unsigned zero_faults() const { return m_zero_faults; }
    void did_zero_fault() { ++m_zero_faults; }
    unsigned cow_faults() const { return m_cow_faults; }
    void did_cow_fault() { ++m_cow_faults; }

    unsigned file_read_bytes() const { return m_file_read_bytes; }
    unsigned file_write_bytes() const { return m_file_write_bytes; }

    void did_file_read(unsigned bytes)
    {
        m_file_read_bytes += bytes;
    }

    void did_file_write(unsigned bytes)
    {
        m_file_write_bytes += bytes;
    }

    unsigned unix_socket_read_bytes() const { return m_unix_socket_read_bytes; }
    unsigned unix_socket_write_bytes() const { return m_unix_socket_write_bytes; }

    void did_unix_socket_read(unsigned bytes)
    {
        m_unix_socket_read_bytes += bytes;
    }

    void did_unix_socket_write(unsigned bytes)
    {
        m_unix_socket_write_bytes += bytes;
    }

    unsigned ipv4_socket_read_bytes() const { return m_ipv4_socket_read_bytes; }
    unsigned ipv4_socket_write_bytes() const { return m_ipv4_socket_write_bytes; }

    void did_ipv4_socket_read(unsigned bytes)
    {
        m_ipv4_socket_read_bytes += bytes;
    }

    void did_ipv4_socket_write(unsigned bytes)
    {
        m_ipv4_socket_write_bytes += bytes;
    }

    void set_active(bool active) { m_is_active = active; }

    u32 saved_critical() const { return m_saved_critical; }
    void save_critical(u32 critical) { m_saved_critical = critical; }

    void track_lock_acquire(LockRank rank);
    void track_lock_release(LockRank rank);

    [[nodiscard]] bool is_active() const { return m_is_active; }

    [[nodiscard]] bool is_finalizable() const
    {
        // We can't finalize as long as this thread is still running.
        // Note that checking for the Running state here isn't sufficient,
        // as the thread may no longer be in the Running state yet still be
        // switching out. m_is_active is set to false only once the context
        // switch is complete and the thread is not executing on any processor.
        if (m_is_active.load(AK::memory_order_acquire))
            return false;

        // We can't finalize until the thread is either detached or
        // a join has started. We can't make m_is_joinable atomic
        // because that would introduce a race in try_join.
        SpinlockLocker lock(m_lock);
        return !m_is_joinable;
    }

    ErrorOr<NonnullRefPtr<Thread>> try_clone(Process&);

    template<IteratorFunction<Thread&> Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<IteratorFunction<Thread&> Callback>
    static IterationDecision for_each(Callback);

    template<VoidFunction<Thread&> Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<VoidFunction<Thread&> Callback>
    static IterationDecision for_each(Callback);

    static constexpr u32 default_kernel_stack_size = 65536;
    static constexpr u32 default_userspace_stack_size = 1 * MiB;

    u64 time_in_user() const { return m_total_time_scheduled_user.load(AK::MemoryOrder::memory_order_relaxed); }
    u64 time_in_kernel() const { return m_total_time_scheduled_kernel.load(AK::MemoryOrder::memory_order_relaxed); }

    enum class PreviousMode : u8 {
        KernelMode = 0,
        UserMode
    };
    PreviousMode previous_mode() const { return m_previous_mode; }
    bool set_previous_mode(PreviousMode mode)
    {
        if (m_previous_mode == mode)
            return false;
        m_previous_mode = mode;
        return true;
    }

    TrapFrame*& current_trap() { return m_current_trap; }
    TrapFrame const* const& current_trap() const { return m_current_trap; }

    RecursiveSpinlock& get_lock() const { return m_lock; }

#if LOCK_DEBUG
    void holding_lock(Mutex& lock, int refs_delta, LockLocation const& location)
    {
        VERIFY(refs_delta != 0);
        m_holding_locks.fetch_add(refs_delta, AK::MemoryOrder::memory_order_relaxed);
        SpinlockLocker list_lock(m_holding_locks_lock);
        if (refs_delta > 0) {
            bool have_existing = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    have_existing = true;
                    info.count += refs_delta;
                    break;
                }
            }
            if (!have_existing)
                m_holding_locks_list.append({ &lock, location, 1 });
        } else {
            VERIFY(refs_delta < 0);
            bool found = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    VERIFY(info.count >= (unsigned)-refs_delta);
                    info.count -= (unsigned)-refs_delta;
                    if (info.count == 0)
                        m_holding_locks_list.remove(i);
                    found = true;
                    break;
                }
            }
            VERIFY(found);
        }
    }
    u32 lock_count() const
    {
        return m_holding_locks.load(AK::MemoryOrder::memory_order_relaxed);
    }
#endif

    bool is_handling_page_fault() const
    {
        return m_handling_page_fault;
    }
    void set_handling_page_fault(bool b) { m_handling_page_fault = b; }
    void set_idle_thread() { m_is_idle_thread = true; }
    bool is_idle_thread() const { return m_is_idle_thread; }

    void set_crashing() { m_is_crashing = true; }
    [[nodiscard]] bool is_crashing() const { return m_is_crashing; }

    ALWAYS_INLINE u32 enter_profiler()
    {
        return m_nested_profiler_calls.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);
    }

    ALWAYS_INLINE u32 leave_profiler()
    {
        return m_nested_profiler_calls.fetch_sub(1, AK::MemoryOrder::memory_order_acquire);
    }

    bool is_profiling_suppressed() const { return m_is_profiling_suppressed; }
    void set_profiling_suppressed() { m_is_profiling_suppressed = true; }

    bool is_promise_violation_pending() const { return m_is_promise_violation_pending; }
    void set_promise_violation_pending(bool value) { m_is_promise_violation_pending = value; }

    bool is_allocation_enabled() const { return m_allocation_enabled; }
    void set_allocation_enabled(bool value) { m_allocation_enabled = value; }

    ErrorOr<NonnullOwnPtr<KString>> backtrace();

private:
    Thread(NonnullRefPtr<Process>, NonnullOwnPtr<Memory::Region>, NonnullRefPtr<Timer>, NonnullOwnPtr<KString>);

    BlockResult block_impl(BlockTimeout const&, Blocker&);

    IntrusiveListNode<Thread> m_process_thread_list_node;
    int m_runnable_priority { -1 };

    friend class WaitQueue;

    class JoinBlockerSet final : public BlockerSet {
    public:
        void thread_did_exit(void* exit_value)
        {
            SpinlockLocker lock(m_lock);
            VERIFY(!m_thread_did_exit);
            m_thread_did_exit = true;
            m_exit_value.store(exit_value, AK::MemoryOrder::memory_order_release);
            do_unblock_joiner();
        }
        void thread_finalizing()
        {
            SpinlockLocker lock(m_lock);
            do_unblock_joiner();
        }
        void* exit_value() const
        {
            VERIFY(m_thread_did_exit);
            return m_exit_value.load(AK::MemoryOrder::memory_order_acquire);
        }

        void try_unblock(JoinBlocker& blocker)
        {
            SpinlockLocker lock(m_lock);
            if (m_thread_did_exit)
                blocker.unblock(exit_value(), false);
        }

    protected:
        virtual bool should_add_blocker(Blocker& b, void*) override
        {
            VERIFY(b.blocker_type() == Blocker::Type::Join);
            auto& blocker = static_cast<JoinBlocker&>(b);

            // NOTE: m_lock is held already!
            if (m_thread_did_exit) {
                blocker.unblock(exit_value(), true);
                return false;
            }
            return true;
        }

    private:
        void do_unblock_joiner()
        {
            unblock_all_blockers_whose_conditions_are_met_locked([&](Blocker& b, void*, bool&) {
                VERIFY(b.blocker_type() == Blocker::Type::Join);
                auto& blocker = static_cast<JoinBlocker&>(b);
                return blocker.unblock(exit_value(), false);
            });
        }

        Atomic<void*> m_exit_value { nullptr };
        bool m_thread_did_exit { false };
    };

    LockMode unlock_process_if_locked(u32&);
    void relock_process(LockMode, u32);
    void reset_fpu_state();

    mutable RecursiveSpinlock m_lock { LockRank::Thread };
    mutable RecursiveSpinlock m_block_lock { LockRank::None };
    NonnullRefPtr<Process> m_process;
    ThreadID m_tid { -1 };
    ThreadRegisters m_regs {};
    DebugRegisterState m_debug_register_state {};
    TrapFrame* m_current_trap { nullptr };
    u32 m_saved_critical { 1 };
    IntrusiveListNode<Thread> m_ready_queue_node;
    Atomic<u32> m_cpu { 0 };
    u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
    Optional<u64> m_last_time_scheduled;
    Atomic<u64> m_total_time_scheduled_user { 0 };
    Atomic<u64> m_total_time_scheduled_kernel { 0 };
    u32 m_ticks_left { 0 };
    u32 m_times_scheduled { 0 };
    u32 m_ticks_in_user { 0 };
    u32 m_ticks_in_kernel { 0 };
    u32 m_pending_signals { 0 };
    u8 m_currently_handled_signal { 0 };
    u32 m_signal_mask { 0 };
    FlatPtr m_alternative_signal_stack { 0 };
    FlatPtr m_alternative_signal_stack_size { 0 };
    SignalBlockerSet m_signal_blocker_set;
    FlatPtr m_kernel_stack_base { 0 };
    FlatPtr m_kernel_stack_top { 0 };
    NonnullOwnPtr<Memory::Region> m_kernel_stack_region;
    VirtualAddress m_thread_specific_data;
    Optional<Memory::VirtualRange> m_thread_specific_range;
    Array<Optional<u32>, NSIG> m_signal_action_masks;
    Array<ProcessID, NSIG> m_signal_senders;
    Blocker* m_blocker { nullptr };
    Kernel::Mutex* m_blocking_mutex { nullptr };
    u32 m_lock_requested_count { 0 };
    IntrusiveListNode<Thread> m_blocked_threads_list_node;
    LockRank m_lock_rank_mask { LockRank::None };
    bool m_allocation_enabled { true };

    // FIXME: Remove this after annihilating Process::m_big_lock.
    IntrusiveListNode<Thread> m_big_lock_blocked_threads_list_node;

#if LOCK_DEBUG
    struct HoldingLockInfo {
        Mutex* lock;
        LockLocation lock_location;
        unsigned count;
    };

    Atomic<u32> m_holding_locks { 0 };
    Spinlock m_holding_locks_lock { LockRank::None };
    Vector<HoldingLockInfo> m_holding_locks_list;
#endif

    JoinBlockerSet m_join_blocker_set;

    Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_active { false };
    bool m_is_joinable { true };
    bool m_handling_page_fault { false };
    PreviousMode m_previous_mode { PreviousMode::KernelMode }; // We always start out in kernel mode.

    unsigned m_syscall_count { 0 };
    unsigned m_inode_faults { 0 };
    unsigned m_zero_faults { 0 };
    unsigned m_cow_faults { 0 };

    unsigned m_file_read_bytes { 0 };
    unsigned m_file_write_bytes { 0 };

    unsigned m_unix_socket_read_bytes { 0 };
    unsigned m_unix_socket_write_bytes { 0 };

    unsigned m_ipv4_socket_read_bytes { 0 };
    unsigned m_ipv4_socket_write_bytes { 0 };

    FPUState m_fpu_state {};
    State m_state { Thread::State::Invalid };
    NonnullOwnPtr<KString> m_name;
    u32 m_priority { THREAD_PRIORITY_NORMAL };

    State m_stop_state { Thread::State::Invalid };

    bool m_dump_backtrace_on_finalization { false };
    bool m_should_die { false };
    bool m_initialized { false };
    bool m_is_idle_thread { false };
    bool m_is_crashing { false };
    bool m_is_promise_violation_pending { false };
    Atomic<bool> m_have_any_unmasked_pending_signals { false };
    Atomic<u32> m_nested_profiler_calls { 0 };

    NonnullRefPtr<Timer> m_block_timer;

    bool m_is_profiling_suppressed { false };

    void yield_and_release_relock_big_lock();

    enum class VerifyLockNotHeld {
        Yes,
        No
    };

    void yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_held = VerifyLockNotHeld::Yes);
    void drop_thread_count();

    mutable IntrusiveListNode<Thread> m_global_thread_list_node;

public:
    using ListInProcess = IntrusiveList<&Thread::m_process_thread_list_node>;
    using GlobalList = IntrusiveList<&Thread::m_global_thread_list_node>;

    static SpinlockProtected<GlobalList>& all_instances();
};

AK_ENUM_BITWISE_OPERATORS(Thread::FileBlocker::BlockFlags);
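
// Illustrative sketch: AK_ENUM_BITWISE_OPERATORS makes BlockFlags usable as a
// bit field, so callers can combine and test flags (has_flag() comes from
// AK/EnumBits.h):
//
//     auto flags = Thread::FileBlocker::BlockFlags::Read | Thread::FileBlocker::BlockFlags::Write;
//     if (has_flag(flags, Thread::FileBlocker::BlockFlags::Read)) {
//         // ...
//     }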

template<IteratorFunction<Thread&> Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    return Thread::all_instances().with([&](auto& list) -> IterationDecision {
        for (auto& thread : list) {
            IterationDecision decision = callback(thread);
            if (decision != IterationDecision::Continue)
                return decision;
        }
        return IterationDecision::Continue;
    });
}

template<IteratorFunction<Thread&> Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    return Thread::all_instances().with([&](auto& list) -> IterationDecision {
        for (auto& thread : list) {
            if (thread.state() != state)
                continue;
            IterationDecision decision = callback(thread);
            if (decision != IterationDecision::Continue)
                return decision;
        }
        return IterationDecision::Continue;
    });
}

template<VoidFunction<Thread&> Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    // NOTE: A VoidFunction callback returns nothing, so there is no decision
    // to inspect; we simply visit every thread.
    return Thread::all_instances().with([&](auto& list) {
        for (auto& thread : list)
            callback(thread);
        return IterationDecision::Continue;
    });
}

template<VoidFunction<Thread&> Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    return for_each_in_state(state, [&](auto& thread) {
        callback(thread);
        return IterationDecision::Continue;
    });
}
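
// Usage sketch (illustrative): counting runnable threads with the
// VoidFunction overload of for_each_in_state():
//
//     size_t runnable_count = 0;
//     Thread::for_each_in_state(Thread::State::Runnable, [&](Thread&) {
//         ++runnable_count;
//     });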

}

template<>
struct AK::Formatter<Kernel::Thread> : AK::Formatter<FormatString> {
    ErrorOr<void> format(FormatBuilder&, Kernel::Thread const&);
};
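
// Illustrative only: with this Formatter specialization, a Thread can be passed
// directly to the kernel's formatting facilities, e.g.
//
//     dbgln("Blocking {}", *Thread::current());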