/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Concepts.h>
#include <AK/EnumBits.h>
#include <AK/HashMap.h>
#include <AK/IntrusiveList.h>
#include <AK/Optional.h>
#include <AK/OwnPtr.h>
#include <AK/String.h>
#include <AK/Time.h>
#include <AK/Vector.h>
#include <AK/WeakPtr.h>
#include <AK/Weakable.h>
#include <Kernel/Arch/x86/RegisterState.h>
#include <Kernel/Arch/x86/SafeMem.h>
#include <Kernel/Debug.h>
#include <Kernel/FileSystem/InodeIdentifier.h>
#include <Kernel/Forward.h>
#include <Kernel/KResult.h>
#include <Kernel/KString.h>
#include <Kernel/Library/ListedRefCounted.h>
#include <Kernel/Locking/LockLocation.h>
#include <Kernel/Locking/LockMode.h>
#include <Kernel/Locking/SpinlockProtected.h>
#include <Kernel/Memory/VirtualRange.h>
#include <Kernel/Scheduler.h>
#include <Kernel/TimerQueue.h>
#include <Kernel/UnixTypes.h>
#include <LibC/fd_set.h>
#include <LibC/signal_numbers.h>

namespace Kernel {

namespace Memory {
extern RecursiveSpinlock s_mm_lock;
}

enum class DispatchSignalResult {
    Deferred = 0,
    Yield,
    Terminate,
    Continue
};

struct SignalActionData {
    VirtualAddress handler_or_sigaction;
    u32 mask { 0 };
    int flags { 0 };
};

struct ThreadSpecificData {
    ThreadSpecificData* self;
};

#define THREAD_PRIORITY_MIN 1
#define THREAD_PRIORITY_LOW 10
#define THREAD_PRIORITY_NORMAL 30
#define THREAD_PRIORITY_HIGH 50
#define THREAD_PRIORITY_MAX 99

#define THREAD_AFFINITY_DEFAULT 0xffffffff
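
// Saved CPU context for a thread. Which register set is compiled in depends
// on the target architecture (i386 or x86_64); the accessors below give the
// context-switching code an architecture-neutral view of ip/sp/flags.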
struct ThreadRegisters {
#if ARCH(I386)
    FlatPtr ss;
    FlatPtr gs;
    FlatPtr fs;
    FlatPtr es;
    FlatPtr ds;
    FlatPtr edi;
    FlatPtr esi;
    FlatPtr ebp;
    FlatPtr esp;
    FlatPtr ebx;
    FlatPtr edx;
    FlatPtr ecx;
    FlatPtr eax;
    FlatPtr eip;
    FlatPtr esp0;
    FlatPtr ss0;
#else
    FlatPtr rdi;
    FlatPtr rsi;
    FlatPtr rbp;
    FlatPtr rsp;
    FlatPtr rbx;
    FlatPtr rdx;
    FlatPtr rcx;
    FlatPtr rax;
    FlatPtr r8;
    FlatPtr r9;
    FlatPtr r10;
    FlatPtr r11;
    FlatPtr r12;
    FlatPtr r13;
    FlatPtr r14;
    FlatPtr r15;
    FlatPtr rip;
    FlatPtr rsp0;
#endif
    FlatPtr cs;

#if ARCH(I386)
    FlatPtr eflags;
    FlatPtr flags() const { return eflags; }
    void set_flags(FlatPtr value) { eflags = value; }
    void set_sp(FlatPtr value) { esp = value; }
    void set_sp0(FlatPtr value) { esp0 = value; }
    void set_ip(FlatPtr value) { eip = value; }
#else
    FlatPtr rflags;
    FlatPtr flags() const { return rflags; }
    void set_flags(FlatPtr value) { rflags = value; }
    void set_sp(FlatPtr value) { rsp = value; }
    void set_sp0(FlatPtr value) { rsp0 = value; }
    void set_ip(FlatPtr value) { rip = value; }
#endif

    FlatPtr cr3;

    FlatPtr ip() const
    {
#if ARCH(I386)
        return eip;
#else
        return rip;
#endif
    }

    FlatPtr sp() const
    {
#if ARCH(I386)
        return esp;
#else
        return rsp;
#endif
    }
};
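
// A kernel thread: the schedulable unit of execution. Threads are
// reference-counted, weakable, and always belong to exactly one Process.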
class Thread
    : public ListedRefCounted<Thread>
    , public Weakable<Thread> {
    AK_MAKE_NONCOPYABLE(Thread);
    AK_MAKE_NONMOVABLE(Thread);

    friend class Mutex;
    friend class Process;
    friend class Scheduler;
    friend struct ThreadReadyQueue;

public:
    inline static Thread* current()
    {
        return Processor::current_thread();
    }

    static KResultOr<NonnullRefPtr<Thread>> try_create(NonnullRefPtr<Process>);
    ~Thread();

    static RefPtr<Thread> from_tid(ThreadID);
    static void finalize_dying_threads();

    ThreadID tid() const { return m_tid; }
    ProcessID pid() const;

    void set_priority(u32 p) { m_priority = p; }
    u32 priority() const { return m_priority; }

    void detach()
    {
        SpinlockLocker lock(m_lock);
        m_is_joinable = false;
    }

    [[nodiscard]] bool is_joinable() const
    {
        SpinlockLocker lock(m_lock);
        return m_is_joinable;
    }

    Process& process() { return m_process; }
    const Process& process() const { return m_process; }

    // NOTE: This returns a null-terminated string.
    StringView name() const
    {
        // NOTE: Whoever is calling this needs to be holding our lock while reading the name.
        VERIFY(m_lock.own_lock());
        return m_name ? m_name->view() : StringView {};
    }

    void set_name(OwnPtr<KString> name)
    {
        SpinlockLocker lock(m_lock);
        m_name = move(name);
    }

    void finalize();

    enum State : u8 {
        Invalid = 0,
        Runnable,
        Running,
        Dying,
        Dead,
        Stopped,
        Blocked
    };
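
    // Rough lifecycle: a thread starts out Runnable, and the scheduler moves
    // it between Runnable and Running. It becomes Blocked while waiting on a
    // Blocker, Stopped when a stop signal is dispatched, and goes through
    // Dying to Dead once it exits and the finalizer reaps it.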
    class [[nodiscard]] BlockResult {
    public:
        enum Type {
            WokeNormally,
            NotBlocked,
            InterruptedBySignal,
            InterruptedByDeath,
            InterruptedByTimeout,
        };

        BlockResult() = delete;

        BlockResult(Type type)
            : m_type(type)
        {
        }

        bool operator==(Type type) const
        {
            return m_type == type;
        }
        bool operator!=(Type type) const
        {
            return m_type != type;
        }

        [[nodiscard]] bool was_interrupted() const
        {
            switch (m_type) {
            case InterruptedBySignal:
            case InterruptedByDeath:
                return true;
            default:
                return false;
            }
        }

    private:
        Type m_type;
    };

    class BlockTimeout {
    public:
        BlockTimeout()
            : m_infinite(true)
        {
        }

        explicit BlockTimeout(bool is_absolute, const Time* time, const Time* start_time = nullptr, clockid_t clock_id = CLOCK_MONOTONIC_COARSE);

        const Time& absolute_time() const { return m_time; }
        const Time* start_time() const { return !m_infinite ? &m_start_time : nullptr; }
        clockid_t clock_id() const { return m_clock_id; }
        bool is_infinite() const { return m_infinite; }

    private:
        Time m_time {};
        Time m_start_time {};
        clockid_t m_clock_id { CLOCK_MONOTONIC_COARSE };
        bool m_infinite { false };
        bool m_should_block { false };
    };
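
    // Usage sketch (illustrative only): a relative one-second timeout on the
    // default coarse monotonic clock would be built like this:
    //
    //     auto duration = Time::from_seconds(1);
    //     BlockTimeout timeout(false, &duration);
    //
    // A default-constructed BlockTimeout means "block forever".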
    class BlockerSet;

    class Blocker {
        AK_MAKE_NONMOVABLE(Blocker);
        AK_MAKE_NONCOPYABLE(Blocker);

    public:
        enum class Type {
            Unknown = 0,
            File,
            Futex,
            Plan9FS,
            Join,
            Queue,
            Routing,
            Sleep,
            Wait
        };
        virtual ~Blocker();
        virtual StringView state_string() const = 0;
        virtual Type blocker_type() const = 0;
        virtual const BlockTimeout& override_timeout(const BlockTimeout& timeout) { return timeout; }
        virtual bool can_be_interrupted() const { return true; }
        virtual bool setup_blocker();

        Thread& thread() { return m_thread; }

        enum class UnblockImmediatelyReason {
            UnblockConditionAlreadyMet,
            TimeoutInThePast,
        };

        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) = 0;

        virtual void was_unblocked(bool did_timeout)
        {
            if (did_timeout) {
                SpinlockLocker lock(m_lock);
                m_did_timeout = true;
            }
        }
        void set_interrupted_by_death()
        {
            SpinlockLocker lock(m_lock);
            do_set_interrupted_by_death();
        }
        void set_interrupted_by_signal(u8 signal)
        {
            SpinlockLocker lock(m_lock);
            do_set_interrupted_by_signal(signal);
        }
        u8 was_interrupted_by_signal() const
        {
            SpinlockLocker lock(m_lock);
            return do_get_interrupted_by_signal();
        }
        virtual Thread::BlockResult block_result()
        {
            SpinlockLocker lock(m_lock);
            if (m_was_interrupted_by_death)
                return Thread::BlockResult::InterruptedByDeath;
            if (m_was_interrupted_by_signal != 0)
                return Thread::BlockResult::InterruptedBySignal;
            if (m_did_timeout)
                return Thread::BlockResult::InterruptedByTimeout;
            return Thread::BlockResult::WokeNormally;
        }

        void begin_blocking(Badge<Thread>);
        BlockResult end_blocking(Badge<Thread>, bool);

    protected:
        Blocker()
            : m_thread(*Thread::current())
        {
        }

        void do_set_interrupted_by_death()
        {
            m_was_interrupted_by_death = true;
        }
        void do_set_interrupted_by_signal(u8 signal)
        {
            VERIFY(signal != 0);
            m_was_interrupted_by_signal = signal;
        }
        void do_clear_interrupted_by_signal()
        {
            m_was_interrupted_by_signal = 0;
        }
        u8 do_get_interrupted_by_signal() const
        {
            return m_was_interrupted_by_signal;
        }
        [[nodiscard]] bool was_interrupted() const
        {
            return m_was_interrupted_by_death || m_was_interrupted_by_signal != 0;
        }
        void unblock_from_blocker()
        {
            {
                SpinlockLocker lock(m_lock);
                if (!m_is_blocking)
                    return;
                m_is_blocking = false;
            }

            m_thread->unblock_from_blocker(*this);
        }

        bool add_to_blocker_set(BlockerSet&, void* = nullptr);
        void set_blocker_set_raw_locked(BlockerSet* blocker_set) { m_blocker_set = blocker_set; }

        mutable RecursiveSpinlock m_lock;

    private:
        BlockerSet* m_blocker_set { nullptr };
        NonnullRefPtr<Thread> m_thread;
        u8 m_was_interrupted_by_signal { 0 };
        bool m_is_blocking { false };
        bool m_was_interrupted_by_death { false };
        bool m_did_timeout { false };
    };
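
    // The lifecycle of a Blocker, as driven by Thread::block<BlockerType>()
    // below: setup_blocker() registers it with the condition it waits on; if
    // that fails (or the timeout is already in the past), the thread never
    // blocks and will_unblock_immediately_without_blocking() is called
    // instead. Otherwise begin_blocking() marks it active, and end_blocking()
    // tears it down and computes the final BlockResult.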
    class BlockerSet {
        AK_MAKE_NONCOPYABLE(BlockerSet);
        AK_MAKE_NONMOVABLE(BlockerSet);

    public:
        BlockerSet() = default;

        virtual ~BlockerSet()
        {
            VERIFY(!m_lock.is_locked());
            VERIFY(m_blockers.is_empty());
        }

        bool add_blocker(Blocker& blocker, void* data)
        {
            SpinlockLocker lock(m_lock);
            if (!should_add_blocker(blocker, data))
                return false;
            m_blockers.append({ &blocker, data });
            return true;
        }

        void remove_blocker(Blocker& blocker)
        {
            SpinlockLocker lock(m_lock);
            // NOTE: it's possible that the blocker is no longer present
            m_blockers.remove_all_matching([&](auto& info) {
                return info.blocker == &blocker;
            });
        }

        bool is_empty() const
        {
            SpinlockLocker lock(m_lock);
            return is_empty_locked();
        }

    protected:
        template<typename Callback>
        bool unblock_all_blockers_whose_conditions_are_met(Callback try_to_unblock_one)
        {
            SpinlockLocker lock(m_lock);
            return unblock_all_blockers_whose_conditions_are_met_locked(try_to_unblock_one);
        }

        template<typename Callback>
        bool unblock_all_blockers_whose_conditions_are_met_locked(Callback try_to_unblock_one)
        {
            VERIFY(m_lock.is_locked());
            bool stop_iterating = false;
            bool did_unblock_any = false;
            for (size_t i = 0; i < m_blockers.size() && !stop_iterating;) {
                auto& info = m_blockers[i];
                if (bool did_unblock = try_to_unblock_one(*info.blocker, info.data, stop_iterating)) {
                    m_blockers.remove(i);
                    did_unblock_any = true;
                    continue;
                }
                i++;
            }
            return did_unblock_any;
        }

        bool is_empty_locked() const
        {
            VERIFY(m_lock.is_locked());
            return m_blockers.is_empty();
        }

        virtual bool should_add_blocker(Blocker&, void*) { return true; }

        struct BlockerInfo {
            Blocker* blocker;
            void* data;
        };

        Vector<BlockerInfo, 4> do_take_blockers(size_t count)
        {
            if (m_blockers.size() <= count)
                return move(m_blockers);

            size_t move_count = (count <= m_blockers.size()) ? count : m_blockers.size();
            VERIFY(move_count > 0);

            Vector<BlockerInfo, 4> taken_blockers;
            taken_blockers.ensure_capacity(move_count);
            for (size_t i = 0; i < move_count; i++)
                taken_blockers.append(m_blockers.take(i));
            m_blockers.remove(0, move_count);
            return taken_blockers;
        }

        void do_append_blockers(Vector<BlockerInfo, 4>&& blockers_to_append)
        {
            if (blockers_to_append.is_empty())
                return;

            if (m_blockers.is_empty()) {
                m_blockers = move(blockers_to_append);
                return;
            }

            m_blockers.ensure_capacity(m_blockers.size() + blockers_to_append.size());
            for (size_t i = 0; i < blockers_to_append.size(); i++)
                m_blockers.append(blockers_to_append.take(i));
            blockers_to_append.clear();
        }

        mutable Spinlock<u8> m_lock;

    private:
        Vector<BlockerInfo, 4> m_blockers;
    };
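
    // A BlockerSet collects the Blockers currently waiting on one shared
    // condition (e.g. a WaitQueue or FutexQueue). Whoever owns the set calls
    // unblock_all_blockers_whose_conditions_are_met() when the condition may
    // have changed, and the callback decides per blocker whether to wake it.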
    friend class JoinBlocker;
    class JoinBlocker final : public Blocker {
    public:
        explicit JoinBlocker(Thread& joinee, KResult& try_join_result, void*& joinee_exit_value);
        virtual Type blocker_type() const override { return Type::Join; }
        virtual StringView state_string() const override { return "Joining"sv; }
        virtual bool can_be_interrupted() const override { return false; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual bool setup_blocker() override;

        bool unblock(void*, bool);

    private:
        NonnullRefPtr<Thread> m_joinee;
        void*& m_joinee_exit_value;
        KResult& m_try_join_result;
        bool m_join_error { false };
        bool m_did_unblock { false };
        bool m_should_block { true };
    };

    class WaitQueueBlocker final : public Blocker {
    public:
        explicit WaitQueueBlocker(WaitQueue&, StringView block_reason = {});
        virtual ~WaitQueueBlocker();

        virtual Type blocker_type() const override { return Type::Queue; }
        virtual StringView state_string() const override { return m_block_reason.is_null() ? "Queue"sv : m_block_reason; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override { }
        virtual bool setup_blocker() override;

        bool unblock();

    protected:
        WaitQueue& m_wait_queue;
        StringView m_block_reason;
        bool m_should_block { true };
        bool m_did_unblock { false };
    };

    class FutexBlocker final : public Blocker {
    public:
        explicit FutexBlocker(FutexQueue&, u32);
        virtual ~FutexBlocker();

        virtual Type blocker_type() const override { return Type::Futex; }
        virtual StringView state_string() const override { return "Futex"sv; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override { }
        virtual bool setup_blocker() override;

        u32 bitset() const { return m_bitset; }

        void begin_requeue()
        {
            // We need to hold the lock until we moved it over
            m_relock_flags = m_lock.lock();
        }

        void finish_requeue(FutexQueue&);

        bool unblock_bitset(u32 bitset);
        bool unblock(bool force = false);

    protected:
        FutexQueue& m_futex_queue;
        u32 m_bitset { 0 };
        u32 m_relock_flags { 0 };
        bool m_should_block { true };
        bool m_did_unblock { false };
    };

    class FileBlocker : public Blocker {
    public:
        enum class BlockFlags : u16 {
            None = 0,

            Read = 1 << 0,
            Write = 1 << 1,
            ReadPriority = 1 << 2,

            Accept = 1 << 3,
            Connect = 1 << 4,
            SocketFlags = Accept | Connect,

            WriteNotOpen = 1 << 5,
            WriteError = 1 << 6,
            WriteHangUp = 1 << 7,
            ReadHangUp = 1 << 8,
            Exception = WriteNotOpen | WriteError | WriteHangUp | ReadHangUp,
        };

        virtual Type blocker_type() const override { return Type::File; }

        virtual bool unblock(bool, void*) = 0;

    protected:
        bool m_should_block { true };
    };

    class FileDescriptionBlocker : public FileBlocker {
    public:
        const FileDescription& blocked_description() const;

        virtual bool unblock(bool, void*) override;
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual bool setup_blocker() override;

    protected:
        explicit FileDescriptionBlocker(FileDescription&, BlockFlags, BlockFlags&);

    private:
        NonnullRefPtr<FileDescription> m_blocked_description;
        const BlockFlags m_flags;
        BlockFlags& m_unblocked_flags;
        bool m_did_unblock { false };
    };

    class AcceptBlocker final : public FileDescriptionBlocker {
    public:
        explicit AcceptBlocker(FileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Accepting"sv; }
    };

    class ConnectBlocker final : public FileDescriptionBlocker {
    public:
        explicit ConnectBlocker(FileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Connecting"sv; }
    };

    class WriteBlocker final : public FileDescriptionBlocker {
    public:
        explicit WriteBlocker(FileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Writing"sv; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;

    private:
        BlockTimeout m_timeout;
    };

    class ReadBlocker final : public FileDescriptionBlocker {
    public:
        explicit ReadBlocker(FileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Reading"sv; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;

    private:
        BlockTimeout m_timeout;
    };

    class SleepBlocker final : public Blocker {
    public:
        explicit SleepBlocker(const BlockTimeout&, Time* = nullptr);
        virtual StringView state_string() const override { return "Sleeping"sv; }
        virtual Type blocker_type() const override { return Type::Sleep; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual void was_unblocked(bool) override;
        virtual Thread::BlockResult block_result() override;

    private:
        void calculate_remaining();

        BlockTimeout m_deadline;
        Time* m_remaining;
    };

    class SelectBlocker final : public FileBlocker {
    public:
        struct FDInfo {
            NonnullRefPtr<FileDescription> description;
            BlockFlags block_flags { BlockFlags::None };
            BlockFlags unblocked_flags { BlockFlags::None };
        };

        typedef Vector<FDInfo, FD_SETSIZE> FDVector;

        explicit SelectBlocker(FDVector&);
        virtual ~SelectBlocker();

        virtual bool unblock(bool, void*) override;
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual void was_unblocked(bool) override;
        virtual StringView state_string() const override { return "Selecting"sv; }
        virtual bool setup_blocker() override;

    private:
        size_t collect_unblocked_flags();

        FDVector& m_fds;
        bool m_did_unblock { false };
    };

    class WaitBlocker final : public Blocker {
    public:
        enum class UnblockFlags {
            Terminated,
            Stopped,
            Continued,
            Disowned
        };

        WaitBlocker(int wait_options, idtype_t id_type, pid_t id, KResultOr<siginfo_t>& result);
        virtual StringView state_string() const override { return "Waiting"sv; }
        virtual Type blocker_type() const override { return Type::Wait; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual void was_unblocked(bool) override;
        virtual bool setup_blocker() override;

        bool unblock(Process& process, UnblockFlags flags, u8 signal, bool from_add_blocker);
        bool is_wait() const { return !(m_wait_options & WNOWAIT); }

    private:
        void do_was_disowned();
        void do_set_result(const siginfo_t&);

        const int m_wait_options;
        const idtype_t m_id_type;
        const pid_t m_waitee_id;
        KResultOr<siginfo_t>& m_result;
        RefPtr<Process> m_waitee;
        RefPtr<ProcessGroup> m_waitee_group;
        bool m_did_unblock { false };
        bool m_error { false };
        bool m_got_sigchild { false };
        bool m_should_block;
    };

    class WaitBlockerSet final : public BlockerSet {
        friend class WaitBlocker;

    public:
        explicit WaitBlockerSet(Process& process)
            : m_process(process)
        {
        }

        void disowned_by_waiter(Process&);
        bool unblock(Process&, WaitBlocker::UnblockFlags, u8);
        void try_unblock(WaitBlocker&);
        void finalize();

    protected:
        virtual bool should_add_blocker(Blocker&, void*) override;

    private:
        struct ProcessBlockInfo {
            NonnullRefPtr<Process> process;
            WaitBlocker::UnblockFlags flags;
            u8 signal;
            bool was_waited { false };

            explicit ProcessBlockInfo(NonnullRefPtr<Process>&&, WaitBlocker::UnblockFlags, u8);
            ~ProcessBlockInfo();
        };

        Process& m_process;
        Vector<ProcessBlockInfo, 2> m_processes;
        bool m_finalized { false };
    };

    template<typename AddBlockerHandler>
    KResult try_join(AddBlockerHandler add_blocker)
    {
        if (Thread::current() == this)
            return EDEADLK;

        SpinlockLocker lock(m_lock);
        if (!m_is_joinable || state() == Dead)
            return EINVAL;

        add_blocker();

        // From this point on the thread is no longer joinable by anyone
        // else. It also means that if the join is timed, it becomes
        // detached when a timeout happens.
        m_is_joinable = false;
        return KSuccess;
    }

    void did_schedule() { ++m_times_scheduled; }
    u32 times_scheduled() const { return m_times_scheduled; }

    void resume_from_stopped();

    [[nodiscard]] bool should_be_stopped() const;
    [[nodiscard]] bool is_stopped() const { return m_state == Stopped; }
    [[nodiscard]] bool is_blocked() const { return m_state == Blocked; }
    [[nodiscard]] bool is_in_block() const
    {
        SpinlockLocker lock(m_block_lock);
        return m_in_block;
    }

    u32 cpu() const { return m_cpu.load(AK::MemoryOrder::memory_order_consume); }
    void set_cpu(u32 cpu) { m_cpu.store(cpu, AK::MemoryOrder::memory_order_release); }
    u32 affinity() const { return m_cpu_affinity; }
    void set_affinity(u32 affinity) { m_cpu_affinity = affinity; }

    RegisterState& get_register_dump_from_stack();
    const RegisterState& get_register_dump_from_stack() const { return const_cast<Thread*>(this)->get_register_dump_from_stack(); }

    DebugRegisterState& debug_register_state() { return m_debug_register_state; }
    const DebugRegisterState& debug_register_state() const { return m_debug_register_state; }

    ThreadRegisters& regs() { return m_regs; }
    ThreadRegisters const& regs() const { return m_regs; }

    State state() const { return m_state; }
    StringView state_string() const;

    VirtualAddress thread_specific_data() const { return m_thread_specific_data; }
    size_t thread_specific_region_size() const;
    size_t thread_specific_region_alignment() const;

    ALWAYS_INLINE void yield_if_stopped()
    {
        // If some thread stopped us, we need to yield to someone else
        // We check this when entering/exiting a system call. A thread
        // may continue to execute in user land until the next timer
        // tick or entering the next system call, or if it's in kernel
        // mode then we will intercept prior to returning back to user
        // mode.
        SpinlockLocker lock(m_lock);
        while (state() == Thread::Stopped) {
            lock.unlock();
            // We shouldn't be holding the big lock here
            yield_without_releasing_big_lock();
            lock.lock();
        }
    }
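
    // The core blocking entry points. The template below constructs a
    // BlockerType in place, registers it, and yields until the blocker is
    // satisfied, a signal or death interrupts the thread, or the optional
    // timeout fires; the returned BlockResult says which of these happened.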
    void block(Kernel::Mutex&, SpinlockLocker<Spinlock<u8>>&, u32);

    template<typename BlockerType, class... Args>
    [[nodiscard]] BlockResult block(const BlockTimeout& timeout, Args&&... args)
    {
        VERIFY(!Processor::current_in_irq());
        VERIFY(this == Thread::current());
        ScopedCritical critical;
        VERIFY(!Memory::s_mm_lock.own_lock());

        SpinlockLocker block_lock(m_block_lock);
        // We need to hold m_block_lock so that nobody can unblock a blocker as soon
        // as it is constructed and registered elsewhere
        VERIFY(!m_in_block);
        m_in_block = true;
        BlockerType blocker(forward<Args>(args)...);

        if (!blocker.setup_blocker()) {
            blocker.will_unblock_immediately_without_blocking(Blocker::UnblockImmediatelyReason::UnblockConditionAlreadyMet);
            m_in_block = false;
            return BlockResult::NotBlocked;
        }

        SpinlockLocker scheduler_lock(g_scheduler_lock);
        // Relaxed semantics are fine for timeout_unblocked because we
        // synchronize on the spin locks already.
        Atomic<bool, AK::MemoryOrder::memory_order_relaxed> timeout_unblocked(false);
        bool timer_was_added = false;
        {
            switch (state()) {
            case Thread::Stopped:
                // It's possible that we were requested to be stopped!
                break;
            case Thread::Running:
                VERIFY(m_blocker == nullptr);
                break;
            default:
                VERIFY_NOT_REACHED();
            }

            m_blocker = &blocker;

            auto& block_timeout = blocker.override_timeout(timeout);
            if (!block_timeout.is_infinite()) {
                // Process::kill_all_threads may be called at any time, which will mark all
                // threads to die.
                timer_was_added = TimerQueue::the().add_timer_without_id(*m_block_timer, block_timeout.clock_id(), block_timeout.absolute_time(), [&]() {
                    VERIFY(!Processor::current_in_irq());
                    VERIFY(!g_scheduler_lock.own_lock());
                    VERIFY(!m_block_lock.own_lock());
                    // NOTE: this may execute on the same or any other processor!
                    SpinlockLocker scheduler_lock(g_scheduler_lock);
                    SpinlockLocker block_lock(m_block_lock);
                    if (m_blocker && timeout_unblocked.exchange(true) == false)
                        unblock();
                });
                if (!timer_was_added) {
                    // Timeout is already in the past
                    blocker.will_unblock_immediately_without_blocking(Blocker::UnblockImmediatelyReason::TimeoutInThePast);
                    m_blocker = nullptr;
                    m_in_block = false;
                    return BlockResult::InterruptedByTimeout;
                }
            }

            blocker.begin_blocking({});

            set_state(Thread::Blocked);
        }

        scheduler_lock.unlock();
        block_lock.unlock();

        dbgln_if(THREAD_DEBUG, "Thread {} blocking on {} ({}) -->", *this, &blocker, blocker.state_string());
        bool did_timeout = false;
        u32 lock_count_to_restore = 0;
        auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
        for (;;) {
            // Yield to the scheduler, and wait for us to resume unblocked.
            VERIFY(!g_scheduler_lock.own_lock());
            VERIFY(Processor::in_critical());
            yield_without_releasing_big_lock();
            VERIFY(Processor::in_critical());

            SpinlockLocker block_lock2(m_block_lock);
            if (should_be_stopped() || state() == Stopped) {
                dbgln("Thread should be stopped, current state: {}", state_string());
                set_state(Thread::Blocked);
                continue;
            }
            if (m_blocker && !m_blocker->can_be_interrupted() && !m_should_die) {
                block_lock2.unlock();
                dbgln("Thread should not be unblocking, current state: {}", state_string());
                set_state(Thread::Blocked);
                continue;
            }
            // Prevent the timeout from unblocking this thread if it happens to
            // be in the process of firing already
            did_timeout |= timeout_unblocked.exchange(true);
            if (m_blocker) {
                // Remove ourselves...
                VERIFY(m_blocker == &blocker);
                m_blocker = nullptr;
            }
            dbgln_if(THREAD_DEBUG, "<-- Thread {} unblocked from {} ({})", *this, &blocker, blocker.state_string());
            m_in_block = false;
            break;
        }

        if (blocker.was_interrupted_by_signal()) {
            SpinlockLocker scheduler_lock(g_scheduler_lock);
            SpinlockLocker lock(m_lock);
            dispatch_one_pending_signal();
        }

        // Notify the blocker that we are no longer blocking. It may need
        // to clean up now while we're still holding m_lock
        auto result = blocker.end_blocking({}, did_timeout); // calls was_unblocked internally

        if (timer_was_added && !did_timeout) {
            // Cancel the timer while not holding any locks. This allows
            // the timer function to complete before we remove it
            // (e.g. if it's on another processor)
            TimerQueue::the().cancel_timer(*m_block_timer);
        }
        if (previous_locked != LockMode::Unlocked) {
            // NOTE: this may trigger another call to Thread::block(), so
            // we need to do this after we're all done and restored m_in_block!
            relock_process(previous_locked, lock_count_to_restore);
        }
        return result;
    }

    u32 unblock_from_lock(Kernel::Mutex&);
    void unblock_from_blocker(Blocker&);
    void unblock(u8 signal = 0);

    template<class... Args>
    Thread::BlockResult wait_on(WaitQueue& wait_queue, const Thread::BlockTimeout& timeout, Args&&... args)
    {
        VERIFY(this == Thread::current());
        return block<Thread::WaitQueueBlocker>(timeout, wait_queue, forward<Args>(args)...);
    }
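
    // Usage sketch (illustrative only; `queue` is a hypothetical WaitQueue):
    // a caller parks itself until woken or interrupted, then inspects the
    // result:
    //
    //     auto result = Thread::current()->wait_on(queue, Thread::BlockTimeout {});
    //     if (result.was_interrupted())
    //         return; // e.g. bail out with EINTR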
    BlockResult sleep(clockid_t, const Time&, Time* = nullptr);
    BlockResult sleep(const Time& duration, Time* remaining_time = nullptr)
    {
        return sleep(CLOCK_MONOTONIC_COARSE, duration, remaining_time);
    }
    BlockResult sleep_until(clockid_t, const Time&);
    BlockResult sleep_until(const Time& duration)
    {
        return sleep_until(CLOCK_MONOTONIC_COARSE, duration);
    }

    // Tell this thread to unblock if needed,
    // gracefully unwind the stack and die.
    void set_should_die();
    [[nodiscard]] bool should_die() const { return m_should_die; }
    void die_if_needed();

    void exit(void* = nullptr);

    void update_time_scheduled(u64, bool, bool);
    bool tick();
    void set_ticks_left(u32 t) { m_ticks_left = t; }
    u32 ticks_left() const { return m_ticks_left; }

    FlatPtr kernel_stack_base() const { return m_kernel_stack_base; }
    FlatPtr kernel_stack_top() const { return m_kernel_stack_top; }

    void set_state(State, u8 = 0);

    [[nodiscard]] bool is_initialized() const { return m_initialized; }
    void set_initialized(bool initialized) { m_initialized = initialized; }

    void send_urgent_signal_to_self(u8 signal);
    void send_signal(u8 signal, Process* sender);

    u32 update_signal_mask(u32 signal_mask);
    u32 signal_mask_block(sigset_t signal_set, bool block);
    u32 signal_mask() const;
    void clear_signals();

    KResultOr<u32> peek_debug_register(u32 register_index);
    KResult poke_debug_register(u32 register_index, u32 data);

    void set_dump_backtrace_on_finalization() { m_dump_backtrace_on_finalization = true; }

    DispatchSignalResult dispatch_one_pending_signal();
    DispatchSignalResult try_dispatch_one_pending_signal(u8 signal);
    DispatchSignalResult dispatch_signal(u8 signal);
    void check_dispatch_pending_signal();
    [[nodiscard]] bool has_unmasked_pending_signals() const { return m_have_any_unmasked_pending_signals.load(AK::memory_order_consume); }
    [[nodiscard]] bool should_ignore_signal(u8 signal) const;
    [[nodiscard]] bool has_signal_handler(u8 signal) const;
    u32 pending_signals() const;
    u32 pending_signals_for_state() const;

    FPUState& fpu_state() { return m_fpu_state; }

    KResult make_thread_specific_region(Badge<Process>);

    unsigned syscall_count() const { return m_syscall_count; }
    void did_syscall() { ++m_syscall_count; }
    unsigned inode_faults() const { return m_inode_faults; }
    void did_inode_fault() { ++m_inode_faults; }
    unsigned zero_faults() const { return m_zero_faults; }
    void did_zero_fault() { ++m_zero_faults; }
    unsigned cow_faults() const { return m_cow_faults; }
    void did_cow_fault() { ++m_cow_faults; }

    unsigned file_read_bytes() const { return m_file_read_bytes; }
    unsigned file_write_bytes() const { return m_file_write_bytes; }

    void did_file_read(unsigned bytes)
    {
        m_file_read_bytes += bytes;
    }

    void did_file_write(unsigned bytes)
    {
        m_file_write_bytes += bytes;
    }

    unsigned unix_socket_read_bytes() const { return m_unix_socket_read_bytes; }
    unsigned unix_socket_write_bytes() const { return m_unix_socket_write_bytes; }

    void did_unix_socket_read(unsigned bytes)
    {
        m_unix_socket_read_bytes += bytes;
    }

    void did_unix_socket_write(unsigned bytes)
    {
        m_unix_socket_write_bytes += bytes;
    }

    unsigned ipv4_socket_read_bytes() const { return m_ipv4_socket_read_bytes; }
    unsigned ipv4_socket_write_bytes() const { return m_ipv4_socket_write_bytes; }

    void did_ipv4_socket_read(unsigned bytes)
    {
        m_ipv4_socket_read_bytes += bytes;
    }

    void did_ipv4_socket_write(unsigned bytes)
    {
        m_ipv4_socket_write_bytes += bytes;
    }

    void set_active(bool active) { m_is_active = active; }

    u32 saved_critical() const { return m_saved_critical; }
    void save_critical(u32 critical) { m_saved_critical = critical; }

    [[nodiscard]] bool is_active() const { return m_is_active; }

    [[nodiscard]] bool is_finalizable() const
    {
        // We can't finalize as long as this thread is still running
        // Note that checking for Running state here isn't sufficient
        // as the thread may not be in Running state but switching out.
        // m_is_active is set to false once the context switch is
        // complete and the thread is not executing on any processor.
        if (m_is_active.load(AK::memory_order_acquire))
            return false;

        // We can't finalize until the thread is either detached or
        // a join has started. We can't make m_is_joinable atomic
        // because that would introduce a race in try_join.
        SpinlockLocker lock(m_lock);
        return !m_is_joinable;
    }

    RefPtr<Thread> clone(Process&);

    template<IteratorFunction<Thread&> Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<IteratorFunction<Thread&> Callback>
    static IterationDecision for_each(Callback);

    template<VoidFunction<Thread&> Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<VoidFunction<Thread&> Callback>
    static IterationDecision for_each(Callback);

    static constexpr u32 default_kernel_stack_size = 65536;
    static constexpr u32 default_userspace_stack_size = 1 * MiB;

    u64 time_in_user() const { return m_total_time_scheduled_user; }
    u64 time_in_kernel() const { return m_total_time_scheduled_kernel; }

    enum class PreviousMode : u8 {
        KernelMode = 0,
        UserMode
    };
    PreviousMode previous_mode() const { return m_previous_mode; }
    bool set_previous_mode(PreviousMode mode)
    {
        if (m_previous_mode == mode)
            return false;
        m_previous_mode = mode;
        return true;
    }

    TrapFrame*& current_trap() { return m_current_trap; }
    TrapFrame const* const& current_trap() const { return m_current_trap; }

    RecursiveSpinlock& get_lock() const { return m_lock; }

#if LOCK_DEBUG
    void holding_lock(Mutex& lock, int refs_delta, LockLocation const& location)
    {
        VERIFY(refs_delta != 0);
        m_holding_locks.fetch_add(refs_delta, AK::MemoryOrder::memory_order_relaxed);
        SpinlockLocker list_lock(m_holding_locks_lock);
        if (refs_delta > 0) {
            bool have_existing = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    have_existing = true;
                    info.count += refs_delta;
                    break;
                }
            }
            if (!have_existing)
                m_holding_locks_list.append({ &lock, location, 1 });
        } else {
            VERIFY(refs_delta < 0);
            bool found = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    VERIFY(info.count >= (unsigned)-refs_delta);
                    info.count -= (unsigned)-refs_delta;
                    if (info.count == 0)
                        m_holding_locks_list.remove(i);
                    found = true;
                    break;
                }
            }
            VERIFY(found);
        }
    }
    u32 lock_count() const
    {
        return m_holding_locks.load(AK::MemoryOrder::memory_order_relaxed);
    }
#endif

    bool is_handling_page_fault() const
    {
        return m_handling_page_fault;
    }
    void set_handling_page_fault(bool b) { m_handling_page_fault = b; }
    void set_idle_thread() { m_is_idle_thread = true; }
    bool is_idle_thread() const { return m_is_idle_thread; }

    ALWAYS_INLINE u32 enter_profiler()
    {
        return m_nested_profiler_calls.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);
    }

    ALWAYS_INLINE u32 leave_profiler()
    {
        return m_nested_profiler_calls.fetch_sub(1, AK::MemoryOrder::memory_order_acquire);
    }

    bool is_profiling_suppressed() const { return m_is_profiling_suppressed; }
    void set_profiling_suppressed() { m_is_profiling_suppressed = true; }

    String backtrace();

private:
    Thread(NonnullRefPtr<Process>, NonnullOwnPtr<Memory::Region>, NonnullRefPtr<Timer>, OwnPtr<KString>);

    IntrusiveListNode<Thread> m_process_thread_list_node;
    int m_runnable_priority { -1 };

    friend class WaitQueue;

    class JoinBlockerSet final : public BlockerSet {
    public:
        void thread_did_exit(void* exit_value)
        {
            SpinlockLocker lock(m_lock);
            VERIFY(!m_thread_did_exit);
            m_thread_did_exit = true;
            m_exit_value.store(exit_value, AK::MemoryOrder::memory_order_release);
            do_unblock_joiner();
        }
        void thread_finalizing()
        {
            SpinlockLocker lock(m_lock);
            do_unblock_joiner();
        }
        void* exit_value() const
        {
            VERIFY(m_thread_did_exit);
            return m_exit_value.load(AK::MemoryOrder::memory_order_acquire);
        }

        void try_unblock(JoinBlocker& blocker)
        {
            SpinlockLocker lock(m_lock);
            if (m_thread_did_exit)
                blocker.unblock(exit_value(), false);
        }

    protected:
        virtual bool should_add_blocker(Blocker& b, void*) override
        {
            VERIFY(b.blocker_type() == Blocker::Type::Join);
            auto& blocker = static_cast<JoinBlocker&>(b);

            // NOTE: m_lock is held already!
            if (m_thread_did_exit) {
                blocker.unblock(exit_value(), true);
                return false;
            }

            return true;
        }

    private:
        void do_unblock_joiner()
        {
            unblock_all_blockers_whose_conditions_are_met_locked([&](Blocker& b, void*, bool&) {
                VERIFY(b.blocker_type() == Blocker::Type::Join);
                auto& blocker = static_cast<JoinBlocker&>(b);
                return blocker.unblock(exit_value(), false);
            });
        }

        Atomic<void*> m_exit_value { nullptr };
        bool m_thread_did_exit { false };
    };

    LockMode unlock_process_if_locked(u32&);
    void relock_process(LockMode, u32);
    void reset_fpu_state();

    mutable RecursiveSpinlock m_lock;
    mutable RecursiveSpinlock m_block_lock;
    NonnullRefPtr<Process> m_process;
    ThreadID m_tid { -1 };
    ThreadRegisters m_regs {};
    DebugRegisterState m_debug_register_state {};
    TrapFrame* m_current_trap { nullptr };
    u32 m_saved_critical { 1 };
    IntrusiveListNode<Thread> m_ready_queue_node;
    Atomic<u32> m_cpu { 0 };
    u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
    Optional<u64> m_last_time_scheduled;
    u64 m_total_time_scheduled_user { 0 };
    u64 m_total_time_scheduled_kernel { 0 };
    u32 m_ticks_left { 0 };
    u32 m_times_scheduled { 0 };
    u32 m_ticks_in_user { 0 };
    u32 m_ticks_in_kernel { 0 };
    u32 m_pending_signals { 0 };
    u32 m_signal_mask { 0 };
    FlatPtr m_kernel_stack_base { 0 };
    FlatPtr m_kernel_stack_top { 0 };
    OwnPtr<Memory::Region> m_kernel_stack_region;
    VirtualAddress m_thread_specific_data;
    Optional<Memory::VirtualRange> m_thread_specific_range;
    Array<SignalActionData, NSIG> m_signal_action_data;
    Blocker* m_blocker { nullptr };
    Kernel::Mutex* m_blocking_lock { nullptr };
    u32 m_lock_requested_count { 0 };
    IntrusiveListNode<Thread> m_blocked_threads_list_node;

#if LOCK_DEBUG
    struct HoldingLockInfo {
        Mutex* lock;
        LockLocation lock_location;
        unsigned count;
    };
    Atomic<u32> m_holding_locks { 0 };
    Spinlock<u8> m_holding_locks_lock;
    Vector<HoldingLockInfo> m_holding_locks_list;
#endif

    JoinBlockerSet m_join_blocker_set;

    Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_active { false };
    bool m_is_joinable { true };
    bool m_handling_page_fault { false };
    PreviousMode m_previous_mode { PreviousMode::KernelMode }; // We always start out in kernel mode

    unsigned m_syscall_count { 0 };
    unsigned m_inode_faults { 0 };
    unsigned m_zero_faults { 0 };
    unsigned m_cow_faults { 0 };

    unsigned m_file_read_bytes { 0 };
    unsigned m_file_write_bytes { 0 };

    unsigned m_unix_socket_read_bytes { 0 };
    unsigned m_unix_socket_write_bytes { 0 };

    unsigned m_ipv4_socket_read_bytes { 0 };
    unsigned m_ipv4_socket_write_bytes { 0 };

    FPUState m_fpu_state {};
    State m_state { Invalid };
    OwnPtr<KString> m_name;
    u32 m_priority { THREAD_PRIORITY_NORMAL };

    State m_stop_state { Invalid };

    bool m_dump_backtrace_on_finalization { false };
    bool m_should_die { false };
    bool m_initialized { false };
    bool m_in_block { false };
    bool m_is_idle_thread { false };
    Atomic<bool> m_have_any_unmasked_pending_signals { false };
    Atomic<u32> m_nested_profiler_calls { 0 };

    NonnullRefPtr<Timer> m_block_timer;

    bool m_is_profiling_suppressed { false };

    void yield_and_release_relock_big_lock();

    enum class VerifyLockNotHeld {
        Yes,
        No
    };

    void yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_held = VerifyLockNotHeld::Yes);
    void drop_thread_count(bool);

    mutable IntrusiveListNode<Thread> m_global_thread_list_node;

public:
    using ListInProcess = IntrusiveList<Thread, RawPtr<Thread>, &Thread::m_process_thread_list_node>;
    using GlobalList = IntrusiveList<Thread, RawPtr<Thread>, &Thread::m_global_thread_list_node>;

    static SpinlockProtected<GlobalList>& all_instances();
};

AK_ENUM_BITWISE_OPERATORS(Thread::FileBlocker::BlockFlags);
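
// With the bitwise operators generated above, BlockFlags values combine and
// test naturally (illustrative only):
//
//     auto flags = Thread::FileBlocker::BlockFlags::Read | Thread::FileBlocker::BlockFlags::Write;
//     if (has_flag(flags, Thread::FileBlocker::BlockFlags::Read)) {
//         // ...
//     }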
template<IteratorFunction<Thread&> Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    return Thread::all_instances().with([&](auto& list) -> IterationDecision {
        for (auto& thread : list) {
            IterationDecision decision = callback(thread);
            if (decision != IterationDecision::Continue)
                return decision;
        }
        return IterationDecision::Continue;
    });
}

template<IteratorFunction<Thread&> Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    return Thread::all_instances().with([&](auto& list) -> IterationDecision {
        for (auto& thread : list) {
            if (thread.state() != state)
                continue;
            IterationDecision decision = callback(thread);
            if (decision != IterationDecision::Continue)
                return decision;
        }
        return IterationDecision::Continue;
    });
}
template<VoidFunction<Thread&> Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    // The callback returns void here, so there is no decision to inspect;
    // we simply visit every thread. (The original compared the void return
    // against IterationDecision::Break, which cannot compile.)
    return Thread::all_instances().with([&](auto& list) {
        for (auto& thread : list)
            callback(thread);
        return IterationDecision::Continue;
    });
}
template<VoidFunction<Thread&> Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    return for_each_in_state(state, [&](auto& thread) {
        callback(thread);
        return IterationDecision::Continue;
    });
}
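
// Usage sketch (illustrative only): counting runnable threads with the
// VoidFunction overload:
//
//     size_t count = 0;
//     Thread::for_each_in_state(Thread::Runnable, [&](Thread&) {
//         ++count;
//     });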
}

template<>
struct AK::Formatter<Kernel::Thread> : AK::Formatter<FormatString> {
    void format(FormatBuilder&, const Kernel::Thread&);
};