Thread.h
/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Concepts.h>
#include <AK/EnumBits.h>
#include <AK/HashMap.h>
#include <AK/IntrusiveList.h>
#include <AK/Optional.h>
#include <AK/OwnPtr.h>
#if LOCK_DEBUG
#    include <AK/SourceLocation.h>
#endif
#include <AK/String.h>
#include <AK/Time.h>
#include <AK/Vector.h>
#include <AK/WeakPtr.h>
#include <AK/Weakable.h>
#include <Kernel/Arch/x86/RegisterState.h>
#include <Kernel/Arch/x86/SafeMem.h>
#include <Kernel/Debug.h>
#include <Kernel/FileSystem/InodeIdentifier.h>
#include <Kernel/Forward.h>
#include <Kernel/KResult.h>
#include <Kernel/KString.h>
#include <Kernel/LockMode.h>
#include <Kernel/Scheduler.h>
#include <Kernel/TimerQueue.h>
#include <Kernel/UnixTypes.h>
#include <Kernel/VM/Range.h>
#include <LibC/fd_set.h>
#include <LibC/signal_numbers.h>

namespace Kernel {

extern RecursiveSpinLock s_mm_lock;

enum class DispatchSignalResult {
    Deferred = 0,
    Yield,
    Terminate,
    Continue
};

struct SignalActionData {
    VirtualAddress handler_or_sigaction;
    u32 mask { 0 };
    int flags { 0 };
};

struct ThreadSpecificData {
    ThreadSpecificData* self;
};

#define THREAD_PRIORITY_MIN 1
#define THREAD_PRIORITY_LOW 10
#define THREAD_PRIORITY_NORMAL 30
#define THREAD_PRIORITY_HIGH 50
#define THREAD_PRIORITY_MAX 99

#define THREAD_AFFINITY_DEFAULT 0xffffffff

struct ThreadRegisters {
#if ARCH(I386)
    FlatPtr ss;
    FlatPtr gs;
    FlatPtr fs;
    FlatPtr es;
    FlatPtr ds;
    FlatPtr edi;
    FlatPtr esi;
    FlatPtr ebp;
    FlatPtr esp;
    FlatPtr ebx;
    FlatPtr edx;
    FlatPtr ecx;
    FlatPtr eax;
    FlatPtr eip;
    FlatPtr esp0;
    FlatPtr ss0;
#else
    FlatPtr rdi;
    FlatPtr rsi;
    FlatPtr rbp;
    FlatPtr rsp;
    FlatPtr rbx;
    FlatPtr rdx;
    FlatPtr rcx;
    FlatPtr rax;
    FlatPtr r8;
    FlatPtr r9;
    FlatPtr r10;
    FlatPtr r11;
    FlatPtr r12;
    FlatPtr r13;
    FlatPtr r14;
    FlatPtr r15;
    FlatPtr rip;
    FlatPtr rsp0;
#endif
    FlatPtr cs;
#if ARCH(I386)
    FlatPtr eflags;
#else
    FlatPtr rflags;
#endif
    FlatPtr cr3;

    FlatPtr ip() const
    {
#if ARCH(I386)
        return eip;
#else
        return rip;
#endif
    }

    FlatPtr sp() const
    {
#if ARCH(I386)
        return esp;
#else
        return rsp;
#endif
    }
};
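
// Illustrative usage sketch (not part of the original header): ip() and sp()
// give architecture-independent access to the saved program counter and stack
// pointer, e.g. when logging a thread's saved register state:
//
//     auto& regs = thread.regs();
//     dbgln("ip={:#x} sp={:#x}", regs.ip(), regs.sp());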
class Thread
    : public RefCounted<Thread>
    , public Weakable<Thread> {
    AK_MAKE_NONCOPYABLE(Thread);
    AK_MAKE_NONMOVABLE(Thread);

    friend class Mutex;
    friend class Process;
    friend class ProtectedProcessBase;
    friend class Scheduler;
    friend struct ThreadReadyQueue;

    static SpinLock<u8> g_tid_map_lock;
    static HashMap<ThreadID, Thread*>* g_tid_map;

public:
    inline static Thread* current()
    {
        return Processor::current_thread();
    }

    static void initialize();

    static KResultOr<NonnullRefPtr<Thread>> try_create(NonnullRefPtr<Process>);
    ~Thread();

    static RefPtr<Thread> from_tid(ThreadID);
    static void finalize_dying_threads();

    ThreadID tid() const { return m_tid; }
    ProcessID pid() const;

    void set_priority(u32 p) { m_priority = p; }
    u32 priority() const { return m_priority; }

    void detach()
    {
        ScopedSpinLock lock(m_lock);
        m_is_joinable = false;
    }

    [[nodiscard]] bool is_joinable() const
    {
        ScopedSpinLock lock(m_lock);
        return m_is_joinable;
    }

    Process& process() { return m_process; }
    const Process& process() const { return m_process; }

    // NOTE: This returns a null-terminated string.
    StringView name() const
    {
        // NOTE: Whoever is calling this needs to be holding our lock while reading the name.
        VERIFY(m_lock.own_lock());
        return m_name ? m_name->view() : StringView {};
    }

    void set_name(OwnPtr<KString> name)
    {
        ScopedSpinLock lock(m_lock);
        m_name = move(name);
    }

    void finalize();

    enum State : u8 {
        Invalid = 0,
        Runnable,
        Running,
        Dying,
        Dead,
        Stopped,
        Blocked
    };

    class [[nodiscard]] BlockResult {
    public:
        enum Type {
            WokeNormally,
            NotBlocked,
            InterruptedBySignal,
            InterruptedByDeath,
            InterruptedByTimeout,
        };

        BlockResult() = delete;

        BlockResult(Type type)
            : m_type(type)
        {
        }

        bool operator==(Type type) const
        {
            return m_type == type;
        }
        bool operator!=(Type type) const
        {
            return m_type != type;
        }

        [[nodiscard]] bool was_interrupted() const
        {
            switch (m_type) {
            case InterruptedBySignal:
            case InterruptedByDeath:
                return true;
            default:
                return false;
            }
        }

        [[nodiscard]] bool timed_out() const
        {
            return m_type == InterruptedByTimeout;
        }

    private:
        Type m_type;
    };
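
    // Illustrative sketch (not in the original header): callers typically
    // compare a BlockResult against the Type enumerators directly via the
    // operators above, e.g.:
    //
    //     auto result = Thread::current()->sleep(duration);
    //     if (result == Thread::BlockResult::InterruptedBySignal)
    //         return EINTR;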
    class BlockTimeout {
    public:
        BlockTimeout()
            : m_infinite(true)
        {
        }
        explicit BlockTimeout(bool is_absolute, const Time* time, const Time* start_time = nullptr, clockid_t clock_id = CLOCK_MONOTONIC_COARSE);

        const Time& absolute_time() const { return m_time; }
        const Time* start_time() const { return !m_infinite ? &m_start_time : nullptr; }
        clockid_t clock_id() const { return m_clock_id; }
        bool is_infinite() const { return m_infinite; }
        bool should_block() const { return m_infinite || m_should_block; }

    private:
        Time m_time {};
        Time m_start_time {};
        clockid_t m_clock_id { CLOCK_MONOTONIC_COARSE };
        bool m_infinite { false };
        bool m_should_block { false };
    };
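
    // Illustrative sketch (not in the original header): a default-constructed
    // BlockTimeout blocks forever, while a finite one wraps an AK::Time, e.g.
    // a relative two-second deadline:
    //
    //     auto duration = Time::from_seconds(2);
    //     Thread::BlockTimeout timeout(false, &duration);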
    class BlockCondition;

    class Blocker {
    public:
        enum class Type {
            Unknown = 0,
            File,
            Futex,
            Plan9FS,
            Join,
            Queue,
            Routing,
            Sleep,
            Wait
        };
        virtual ~Blocker();
        virtual StringView state_string() const = 0;
        virtual bool should_block() { return true; }
        virtual Type blocker_type() const = 0;
        virtual const BlockTimeout& override_timeout(const BlockTimeout& timeout) { return timeout; }
        virtual bool can_be_interrupted() const { return true; }
        virtual void not_blocking(bool) = 0;
        virtual void was_unblocked(bool did_timeout)
        {
            if (did_timeout) {
                ScopedSpinLock lock(m_lock);
                m_did_timeout = true;
            }
        }
        void set_interrupted_by_death()
        {
            ScopedSpinLock lock(m_lock);
            do_set_interrupted_by_death();
        }
        void set_interrupted_by_signal(u8 signal)
        {
            ScopedSpinLock lock(m_lock);
            do_set_interrupted_by_signal(signal);
        }
        u8 was_interrupted_by_signal() const
        {
            ScopedSpinLock lock(m_lock);
            return do_get_interrupted_by_signal();
        }
        virtual Thread::BlockResult block_result()
        {
            ScopedSpinLock lock(m_lock);
            if (m_was_interrupted_by_death)
                return Thread::BlockResult::InterruptedByDeath;
            if (m_was_interrupted_by_signal != 0)
                return Thread::BlockResult::InterruptedBySignal;
            if (m_did_timeout)
                return Thread::BlockResult::InterruptedByTimeout;
            return Thread::BlockResult::WokeNormally;
        }

        void begin_blocking(Badge<Thread>);
        BlockResult end_blocking(Badge<Thread>, bool);

    protected:
        void do_set_interrupted_by_death()
        {
            m_was_interrupted_by_death = true;
        }
        void do_set_interrupted_by_signal(u8 signal)
        {
            VERIFY(signal != 0);
            m_was_interrupted_by_signal = signal;
        }
        void do_clear_interrupted_by_signal()
        {
            m_was_interrupted_by_signal = 0;
        }
        u8 do_get_interrupted_by_signal() const
        {
            return m_was_interrupted_by_signal;
        }
        [[nodiscard]] bool was_interrupted() const
        {
            return m_was_interrupted_by_death || m_was_interrupted_by_signal != 0;
        }
        void unblock_from_blocker()
        {
            RefPtr<Thread> thread;
            {
                ScopedSpinLock lock(m_lock);
                if (m_is_blocking) {
                    m_is_blocking = false;
                    VERIFY(m_blocked_thread);
                    thread = m_blocked_thread;
                }
            }
            if (thread)
                thread->unblock_from_blocker(*this);
        }

        bool set_block_condition(BlockCondition&, void* = nullptr);
        void set_block_condition_raw_locked(BlockCondition* block_condition)
        {
            m_block_condition = block_condition;
        }

        mutable RecursiveSpinLock m_lock;

    private:
        BlockCondition* m_block_condition { nullptr };
        void* m_block_data { nullptr };
        Thread* m_blocked_thread { nullptr };
        u8 m_was_interrupted_by_signal { 0 };
        bool m_is_blocking { false };
        bool m_was_interrupted_by_death { false };
        bool m_did_timeout { false };
    };
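
    // Illustrative sketch (not in the original header): a minimal concrete
    // Blocker only needs a type, a state string and a not_blocking() hook;
    // the remaining virtuals have workable defaults. A hypothetical example:
    //
    //     class ExampleBlocker final : public Thread::Blocker {
    //     public:
    //         virtual Type blocker_type() const override { return Type::Unknown; }
    //         virtual StringView state_string() const override { return "Example"sv; }
    //         virtual void not_blocking(bool) override { }
    //     };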
    class BlockCondition {
        AK_MAKE_NONCOPYABLE(BlockCondition);
        AK_MAKE_NONMOVABLE(BlockCondition);

    public:
        BlockCondition() = default;

        virtual ~BlockCondition()
        {
            ScopedSpinLock lock(m_lock);
            VERIFY(m_blockers.is_empty());
        }

        bool add_blocker(Blocker& blocker, void* data)
        {
            ScopedSpinLock lock(m_lock);
            if (!should_add_blocker(blocker, data))
                return false;
            m_blockers.append({ &blocker, data });
            return true;
        }

        void remove_blocker(Blocker& blocker, void* data)
        {
            ScopedSpinLock lock(m_lock);
            // NOTE: it's possible that the blocker is no longer present
            m_blockers.remove_first_matching([&](auto& info) {
                return info.blocker == &blocker && info.data == data;
            });
        }

        bool is_empty() const
        {
            ScopedSpinLock lock(m_lock);
            return is_empty_locked();
        }

    protected:
        template<typename UnblockOne>
        bool unblock(UnblockOne unblock_one)
        {
            ScopedSpinLock lock(m_lock);
            return do_unblock(unblock_one);
        }

        template<typename UnblockOne>
        bool do_unblock(UnblockOne unblock_one)
        {
            VERIFY(m_lock.is_locked());
            bool stop_iterating = false;
            bool did_unblock = false;
            for (size_t i = 0; i < m_blockers.size() && !stop_iterating;) {
                auto& info = m_blockers[i];
                if (unblock_one(*info.blocker, info.data, stop_iterating)) {
                    m_blockers.remove(i);
                    did_unblock = true;
                    continue;
                }
                i++;
            }
            return did_unblock;
        }

        bool is_empty_locked() const
        {
            VERIFY(m_lock.is_locked());
            return m_blockers.is_empty();
        }

        virtual bool should_add_blocker(Blocker&, void*) { return true; }

        struct BlockerInfo {
            Blocker* blocker;
            void* data;
        };

        Vector<BlockerInfo, 4> do_take_blockers(size_t count)
        {
            if (m_blockers.size() <= count)
                return move(m_blockers);

            size_t move_count = (count <= m_blockers.size()) ? count : m_blockers.size();
            VERIFY(move_count > 0);

            Vector<BlockerInfo, 4> taken_blockers;
            taken_blockers.ensure_capacity(move_count);
            for (size_t i = 0; i < move_count; i++)
                taken_blockers.append(m_blockers.take(i));
            m_blockers.remove(0, move_count);
            return taken_blockers;
        }

        void do_append_blockers(Vector<BlockerInfo, 4>&& blockers_to_append)
        {
            if (blockers_to_append.is_empty())
                return;

            if (m_blockers.is_empty()) {
                m_blockers = move(blockers_to_append);
                return;
            }

            m_blockers.ensure_capacity(m_blockers.size() + blockers_to_append.size());
            for (size_t i = 0; i < blockers_to_append.size(); i++)
                m_blockers.append(blockers_to_append.take(i));
            blockers_to_append.clear();
        }

        mutable SpinLock<u8> m_lock;

    private:
        Vector<BlockerInfo, 4> m_blockers;
    };
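
    // Illustrative sketch (not in the original header): subclasses wake their
    // waiters by calling unblock() with a callback that decides, per blocker,
    // whether it should be released. Assuming a hypothetical SomeBlocker with
    // its own unblock() method:
    //
    //     unblock([&](Blocker& b, void* data, bool& stop_iterating) {
    //         return static_cast<SomeBlocker&>(b).unblock();
    //     });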
    friend class JoinBlocker;
    class JoinBlocker final : public Blocker {
    public:
        explicit JoinBlocker(Thread& joinee, KResult& try_join_result, void*& joinee_exit_value);
        virtual Type blocker_type() const override { return Type::Join; }
        virtual StringView state_string() const override { return "Joining"sv; }
        virtual bool can_be_interrupted() const override { return false; }
        virtual bool should_block() override { return !m_join_error && m_should_block; }
        virtual void not_blocking(bool) override;

        bool unblock(void*, bool);

    private:
        NonnullRefPtr<Thread> m_joinee;
        void*& m_joinee_exit_value;
        bool m_join_error { false };
        bool m_did_unblock { false };
        bool m_should_block { true };
    };

    class QueueBlocker : public Blocker {
    public:
        explicit QueueBlocker(WaitQueue&, StringView block_reason = {});
        virtual ~QueueBlocker();

        virtual Type blocker_type() const override { return Type::Queue; }
        virtual StringView state_string() const override { return m_block_reason.is_null() ? "Queue"sv : m_block_reason; }
        virtual void not_blocking(bool) override { }

        virtual bool should_block() override
        {
            return m_should_block;
        }

        bool unblock();

    protected:
        StringView m_block_reason;
        bool m_should_block { true };
        bool m_did_unblock { false };
    };

    class FutexBlocker : public Blocker {
    public:
        explicit FutexBlocker(FutexQueue&, u32);
        virtual ~FutexBlocker();

        virtual Type blocker_type() const override { return Type::Futex; }
        virtual StringView state_string() const override { return "Futex"sv; }
        virtual void not_blocking(bool) override { }

        virtual bool should_block() override
        {
            return m_should_block;
        }

        u32 bitset() const { return m_bitset; }

        void begin_requeue()
        {
            // We need to hold the lock until we moved it over
            m_relock_flags = m_lock.lock();
        }
        void finish_requeue(FutexQueue&);

        bool unblock_bitset(u32 bitset);
        bool unblock(bool force = false);

    protected:
        u32 m_bitset;
        u32 m_relock_flags { 0 };
        bool m_should_block { true };
        bool m_did_unblock { false };
    };

    class FileBlocker : public Blocker {
    public:
        enum class BlockFlags : u16 {
            None = 0,

            Read = 1 << 0,
            Write = 1 << 1,
            ReadPriority = 1 << 2,

            Accept = 1 << 3,
            Connect = 1 << 4,
            SocketFlags = Accept | Connect,

            WriteNotOpen = 1 << 5,
            WriteError = 1 << 6,
            WriteHangUp = 1 << 7,
            ReadHangUp = 1 << 8,
            Exception = WriteNotOpen | WriteError | WriteHangUp | ReadHangUp,
        };

        virtual Type blocker_type() const override { return Type::File; }

        virtual bool should_block() override
        {
            return m_should_block;
        }

        virtual bool unblock(bool, void*) = 0;

    protected:
        bool m_should_block { true };
    };
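
    // Illustrative sketch (not in the original header): BlockFlags gets the
    // usual bitwise operators via the AK_ENUM_BITWISE_OPERATORS invocation
    // near the end of this file, so flags combine and test like bit masks:
    //
    //     auto flags = FileBlocker::BlockFlags::Read | FileBlocker::BlockFlags::Write;
    //     if (has_flag(flags, FileBlocker::BlockFlags::Read)) {
    //         // interested in readability
    //     }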
    class FileDescriptionBlocker : public FileBlocker {
    public:
        const FileDescription& blocked_description() const;

        virtual bool unblock(bool, void*) override;
        virtual void not_blocking(bool) override;

    protected:
        explicit FileDescriptionBlocker(FileDescription&, BlockFlags, BlockFlags&);

    private:
        NonnullRefPtr<FileDescription> m_blocked_description;
        const BlockFlags m_flags;
        BlockFlags& m_unblocked_flags;
        bool m_did_unblock { false };
    };

    class AcceptBlocker final : public FileDescriptionBlocker {
    public:
        explicit AcceptBlocker(FileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Accepting"sv; }
    };

    class ConnectBlocker final : public FileDescriptionBlocker {
    public:
        explicit ConnectBlocker(FileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Connecting"sv; }
    };

    class WriteBlocker final : public FileDescriptionBlocker {
    public:
        explicit WriteBlocker(FileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Writing"sv; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;

    private:
        BlockTimeout m_timeout;
    };

    class ReadBlocker final : public FileDescriptionBlocker {
    public:
        explicit ReadBlocker(FileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Reading"sv; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;

    private:
        BlockTimeout m_timeout;
    };

    class SleepBlocker final : public Blocker {
    public:
        explicit SleepBlocker(const BlockTimeout&, Time* = nullptr);
        virtual StringView state_string() const override { return "Sleeping"sv; }
        virtual Type blocker_type() const override { return Type::Sleep; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;
        virtual void not_blocking(bool) override;
        virtual void was_unblocked(bool) override;
        virtual Thread::BlockResult block_result() override;

    private:
        void calculate_remaining();

        BlockTimeout m_deadline;
        Time* m_remaining;
    };

    class SelectBlocker final : public FileBlocker {
    public:
        struct FDInfo {
            NonnullRefPtr<FileDescription> description;
            BlockFlags block_flags { BlockFlags::None };
            BlockFlags unblocked_flags { BlockFlags::None };
        };

        typedef Vector<FDInfo, FD_SETSIZE> FDVector;
        SelectBlocker(FDVector& fds);
        virtual ~SelectBlocker();

        virtual bool unblock(bool, void*) override;
        virtual void not_blocking(bool) override;
        virtual void was_unblocked(bool) override;
        virtual StringView state_string() const override { return "Selecting"sv; }

    private:
        size_t collect_unblocked_flags();

        FDVector& m_fds;
        bool m_did_unblock { false };
    };

    class WaitBlocker final : public Blocker {
    public:
        enum class UnblockFlags {
            Terminated,
            Stopped,
            Continued,
            Disowned
        };

        WaitBlocker(int wait_options, idtype_t id_type, pid_t id, KResultOr<siginfo_t>& result);
        virtual StringView state_string() const override { return "Waiting"sv; }
        virtual Type blocker_type() const override { return Type::Wait; }
        virtual bool should_block() override { return m_should_block; }
        virtual void not_blocking(bool) override;
        virtual void was_unblocked(bool) override;

        bool unblock(Process& process, UnblockFlags flags, u8 signal, bool from_add_blocker);
        bool is_wait() const { return !(m_wait_options & WNOWAIT); }

    private:
        void do_was_disowned();
        void do_set_result(const siginfo_t&);

        const int m_wait_options;
        const idtype_t m_id_type;
        const pid_t m_waitee_id;
        KResultOr<siginfo_t>& m_result;
        RefPtr<Process> m_waitee;
        RefPtr<ProcessGroup> m_waitee_group;
        bool m_did_unblock { false };
        bool m_error { false };
        bool m_got_sigchild { false };
        bool m_should_block;
    };

    class WaitBlockCondition final : public BlockCondition {
        friend class WaitBlocker;

    public:
        WaitBlockCondition(Process& process)
            : m_process(process)
        {
        }

        void disowned_by_waiter(Process&);
        bool unblock(Process&, WaitBlocker::UnblockFlags, u8);
        void try_unblock(WaitBlocker&);
        void finalize();

    protected:
        virtual bool should_add_blocker(Blocker&, void*) override;

    private:
        struct ProcessBlockInfo {
            NonnullRefPtr<Process> process;
            WaitBlocker::UnblockFlags flags;
            u8 signal;
            bool was_waited { false };

            explicit ProcessBlockInfo(NonnullRefPtr<Process>&&, WaitBlocker::UnblockFlags, u8);
            ~ProcessBlockInfo();
        };

        Process& m_process;
        Vector<ProcessBlockInfo, 2> m_processes;
        bool m_finalized { false };
    };

    template<typename AddBlockerHandler>
    KResult try_join(AddBlockerHandler add_blocker)
    {
        if (Thread::current() == this)
            return EDEADLK;

        ScopedSpinLock lock(m_lock);
        if (!m_is_joinable || state() == Dead)
            return EINVAL;

        add_blocker();

        // From this point on the thread is no longer joinable by anyone
        // else. It also means that if the join is timed, it becomes
        // detached when a timeout happens.
        m_is_joinable = false;
        return KSuccess;
    }
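
    // Illustrative sketch (not in the original header): the handler passed to
    // try_join() runs while m_lock is still held, so it can register the
    // caller's JoinBlocker before the joinee can disappear:
    //
    //     auto result = joinee.try_join([&]() {
    //         // register a JoinBlocker on the joinee's join condition here
    //     });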
    void did_schedule() { ++m_times_scheduled; }
    u32 times_scheduled() const { return m_times_scheduled; }

    void resume_from_stopped();

    [[nodiscard]] bool should_be_stopped() const;
    [[nodiscard]] bool is_stopped() const { return m_state == Stopped; }
    [[nodiscard]] bool is_blocked() const { return m_state == Blocked; }
    [[nodiscard]] bool is_in_block() const
    {
        ScopedSpinLock lock(m_block_lock);
        return m_in_block;
    }

    u32 cpu() const { return m_cpu.load(AK::MemoryOrder::memory_order_consume); }
    void set_cpu(u32 cpu) { m_cpu.store(cpu, AK::MemoryOrder::memory_order_release); }

    u32 affinity() const { return m_cpu_affinity; }
    void set_affinity(u32 affinity) { m_cpu_affinity = affinity; }

    RegisterState& get_register_dump_from_stack();
    const RegisterState& get_register_dump_from_stack() const { return const_cast<Thread*>(this)->get_register_dump_from_stack(); }

    DebugRegisterState& debug_register_state() { return m_debug_register_state; }
    const DebugRegisterState& debug_register_state() const { return m_debug_register_state; }

    ThreadRegisters& regs() { return m_regs; }
    ThreadRegisters const& regs() const { return m_regs; }

    State state() const { return m_state; }
    StringView state_string() const;

    VirtualAddress thread_specific_data() const { return m_thread_specific_data; }
    size_t thread_specific_region_size() const;
    size_t thread_specific_region_alignment() const;

    ALWAYS_INLINE void yield_if_stopped()
    {
        // If some thread stopped us, we need to yield to someone else
        // We check this when entering/exiting a system call. A thread
        // may continue to execute in user land until the next timer
        // tick or entering the next system call, or if it's in kernel
        // mode then we will intercept prior to returning back to user
        // mode.
        ScopedSpinLock lock(m_lock);
        while (state() == Thread::Stopped) {
            lock.unlock();
            // We shouldn't be holding the big lock here
            yield_assuming_not_holding_big_lock();
            lock.lock();
        }
    }

    void block(Kernel::Mutex&, ScopedSpinLock<SpinLock<u8>>&, u32);

    template<typename BlockerType, class... Args>
    [[nodiscard]] BlockResult block(const BlockTimeout& timeout, Args&&... args)
    {
        VERIFY(!Processor::current().in_irq());
        VERIFY(this == Thread::current());
        ScopedCritical critical;
        VERIFY(!s_mm_lock.own_lock());

        ScopedSpinLock block_lock(m_block_lock);
        // We need to hold m_block_lock so that nobody can unblock a blocker as soon
        // as it is constructed and registered elsewhere
        m_in_block = true;
        BlockerType blocker(forward<Args>(args)...);

        ScopedSpinLock scheduler_lock(g_scheduler_lock);
        // Relaxed semantics are fine for timeout_unblocked because we
        // synchronize on the spin locks already.
        Atomic<bool, AK::MemoryOrder::memory_order_relaxed> timeout_unblocked(false);
        bool timer_was_added = false;
        {
            switch (state()) {
            case Thread::Stopped:
                // It's possible that we were requested to be stopped!
                break;
            case Thread::Running:
                VERIFY(m_blocker == nullptr);
                break;
            default:
                VERIFY_NOT_REACHED();
            }

            m_blocker = &blocker;

            if (!blocker.should_block()) {
                // Don't block if the wake condition is already met
                blocker.not_blocking(false);
                m_blocker = nullptr;
                m_in_block = false;
                return BlockResult::NotBlocked;
            }

            auto& block_timeout = blocker.override_timeout(timeout);
            if (!block_timeout.is_infinite()) {
                // Process::kill_all_threads may be called at any time, which will mark all
                // threads to die. In that case
                timer_was_added = TimerQueue::the().add_timer_without_id(*m_block_timer, block_timeout.clock_id(), block_timeout.absolute_time(), [&]() {
                    VERIFY(!Processor::current().in_irq());
                    VERIFY(!g_scheduler_lock.own_lock());
                    VERIFY(!m_block_lock.own_lock());
                    // NOTE: this may execute on the same or any other processor!
                    ScopedSpinLock scheduler_lock(g_scheduler_lock);
                    ScopedSpinLock block_lock(m_block_lock);
                    if (m_blocker && timeout_unblocked.exchange(true) == false)
                        unblock();
                });
                if (!timer_was_added) {
                    // Timeout is already in the past
                    blocker.not_blocking(true);
                    m_blocker = nullptr;
                    m_in_block = false;
                    return BlockResult::InterruptedByTimeout;
                }
            }

            blocker.begin_blocking({});

            set_state(Thread::Blocked);
        }

        scheduler_lock.unlock();
        block_lock.unlock();

        dbgln_if(THREAD_DEBUG, "Thread {} blocking on {} ({}) -->", *this, &blocker, blocker.state_string());
        bool did_timeout = false;
        u32 lock_count_to_restore = 0;
        auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
        for (;;) {
            // Yield to the scheduler, and wait for us to resume unblocked.
            VERIFY(!g_scheduler_lock.own_lock());
            VERIFY(Processor::current().in_critical());
            yield_assuming_not_holding_big_lock();
            VERIFY(Processor::current().in_critical());

            ScopedSpinLock block_lock2(m_block_lock);
            if (should_be_stopped() || state() == Stopped) {
                dbgln("Thread should be stopped, current state: {}", state_string());
                set_state(Thread::Blocked);
                continue;
            }
            if (m_blocker && !m_blocker->can_be_interrupted() && !m_should_die) {
                block_lock2.unlock();
                dbgln("Thread should not be unblocking, current state: {}", state_string());
                set_state(Thread::Blocked);
                continue;
            }
            // Prevent the timeout from unblocking this thread if it happens to
            // be in the process of firing already
            did_timeout |= timeout_unblocked.exchange(true);
            if (m_blocker) {
                // Remove ourselves...
                VERIFY(m_blocker == &blocker);
                m_blocker = nullptr;
            }
            dbgln_if(THREAD_DEBUG, "<-- Thread {} unblocked from {} ({})", *this, &blocker, blocker.state_string());
            m_in_block = false;
            break;
        }

        if (blocker.was_interrupted_by_signal()) {
            ScopedSpinLock scheduler_lock(g_scheduler_lock);
            ScopedSpinLock lock(m_lock);
            dispatch_one_pending_signal();
        }

        // Notify the blocker that we are no longer blocking. It may need
        // to clean up now while we're still holding m_lock
        auto result = blocker.end_blocking({}, did_timeout); // calls was_unblocked internally

        if (timer_was_added && !did_timeout) {
            // Cancel the timer while not holding any locks. This allows
            // the timer function to complete before we remove it
            // (e.g. if it's on another processor)
            TimerQueue::the().cancel_timer(*m_block_timer);
        }
        if (previous_locked != LockMode::Unlocked) {
            // NOTE: this may trigger another call to Thread::block(), so
            // we need to do this after we're all done and restored m_in_block!
            relock_process(previous_locked, lock_count_to_restore);
        }
        return result;
    }

    u32 unblock_from_lock(Kernel::Mutex&);
    void unblock_from_blocker(Blocker&);
    void unblock(u8 signal = 0);

    template<class... Args>
    Thread::BlockResult wait_on(WaitQueue& wait_queue, const Thread::BlockTimeout& timeout, Args&&... args)
    {
        VERIFY(this == Thread::current());
        return block<Thread::QueueBlocker>(timeout, wait_queue, forward<Args>(args)...);
    }
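
    // Illustrative sketch (not in the original header): wait_on() is the
    // convenience wrapper most callers use; it blocks the current thread on a
    // WaitQueue until it is woken or the timeout expires:
    //
    //     auto result = Thread::current()->wait_on(queue, Thread::BlockTimeout(), "ExampleQueue"sv);
    //     if (result.was_interrupted())
    //         return EINTR;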
    BlockResult sleep(clockid_t, const Time&, Time* = nullptr);
    BlockResult sleep(const Time& duration, Time* remaining_time = nullptr)
    {
        return sleep(CLOCK_MONOTONIC_COARSE, duration, remaining_time);
    }
    BlockResult sleep_until(clockid_t, const Time&);
    BlockResult sleep_until(const Time& deadline)
    {
        return sleep_until(CLOCK_MONOTONIC_COARSE, deadline);
    }
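
    // Illustrative sketch (not in the original header): the relative overload
    // sleeps on CLOCK_MONOTONIC_COARSE and reports how much time was left if
    // the sleep was cut short:
    //
    //     Time remaining;
    //     auto result = Thread::current()->sleep(Time::from_seconds(1), &remaining);
    //     if (result.was_interrupted()) {
    //         // remaining now holds the unslept portion
    //     }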
    // Tell this thread to unblock if needed,
    // gracefully unwind the stack and die.
    void set_should_die();
    [[nodiscard]] bool should_die() const { return m_should_die; }
    void die_if_needed();

    void exit(void* = nullptr);

    void update_time_scheduled(u64, bool, bool);
    bool tick();
    void set_ticks_left(u32 t) { m_ticks_left = t; }
    u32 ticks_left() const { return m_ticks_left; }

    FlatPtr kernel_stack_base() const { return m_kernel_stack_base; }
    FlatPtr kernel_stack_top() const { return m_kernel_stack_top; }

    void set_state(State, u8 = 0);

    [[nodiscard]] bool is_initialized() const { return m_initialized; }
    void set_initialized(bool initialized) { m_initialized = initialized; }

    void send_urgent_signal_to_self(u8 signal);
    void send_signal(u8 signal, Process* sender);

    u32 update_signal_mask(u32 signal_mask);
    u32 signal_mask_block(sigset_t signal_set, bool block);
    u32 signal_mask() const;
    void clear_signals();

    KResultOr<u32> peek_debug_register(u32 register_index);
    KResult poke_debug_register(u32 register_index, u32 data);

    void set_dump_backtrace_on_finalization() { m_dump_backtrace_on_finalization = true; }

    DispatchSignalResult dispatch_one_pending_signal();
    DispatchSignalResult try_dispatch_one_pending_signal(u8 signal);
    DispatchSignalResult dispatch_signal(u8 signal);
    void check_dispatch_pending_signal();
    [[nodiscard]] bool has_unmasked_pending_signals() const { return m_have_any_unmasked_pending_signals.load(AK::memory_order_consume); }
    [[nodiscard]] bool should_ignore_signal(u8 signal) const;
    [[nodiscard]] bool has_signal_handler(u8 signal) const;
    u32 pending_signals() const;
    u32 pending_signals_for_state() const;

    FPUState& fpu_state() { return m_fpu_state; }

    KResult make_thread_specific_region(Badge<Process>);

    unsigned syscall_count() const { return m_syscall_count; }
    void did_syscall() { ++m_syscall_count; }
    unsigned inode_faults() const { return m_inode_faults; }
    void did_inode_fault() { ++m_inode_faults; }
    unsigned zero_faults() const { return m_zero_faults; }
    void did_zero_fault() { ++m_zero_faults; }
    unsigned cow_faults() const { return m_cow_faults; }
    void did_cow_fault() { ++m_cow_faults; }

    unsigned file_read_bytes() const { return m_file_read_bytes; }
    unsigned file_write_bytes() const { return m_file_write_bytes; }

    void did_file_read(unsigned bytes)
    {
        m_file_read_bytes += bytes;
    }

    void did_file_write(unsigned bytes)
    {
        m_file_write_bytes += bytes;
    }

    unsigned unix_socket_read_bytes() const { return m_unix_socket_read_bytes; }
    unsigned unix_socket_write_bytes() const { return m_unix_socket_write_bytes; }

    void did_unix_socket_read(unsigned bytes)
    {
        m_unix_socket_read_bytes += bytes;
    }

    void did_unix_socket_write(unsigned bytes)
    {
        m_unix_socket_write_bytes += bytes;
    }

    unsigned ipv4_socket_read_bytes() const { return m_ipv4_socket_read_bytes; }
    unsigned ipv4_socket_write_bytes() const { return m_ipv4_socket_write_bytes; }

    void did_ipv4_socket_read(unsigned bytes)
    {
        m_ipv4_socket_read_bytes += bytes;
    }

    void did_ipv4_socket_write(unsigned bytes)
    {
        m_ipv4_socket_write_bytes += bytes;
    }

    void set_active(bool active) { m_is_active = active; }

    u32 saved_critical() const { return m_saved_critical; }
    void save_critical(u32 critical) { m_saved_critical = critical; }

    [[nodiscard]] bool is_active() const { return m_is_active; }

    [[nodiscard]] bool is_finalizable() const
    {
        // We can't finalize as long as this thread is still running
        // Note that checking for Running state here isn't sufficient
        // as the thread may not be in Running state but switching out.
        // m_is_active is set to false once the context switch is
        // complete and the thread is not executing on any processor.
        if (m_is_active.load(AK::memory_order_acquire))
            return false;

        // We can't finalize until the thread is either detached or
        // a join has started. We can't make m_is_joinable atomic
        // because that would introduce a race in try_join.
        ScopedSpinLock lock(m_lock);
        return !m_is_joinable;
    }

    RefPtr<Thread> clone(Process&);

    template<IteratorFunction<Thread&> Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<IteratorFunction<Thread&> Callback>
    static IterationDecision for_each(Callback);

    template<VoidFunction<Thread&> Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<VoidFunction<Thread&> Callback>
    static IterationDecision for_each(Callback);

    static constexpr u32 default_kernel_stack_size = 65536;
    static constexpr u32 default_userspace_stack_size = 1 * MiB;

    u64 time_in_user() const { return m_total_time_scheduled_user; }
    u64 time_in_kernel() const { return m_total_time_scheduled_kernel; }

    enum class PreviousMode : u8 {
        KernelMode = 0,
        UserMode
    };
    PreviousMode previous_mode() const { return m_previous_mode; }
    bool set_previous_mode(PreviousMode mode)
    {
        if (m_previous_mode == mode)
            return false;
        m_previous_mode = mode;
        return true;
    }

    TrapFrame*& current_trap() { return m_current_trap; }

    RecursiveSpinLock& get_lock() const { return m_lock; }

#if LOCK_DEBUG
    void holding_lock(Mutex& lock, int refs_delta, const SourceLocation& location)
    {
        VERIFY(refs_delta != 0);
        m_holding_locks.fetch_add(refs_delta, AK::MemoryOrder::memory_order_relaxed);
        ScopedSpinLock list_lock(m_holding_locks_lock);
        if (refs_delta > 0) {
            bool have_existing = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    have_existing = true;
                    info.count += refs_delta;
                    break;
                }
            }
            if (!have_existing)
                m_holding_locks_list.append({ &lock, location, 1 });
        } else {
            VERIFY(refs_delta < 0);
            bool found = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    VERIFY(info.count >= (unsigned)-refs_delta);
                    info.count -= (unsigned)-refs_delta;
                    if (info.count == 0)
                        m_holding_locks_list.remove(i);
                    found = true;
                    break;
                }
            }
            VERIFY(found);
        }
    }
    u32 lock_count() const
    {
        return m_holding_locks.load(AK::MemoryOrder::memory_order_relaxed);
    }
#endif

    bool is_handling_page_fault() const
    {
        return m_handling_page_fault;
    }
    void set_handling_page_fault(bool b) { m_handling_page_fault = b; }
    void set_idle_thread() { m_is_idle_thread = true; }
    bool is_idle_thread() const { return m_is_idle_thread; }

    ALWAYS_INLINE u32 enter_profiler()
    {
        return m_nested_profiler_calls.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);
    }

    ALWAYS_INLINE u32 leave_profiler()
    {
        return m_nested_profiler_calls.fetch_sub(1, AK::MemoryOrder::memory_order_acquire);
    }

    bool is_profiling_suppressed() const { return m_is_profiling_suppressed; }
    void set_profiling_suppressed() { m_is_profiling_suppressed = true; }

    InodeIndex global_procfs_inode_index() const { return m_global_procfs_inode_index; }

    String backtrace();

private:
    Thread(NonnullRefPtr<Process>, NonnullOwnPtr<Region>, NonnullRefPtr<Timer>, OwnPtr<KString>);

    IntrusiveListNode<Thread> m_process_thread_list_node;
    int m_runnable_priority { -1 };

    friend class WaitQueue;

    class JoinBlockCondition : public BlockCondition {
    public:
        void thread_did_exit(void* exit_value)
        {
            ScopedSpinLock lock(m_lock);
            VERIFY(!m_thread_did_exit);
            m_thread_did_exit = true;
            m_exit_value.store(exit_value, AK::MemoryOrder::memory_order_release);
            do_unblock_joiner();
        }
        void thread_finalizing()
        {
            ScopedSpinLock lock(m_lock);
            do_unblock_joiner();
        }
        void* exit_value() const
        {
            VERIFY(m_thread_did_exit);
            return m_exit_value.load(AK::MemoryOrder::memory_order_acquire);
        }

        void try_unblock(JoinBlocker& blocker)
        {
            ScopedSpinLock lock(m_lock);
            if (m_thread_did_exit)
                blocker.unblock(exit_value(), false);
        }

    protected:
        virtual bool should_add_blocker(Blocker& b, void*) override
        {
            VERIFY(b.blocker_type() == Blocker::Type::Join);
            auto& blocker = static_cast<JoinBlocker&>(b);

            // NOTE: m_lock is held already!
            if (m_thread_did_exit) {
                blocker.unblock(exit_value(), true);
                return false;
            }
            return true;
        }

    private:
        void do_unblock_joiner()
        {
            do_unblock([&](Blocker& b, void*, bool&) {
                VERIFY(b.blocker_type() == Blocker::Type::Join);
                auto& blocker = static_cast<JoinBlocker&>(b);
                return blocker.unblock(exit_value(), false);
            });
        }

        Atomic<void*> m_exit_value { nullptr };
        bool m_thread_did_exit { false };
    };

    LockMode unlock_process_if_locked(u32&);
    void relock_process(LockMode, u32);

    void reset_fpu_state();

    mutable RecursiveSpinLock m_lock;
    mutable RecursiveSpinLock m_block_lock;
    NonnullRefPtr<Process> m_process;
    ThreadID m_tid { -1 };
    ThreadRegisters m_regs;
    DebugRegisterState m_debug_register_state {};
    TrapFrame* m_current_trap { nullptr };
    u32 m_saved_critical { 1 };
    IntrusiveListNode<Thread> m_ready_queue_node;
    Atomic<u32> m_cpu { 0 };
    u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
    Optional<u64> m_last_time_scheduled;
    u64 m_total_time_scheduled_user { 0 };
    u64 m_total_time_scheduled_kernel { 0 };
    u32 m_ticks_left { 0 };
    u32 m_times_scheduled { 0 };
    u32 m_ticks_in_user { 0 };
    u32 m_ticks_in_kernel { 0 };
    u32 m_pending_signals { 0 };
    u32 m_signal_mask { 0 };
    FlatPtr m_kernel_stack_base { 0 };
    FlatPtr m_kernel_stack_top { 0 };
    OwnPtr<Region> m_kernel_stack_region;
    VirtualAddress m_thread_specific_data;
    Optional<Range> m_thread_specific_range;
    Array<SignalActionData, NSIG> m_signal_action_data;
    Blocker* m_blocker { nullptr };
    Kernel::Mutex* m_blocking_lock { nullptr };
    u32 m_lock_requested_count { 0 };
    IntrusiveListNode<Thread> m_blocked_threads_list_node;

#if LOCK_DEBUG
    struct HoldingLockInfo {
        Mutex* lock;
        SourceLocation source_location;
        unsigned count;
    };
    Atomic<u32> m_holding_locks { 0 };
    SpinLock<u8> m_holding_locks_lock;
    Vector<HoldingLockInfo> m_holding_locks_list;
#endif

    JoinBlockCondition m_join_condition;
    Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_active { false };
    bool m_is_joinable { true };
    bool m_handling_page_fault { false };
    PreviousMode m_previous_mode { PreviousMode::KernelMode }; // We always start out in kernel mode

    unsigned m_syscall_count { 0 };
    unsigned m_inode_faults { 0 };
    unsigned m_zero_faults { 0 };
    unsigned m_cow_faults { 0 };

    unsigned m_file_read_bytes { 0 };
    unsigned m_file_write_bytes { 0 };

    unsigned m_unix_socket_read_bytes { 0 };
    unsigned m_unix_socket_write_bytes { 0 };

    unsigned m_ipv4_socket_read_bytes { 0 };
    unsigned m_ipv4_socket_write_bytes { 0 };

    FPUState m_fpu_state {};
    State m_state { Invalid };
    OwnPtr<KString> m_name;
    u32 m_priority { THREAD_PRIORITY_NORMAL };

    State m_stop_state { Invalid };

    bool m_dump_backtrace_on_finalization { false };
    bool m_should_die { false };
    bool m_initialized { false };
    bool m_in_block { false };
    bool m_is_idle_thread { false };
    Atomic<bool> m_have_any_unmasked_pending_signals { false };
    Atomic<u32> m_nested_profiler_calls { 0 };

    RefPtr<Timer> m_block_timer;

    // Note: This is needed so when we generate thread stack inodes for ProcFS, we know that
    // we assigned a global Inode index to it so we can use it later
    InodeIndex m_global_procfs_inode_index;

    bool m_is_profiling_suppressed { false };

    void yield_and_release_relock_big_lock();
    void yield_assuming_not_holding_big_lock();
    void drop_thread_count(bool);
};

AK_ENUM_BITWISE_OPERATORS(Thread::FileBlocker::BlockFlags);

template<IteratorFunction<Thread&> Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    ScopedSpinLock lock(g_tid_map_lock);
    for (auto& it : *g_tid_map) {
        IterationDecision decision = callback(*it.value);
        if (decision != IterationDecision::Continue)
            return decision;
    }
    return IterationDecision::Continue;
}

template<IteratorFunction<Thread&> Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    ScopedSpinLock lock(g_tid_map_lock);
    for (auto& it : *g_tid_map) {
        auto& thread = *it.value;
        if (thread.state() != state)
            continue;
        IterationDecision decision = callback(thread);
        if (decision != IterationDecision::Continue)
            return decision;
    }
    return IterationDecision::Continue;
}

template<VoidFunction<Thread&> Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    ScopedSpinLock lock(g_tid_map_lock);
    for (auto& it : *g_tid_map)
        callback(*it.value);
    return IterationDecision::Continue;
}

template<VoidFunction<Thread&> Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    return for_each_in_state(state, [&](auto& thread) {
        callback(thread);
        return IterationDecision::Continue;
    });
}
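
// Illustrative sketch (not in the original header): the VoidFunction overloads
// visit every matching thread unconditionally, e.g. to log all runnable
// threads:
//
//     Thread::for_each_in_state(Thread::Runnable, [](Thread& thread) {
//         dbgln("runnable: tid {}", thread.tid().value());
//     });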
}

template<>
struct AK::Formatter<Kernel::Thread> : AK::Formatter<FormatString> {
    void format(FormatBuilder&, const Kernel::Thread&);
};