/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Concepts.h>
#include <AK/EnumBits.h>
#include <AK/HashMap.h>
#include <AK/IntrusiveList.h>
#include <AK/Optional.h>
#include <AK/OwnPtr.h>
#if LOCK_DEBUG
#    include <AK/SourceLocation.h>
#endif
#include <AK/String.h>
#include <AK/Time.h>
#include <AK/Vector.h>
#include <AK/WeakPtr.h>
#include <AK/Weakable.h>
#include <Kernel/Arch/x86/RegisterState.h>
#include <Kernel/Arch/x86/SafeMem.h>
#include <Kernel/Debug.h>
#include <Kernel/FileSystem/InodeIdentifier.h>
#include <Kernel/Forward.h>
#include <Kernel/KResult.h>
#include <Kernel/KString.h>
#include <Kernel/Locking/LockMode.h>
#include <Kernel/Memory/VirtualRange.h>
#include <Kernel/Scheduler.h>
#include <Kernel/TimerQueue.h>
#include <Kernel/UnixTypes.h>
#include <LibC/fd_set.h>
#include <LibC/signal_numbers.h>

namespace Kernel {

namespace Memory {
extern RecursiveSpinLock s_mm_lock;
}
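
// Editorial note (not authoritative): this summarizes how the values below
// are used. Deferred means a signal could not be delivered right now and
// remains pending; the other values describe what the dispatch decided the
// thread should do next (yield the CPU, terminate, or continue).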
enum class DispatchSignalResult {
    Deferred = 0,
    Yield,
    Terminate,
    Continue
};

struct SignalActionData {
    VirtualAddress handler_or_sigaction;
    u32 mask { 0 };
    int flags { 0 };
};

struct ThreadSpecificData {
    ThreadSpecificData* self;
};

#define THREAD_PRIORITY_MIN 1
#define THREAD_PRIORITY_LOW 10
#define THREAD_PRIORITY_NORMAL 30
#define THREAD_PRIORITY_HIGH 50
#define THREAD_PRIORITY_MAX 99

#define THREAD_AFFINITY_DEFAULT 0xffffffff
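
// Thread priorities span [THREAD_PRIORITY_MIN, THREAD_PRIORITY_MAX]. The
// affinity value is a bitmask with one bit per CPU; the default of 0xffffffff
// lets a thread run on any processor. A hypothetical usage sketch:
//
//     thread.set_affinity(1u << 0); // pin the thread to CPU 0
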
struct ThreadRegisters {
#if ARCH(I386)
    FlatPtr ss;
    FlatPtr gs;
    FlatPtr fs;
    FlatPtr es;
    FlatPtr ds;
    FlatPtr edi;
    FlatPtr esi;
    FlatPtr ebp;
    FlatPtr esp;
    FlatPtr ebx;
    FlatPtr edx;
    FlatPtr ecx;
    FlatPtr eax;
    FlatPtr eip;
    FlatPtr esp0;
    FlatPtr ss0;
#else
    FlatPtr rdi;
    FlatPtr rsi;
    FlatPtr rbp;
    FlatPtr rsp;
    FlatPtr rbx;
    FlatPtr rdx;
    FlatPtr rcx;
    FlatPtr rax;
    FlatPtr r8;
    FlatPtr r9;
    FlatPtr r10;
    FlatPtr r11;
    FlatPtr r12;
    FlatPtr r13;
    FlatPtr r14;
    FlatPtr r15;
    FlatPtr rip;
    FlatPtr rsp0;
#endif
    FlatPtr cs;
#if ARCH(I386)
    FlatPtr eflags;
#else
    FlatPtr rflags;
#endif
    FlatPtr cr3;

    FlatPtr ip() const
    {
#if ARCH(I386)
        return eip;
#else
        return rip;
#endif
    }

    FlatPtr sp() const
    {
#if ARCH(I386)
        return esp;
#else
        return rsp;
#endif
    }
};
class Thread
    : public RefCounted<Thread>
    , public Weakable<Thread> {
    AK_MAKE_NONCOPYABLE(Thread);
    AK_MAKE_NONMOVABLE(Thread);

    friend class Mutex;
    friend class Process;
    friend class ProtectedProcessBase;
    friend class Scheduler;
    friend struct ThreadReadyQueue;

    static SpinLock<u8> g_tid_map_lock;
    static HashMap<ThreadID, Thread*>* g_tid_map;

public:
    inline static Thread* current()
    {
        return Processor::current_thread();
    }

    static void initialize();

    static KResultOr<NonnullRefPtr<Thread>> try_create(NonnullRefPtr<Process>);
    ~Thread();

    static RefPtr<Thread> from_tid(ThreadID);
    static void finalize_dying_threads();

    ThreadID tid() const { return m_tid; }
    ProcessID pid() const;

    void set_priority(u32 p) { m_priority = p; }
    u32 priority() const { return m_priority; }

    void detach()
    {
        ScopedSpinLock lock(m_lock);
        m_is_joinable = false;
    }

    [[nodiscard]] bool is_joinable() const
    {
        ScopedSpinLock lock(m_lock);
        return m_is_joinable;
    }

    Process& process() { return m_process; }
    const Process& process() const { return m_process; }

    // NOTE: When m_name is set, the returned StringView is backed by a null-terminated KString.
    StringView name() const
    {
        // NOTE: Whoever is calling this needs to be holding our lock while reading the name.
        VERIFY(m_lock.own_lock());
        return m_name ? m_name->view() : StringView {};
    }

    void set_name(OwnPtr<KString> name)
    {
        ScopedSpinLock lock(m_lock);
        m_name = move(name);
    }

    void finalize();

    enum State : u8 {
        Invalid = 0,
        Runnable,
        Running,
        Dying,
        Dead,
        Stopped,
        Blocked
    };

    class [[nodiscard]] BlockResult {
    public:
        enum Type {
            WokeNormally,
            NotBlocked,
            InterruptedBySignal,
            InterruptedByDeath,
            InterruptedByTimeout,
        };
        BlockResult() = delete;
        BlockResult(Type type)
            : m_type(type)
        {
        }
        bool operator==(Type type) const
        {
            return m_type == type;
        }
        bool operator!=(Type type) const
        {
            return m_type != type;
        }
        [[nodiscard]] bool was_interrupted() const
        {
            switch (m_type) {
            case InterruptedBySignal:
            case InterruptedByDeath:
                return true;
            default:
                return false;
            }
        }
        [[nodiscard]] bool timed_out() const
        {
            return m_type == InterruptedByTimeout;
        }

    private:
        Type m_type;
    };
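
    // A minimal sketch of how a caller might inspect the BlockResult returned
    // by block() or sleep() (hypothetical usage):
    //
    //     auto result = Thread::current()->sleep(duration);
    //     if (result.was_interrupted())
    //         return EINTR; // a signal or impending death woke us early
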
    class BlockTimeout {
    public:
        BlockTimeout()
            : m_infinite(true)
        {
        }
        explicit BlockTimeout(bool is_absolute, const Time* time, const Time* start_time = nullptr, clockid_t clock_id = CLOCK_MONOTONIC_COARSE);

        const Time& absolute_time() const { return m_time; }
        const Time* start_time() const { return !m_infinite ? &m_start_time : nullptr; }
        clockid_t clock_id() const { return m_clock_id; }
        bool is_infinite() const { return m_infinite; }
        bool should_block() const { return m_infinite || m_should_block; }

    private:
        Time m_time {};
        Time m_start_time {};
        clockid_t m_clock_id { CLOCK_MONOTONIC_COARSE };
        bool m_infinite { false };
        bool m_should_block { false };
    };
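
    // For example, a relative one-second BlockTimeout on the default clock
    // could be built like this (hypothetical usage; a default-constructed
    // BlockTimeout blocks forever):
    //
    //     auto duration = Time::from_seconds(1);
    //     BlockTimeout timeout(false, &duration);
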
    class BlockCondition;

    class Blocker {
    public:
        enum class Type {
            Unknown = 0,
            File,
            Futex,
            Plan9FS,
            Join,
            Queue,
            Routing,
            Sleep,
            Wait
        };
        virtual ~Blocker();
        virtual StringView state_string() const = 0;
        virtual bool should_block() { return true; }
        virtual Type blocker_type() const = 0;
        virtual const BlockTimeout& override_timeout(const BlockTimeout& timeout) { return timeout; }
        virtual bool can_be_interrupted() const { return true; }
        virtual void not_blocking(bool) = 0;
        virtual void was_unblocked(bool did_timeout)
        {
            if (did_timeout) {
                ScopedSpinLock lock(m_lock);
                m_did_timeout = true;
            }
        }
        void set_interrupted_by_death()
        {
            ScopedSpinLock lock(m_lock);
            do_set_interrupted_by_death();
        }
        void set_interrupted_by_signal(u8 signal)
        {
            ScopedSpinLock lock(m_lock);
            do_set_interrupted_by_signal(signal);
        }
        u8 was_interrupted_by_signal() const
        {
            ScopedSpinLock lock(m_lock);
            return do_get_interrupted_by_signal();
        }
        virtual Thread::BlockResult block_result()
        {
            ScopedSpinLock lock(m_lock);
            if (m_was_interrupted_by_death)
                return Thread::BlockResult::InterruptedByDeath;
            if (m_was_interrupted_by_signal != 0)
                return Thread::BlockResult::InterruptedBySignal;
            if (m_did_timeout)
                return Thread::BlockResult::InterruptedByTimeout;
            return Thread::BlockResult::WokeNormally;
        }

        void begin_blocking(Badge<Thread>);
        BlockResult end_blocking(Badge<Thread>, bool);

    protected:
        void do_set_interrupted_by_death()
        {
            m_was_interrupted_by_death = true;
        }
        void do_set_interrupted_by_signal(u8 signal)
        {
            VERIFY(signal != 0);
            m_was_interrupted_by_signal = signal;
        }
        void do_clear_interrupted_by_signal()
        {
            m_was_interrupted_by_signal = 0;
        }
        u8 do_get_interrupted_by_signal() const
        {
            return m_was_interrupted_by_signal;
        }
        [[nodiscard]] bool was_interrupted() const
        {
            return m_was_interrupted_by_death || m_was_interrupted_by_signal != 0;
        }
        void unblock_from_blocker()
        {
            RefPtr<Thread> thread;
            {
                ScopedSpinLock lock(m_lock);
                if (m_is_blocking) {
                    m_is_blocking = false;
                    VERIFY(m_blocked_thread);
                    thread = m_blocked_thread;
                }
            }
            if (thread)
                thread->unblock_from_blocker(*this);
        }

        bool set_block_condition(BlockCondition&, void* = nullptr);
        void set_block_condition_raw_locked(BlockCondition* block_condition)
        {
            m_block_condition = block_condition;
        }

        mutable RecursiveSpinLock m_lock;

    private:
        BlockCondition* m_block_condition { nullptr };
        void* m_block_data { nullptr };
        Thread* m_blocked_thread { nullptr };
        u8 m_was_interrupted_by_signal { 0 };
        bool m_is_blocking { false };
        bool m_was_interrupted_by_death { false };
        bool m_did_timeout { false };
    };
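
    // A BlockCondition is the waitable side of the blocking protocol: it keeps
    // a list of registered Blockers and wakes them through do_unblock().
    // Subclasses decide in should_add_blocker() whether a new Blocker needs to
    // wait at all (see JoinBlockCondition below, which refuses to add a blocker
    // once the thread has already exited).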
    class BlockCondition {
        AK_MAKE_NONCOPYABLE(BlockCondition);
        AK_MAKE_NONMOVABLE(BlockCondition);

    public:
        BlockCondition() = default;

        virtual ~BlockCondition()
        {
            ScopedSpinLock lock(m_lock);
            VERIFY(m_blockers.is_empty());
        }

        bool add_blocker(Blocker& blocker, void* data)
        {
            ScopedSpinLock lock(m_lock);
            if (!should_add_blocker(blocker, data))
                return false;
            m_blockers.append({ &blocker, data });
            return true;
        }

        void remove_blocker(Blocker& blocker, void* data)
        {
            ScopedSpinLock lock(m_lock);
            // NOTE: it's possible that the blocker is no longer present
            m_blockers.remove_first_matching([&](auto& info) {
                return info.blocker == &blocker && info.data == data;
            });
        }

        bool is_empty() const
        {
            ScopedSpinLock lock(m_lock);
            return is_empty_locked();
        }

    protected:
        template<typename UnblockOne>
        bool unblock(UnblockOne unblock_one)
        {
            ScopedSpinLock lock(m_lock);
            return do_unblock(unblock_one);
        }

        template<typename UnblockOne>
        bool do_unblock(UnblockOne unblock_one)
        {
            VERIFY(m_lock.is_locked());
            bool stop_iterating = false;
            bool did_unblock = false;
            for (size_t i = 0; i < m_blockers.size() && !stop_iterating;) {
                auto& info = m_blockers[i];
                if (unblock_one(*info.blocker, info.data, stop_iterating)) {
                    m_blockers.remove(i);
                    did_unblock = true;
                    continue;
                }
                i++;
            }
            return did_unblock;
        }

        bool is_empty_locked() const
        {
            VERIFY(m_lock.is_locked());
            return m_blockers.is_empty();
        }

        virtual bool should_add_blocker(Blocker&, void*) { return true; }

        struct BlockerInfo {
            Blocker* blocker;
            void* data;
        };

        Vector<BlockerInfo, 4> do_take_blockers(size_t count)
        {
            if (m_blockers.size() <= count)
                return move(m_blockers);

            VERIFY(count > 0);
            Vector<BlockerInfo, 4> taken_blockers;
            taken_blockers.ensure_capacity(count);
            for (size_t i = 0; i < count; i++)
                taken_blockers.unchecked_append(m_blockers[i]);
            m_blockers.remove(0, count);
            return taken_blockers;
        }

        void do_append_blockers(Vector<BlockerInfo, 4>&& blockers_to_append)
        {
            if (blockers_to_append.is_empty())
                return;
            if (m_blockers.is_empty()) {
                m_blockers = move(blockers_to_append);
                return;
            }
            m_blockers.ensure_capacity(m_blockers.size() + blockers_to_append.size());
            for (auto& blocker_info : blockers_to_append)
                m_blockers.unchecked_append(blocker_info);
            blockers_to_append.clear();
        }

        mutable SpinLock<u8> m_lock;

    private:
        Vector<BlockerInfo, 4> m_blockers;
    };
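
    // A JoinBlocker blocks a joining thread until the joinee exits (or the
    // join fails), handing the joinee's exit value back through the reference
    // passed to its constructor. It cannot be interrupted by signals, which
    // mirrors pthread_join() not failing with EINTR.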
    friend class JoinBlocker;
    class JoinBlocker final : public Blocker {
    public:
        explicit JoinBlocker(Thread& joinee, KResult& try_join_result, void*& joinee_exit_value);
        virtual Type blocker_type() const override { return Type::Join; }
        virtual StringView state_string() const override { return "Joining"sv; }
        virtual bool can_be_interrupted() const override { return false; }
        virtual bool should_block() override { return !m_join_error && m_should_block; }
        virtual void not_blocking(bool) override;

        bool unblock(void*, bool);

    private:
        NonnullRefPtr<Thread> m_joinee;
        void*& m_joinee_exit_value;
        bool m_join_error { false };
        bool m_did_unblock { false };
        bool m_should_block { true };
    };

    class QueueBlocker : public Blocker {
    public:
        explicit QueueBlocker(WaitQueue&, StringView block_reason = {});
        virtual ~QueueBlocker();

        virtual Type blocker_type() const override { return Type::Queue; }
        virtual StringView state_string() const override { return m_block_reason.is_null() ? "Queue"sv : m_block_reason; }
        virtual void not_blocking(bool) override { }

        virtual bool should_block() override
        {
            return m_should_block;
        }

        bool unblock();

    protected:
        StringView m_block_reason;
        bool m_should_block { true };
        bool m_did_unblock { false };
    };

    class FutexBlocker : public Blocker {
    public:
        explicit FutexBlocker(FutexQueue&, u32);
        virtual ~FutexBlocker();

        virtual Type blocker_type() const override { return Type::Futex; }
        virtual StringView state_string() const override { return "Futex"sv; }
        virtual void not_blocking(bool) override { }

        virtual bool should_block() override
        {
            return m_should_block;
        }

        u32 bitset() const { return m_bitset; }

        void begin_requeue()
        {
            // We need to hold the lock until we've moved it over
            m_relock_flags = m_lock.lock();
        }
        void finish_requeue(FutexQueue&);

        bool unblock_bitset(u32 bitset);
        bool unblock(bool force = false);

    protected:
        u32 m_bitset;
        u32 m_relock_flags { 0 };
        bool m_should_block { true };
        bool m_did_unblock { false };
    };

    class FileBlocker : public Blocker {
    public:
        enum class BlockFlags : u16 {
            None = 0,

            Read = 1 << 0,
            Write = 1 << 1,
            ReadPriority = 1 << 2,

            Accept = 1 << 3,
            Connect = 1 << 4,
            SocketFlags = Accept | Connect,

            WriteNotOpen = 1 << 5,
            WriteError = 1 << 6,
            WriteHangUp = 1 << 7,
            ReadHangUp = 1 << 8,
            Exception = WriteNotOpen | WriteError | WriteHangUp | ReadHangUp,
        };

        virtual Type blocker_type() const override { return Type::File; }

        virtual bool should_block() override
        {
            return m_should_block;
        }

        virtual bool unblock(bool, void*) = 0;

    protected:
        bool m_should_block { true };
    };
    class FileDescriptionBlocker : public FileBlocker {
    public:
        const FileDescription& blocked_description() const;

        virtual bool unblock(bool, void*) override;
        virtual void not_blocking(bool) override;

    protected:
        explicit FileDescriptionBlocker(FileDescription&, BlockFlags, BlockFlags&);

    private:
        NonnullRefPtr<FileDescription> m_blocked_description;
        const BlockFlags m_flags;
        BlockFlags& m_unblocked_flags;
        bool m_did_unblock { false };
    };

    class AcceptBlocker final : public FileDescriptionBlocker {
    public:
        explicit AcceptBlocker(FileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Accepting"sv; }
    };

    class ConnectBlocker final : public FileDescriptionBlocker {
    public:
        explicit ConnectBlocker(FileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Connecting"sv; }
    };

    class WriteBlocker final : public FileDescriptionBlocker {
    public:
        explicit WriteBlocker(FileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Writing"sv; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;

    private:
        BlockTimeout m_timeout;
    };

    class ReadBlocker final : public FileDescriptionBlocker {
    public:
        explicit ReadBlocker(FileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Reading"sv; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;

    private:
        BlockTimeout m_timeout;
    };

    class SleepBlocker final : public Blocker {
    public:
        explicit SleepBlocker(const BlockTimeout&, Time* = nullptr);
        virtual StringView state_string() const override { return "Sleeping"sv; }
        virtual Type blocker_type() const override { return Type::Sleep; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;
        virtual void not_blocking(bool) override;
        virtual void was_unblocked(bool) override;
        virtual Thread::BlockResult block_result() override;

    private:
        void calculate_remaining();

        BlockTimeout m_deadline;
        Time* m_remaining;
    };

    class SelectBlocker final : public FileBlocker {
    public:
        struct FDInfo {
            NonnullRefPtr<FileDescription> description;
            BlockFlags block_flags { BlockFlags::None };
            BlockFlags unblocked_flags { BlockFlags::None };
        };

        typedef Vector<FDInfo, FD_SETSIZE> FDVector;
        SelectBlocker(FDVector& fds);
        virtual ~SelectBlocker();

        virtual bool unblock(bool, void*) override;
        virtual void not_blocking(bool) override;
        virtual void was_unblocked(bool) override;
        virtual StringView state_string() const override { return "Selecting"sv; }

    private:
        size_t collect_unblocked_flags();

        FDVector& m_fds;
        bool m_did_unblock { false };
    };
    class WaitBlocker final : public Blocker {
    public:
        enum class UnblockFlags {
            Terminated,
            Stopped,
            Continued,
            Disowned
        };

        WaitBlocker(int wait_options, idtype_t id_type, pid_t id, KResultOr<siginfo_t>& result);
        virtual StringView state_string() const override { return "Waiting"sv; }
        virtual Type blocker_type() const override { return Type::Wait; }
        virtual bool should_block() override { return m_should_block; }
        virtual void not_blocking(bool) override;
        virtual void was_unblocked(bool) override;

        bool unblock(Process& process, UnblockFlags flags, u8 signal, bool from_add_blocker);
        bool is_wait() const { return !(m_wait_options & WNOWAIT); }

    private:
        void do_was_disowned();
        void do_set_result(const siginfo_t&);

        const int m_wait_options;
        const idtype_t m_id_type;
        const pid_t m_waitee_id;
        KResultOr<siginfo_t>& m_result;
        RefPtr<Process> m_waitee;
        RefPtr<ProcessGroup> m_waitee_group;
        bool m_did_unblock { false };
        bool m_error { false };
        bool m_got_sigchild { false };
        bool m_should_block;
    };

    class WaitBlockCondition final : public BlockCondition {
        friend class WaitBlocker;

    public:
        WaitBlockCondition(Process& process)
            : m_process(process)
        {
        }

        void disowned_by_waiter(Process&);
        bool unblock(Process&, WaitBlocker::UnblockFlags, u8);
        void try_unblock(WaitBlocker&);
        void finalize();

    protected:
        virtual bool should_add_blocker(Blocker&, void*) override;

    private:
        struct ProcessBlockInfo {
            NonnullRefPtr<Process> process;
            WaitBlocker::UnblockFlags flags;
            u8 signal;
            bool was_waited { false };

            explicit ProcessBlockInfo(NonnullRefPtr<Process>&&, WaitBlocker::UnblockFlags, u8);
            ~ProcessBlockInfo();
        };

        Process& m_process;
        Vector<ProcessBlockInfo, 2> m_processes;
        bool m_finalized { false };
    };

    template<typename AddBlockerHandler>
    KResult try_join(AddBlockerHandler add_blocker)
    {
        if (Thread::current() == this)
            return EDEADLK;

        ScopedSpinLock lock(m_lock);
        if (!m_is_joinable || state() == Dead)
            return EINVAL;

        add_blocker();

        // From this point on the thread is no longer joinable by anyone
        // else. It also means that if the join is timed, it becomes
        // detached when a timeout happens.
        m_is_joinable = false;
        return KSuccess;
    }
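
    // In practice try_join() is driven by a JoinBlocker: the blocker's
    // constructor is expected to call it, registering itself via the
    // add_blocker callback. A hypothetical sketch of a joiner:
    //
    //     void* joinee_exit_value = nullptr;
    //     KResult try_join_result = KSuccess;
    //     auto result = Thread::current()->block<Thread::JoinBlocker>({}, joinee, try_join_result, joinee_exit_value);
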
    void did_schedule() { ++m_times_scheduled; }
    u32 times_scheduled() const { return m_times_scheduled; }

    void resume_from_stopped();

    [[nodiscard]] bool should_be_stopped() const;
    [[nodiscard]] bool is_stopped() const { return m_state == Stopped; }
    [[nodiscard]] bool is_blocked() const { return m_state == Blocked; }
    [[nodiscard]] bool is_in_block() const
    {
        ScopedSpinLock lock(m_block_lock);
        return m_in_block;
    }

    u32 cpu() const { return m_cpu.load(AK::MemoryOrder::memory_order_consume); }
    void set_cpu(u32 cpu) { m_cpu.store(cpu, AK::MemoryOrder::memory_order_release); }

    u32 affinity() const { return m_cpu_affinity; }
    void set_affinity(u32 affinity) { m_cpu_affinity = affinity; }

    RegisterState& get_register_dump_from_stack();
    const RegisterState& get_register_dump_from_stack() const { return const_cast<Thread*>(this)->get_register_dump_from_stack(); }

    DebugRegisterState& debug_register_state() { return m_debug_register_state; }
    const DebugRegisterState& debug_register_state() const { return m_debug_register_state; }

    ThreadRegisters& regs() { return m_regs; }
    ThreadRegisters const& regs() const { return m_regs; }

    State state() const { return m_state; }
    StringView state_string() const;

    VirtualAddress thread_specific_data() const { return m_thread_specific_data; }
    size_t thread_specific_region_size() const;
    size_t thread_specific_region_alignment() const;

    ALWAYS_INLINE void yield_if_stopped()
    {
        // If some thread stopped us, we need to yield to someone else.
        // We check this when entering/exiting a system call. A thread
        // may continue to execute in user land until the next timer
        // tick or until it enters the next system call; if it's in
        // kernel mode, we will intercept it prior to returning back
        // to user mode.
        ScopedSpinLock lock(m_lock);
        while (state() == Thread::Stopped) {
            lock.unlock();
            // We shouldn't be holding the big lock here
            yield_assuming_not_holding_big_lock();
            lock.lock();
        }
    }

    void block(Kernel::Mutex&, ScopedSpinLock<SpinLock<u8>>&, u32);

    template<typename BlockerType, class... Args>
    [[nodiscard]] BlockResult block(const BlockTimeout& timeout, Args&&... args)
    {
        VERIFY(!Processor::current().in_irq());
        VERIFY(this == Thread::current());
        ScopedCritical critical;
        VERIFY(!Memory::s_mm_lock.own_lock());

        ScopedSpinLock block_lock(m_block_lock);
        // We need to hold m_block_lock so that nobody can unblock a blocker as soon
        // as it is constructed and registered elsewhere.
        m_in_block = true;
        BlockerType blocker(forward<Args>(args)...);

        ScopedSpinLock scheduler_lock(g_scheduler_lock);
        // Relaxed semantics are fine for timeout_unblocked because we
        // synchronize on the spin locks already.
        Atomic<bool, AK::MemoryOrder::memory_order_relaxed> timeout_unblocked(false);
        bool timer_was_added = false;
        {
            switch (state()) {
            case Thread::Stopped:
                // It's possible that we were requested to be stopped!
                break;
            case Thread::Running:
                VERIFY(m_blocker == nullptr);
                break;
            default:
                VERIFY_NOT_REACHED();
            }

            m_blocker = &blocker;
            if (!blocker.should_block()) {
                // Don't block if the wake condition is already met
                blocker.not_blocking(false);
                m_blocker = nullptr;
                m_in_block = false;
                return BlockResult::NotBlocked;
            }

            auto& block_timeout = blocker.override_timeout(timeout);
            if (!block_timeout.is_infinite()) {
                // Process::kill_all_threads may be called at any time, which will mark all
                // threads to die. In that case the timer may fire while the thread is
                // already being torn down, so the callback below re-checks m_blocker
                // under the scheduler and block locks before unblocking anything.
                timer_was_added = TimerQueue::the().add_timer_without_id(*m_block_timer, block_timeout.clock_id(), block_timeout.absolute_time(), [&]() {
                    VERIFY(!Processor::current().in_irq());
                    VERIFY(!g_scheduler_lock.own_lock());
                    VERIFY(!m_block_lock.own_lock());
                    // NOTE: this may execute on the same or any other processor!
                    ScopedSpinLock scheduler_lock(g_scheduler_lock);
                    ScopedSpinLock block_lock(m_block_lock);
                    if (m_blocker && timeout_unblocked.exchange(true) == false)
                        unblock();
                });
                if (!timer_was_added) {
                    // Timeout is already in the past
                    blocker.not_blocking(true);
                    m_blocker = nullptr;
                    m_in_block = false;
                    return BlockResult::InterruptedByTimeout;
                }
            }

            blocker.begin_blocking({});

            set_state(Thread::Blocked);
        }

        scheduler_lock.unlock();
        block_lock.unlock();

        dbgln_if(THREAD_DEBUG, "Thread {} blocking on {} ({}) -->", *this, &blocker, blocker.state_string());
        bool did_timeout = false;
        u32 lock_count_to_restore = 0;
        auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
        for (;;) {
            // Yield to the scheduler, and wait for us to resume unblocked.
            VERIFY(!g_scheduler_lock.own_lock());
            VERIFY(Processor::current().in_critical());
            yield_assuming_not_holding_big_lock();
            VERIFY(Processor::current().in_critical());

            ScopedSpinLock block_lock2(m_block_lock);
            if (should_be_stopped() || state() == Stopped) {
                dbgln("Thread should be stopped, current state: {}", state_string());
                set_state(Thread::Blocked);
                continue;
            }
            if (m_blocker && !m_blocker->can_be_interrupted() && !m_should_die) {
                block_lock2.unlock();
                dbgln("Thread should not be unblocking, current state: {}", state_string());
                set_state(Thread::Blocked);
                continue;
            }
            // Prevent the timeout from unblocking this thread if it happens to
            // be in the process of firing already
            did_timeout |= timeout_unblocked.exchange(true);
            if (m_blocker) {
                // Remove ourselves...
                VERIFY(m_blocker == &blocker);
                m_blocker = nullptr;
            }
            dbgln_if(THREAD_DEBUG, "<-- Thread {} unblocked from {} ({})", *this, &blocker, blocker.state_string());
            m_in_block = false;
            break;
        }

        if (blocker.was_interrupted_by_signal()) {
            ScopedSpinLock scheduler_lock(g_scheduler_lock);
            ScopedSpinLock lock(m_lock);
            dispatch_one_pending_signal();
        }

        // Notify the blocker that we are no longer blocking. It may need
        // to clean up now while we're still holding m_lock
        auto result = blocker.end_blocking({}, did_timeout); // calls was_unblocked internally

        if (timer_was_added && !did_timeout) {
            // Cancel the timer while not holding any locks. This allows
            // the timer function to complete before we remove it
            // (e.g. if it's on another processor)
            TimerQueue::the().cancel_timer(*m_block_timer);
        }
        if (previous_locked != LockMode::Unlocked) {
            // NOTE: this may trigger another call to Thread::block(), so
            // we need to do this after we're all done and restored m_in_block!
            relock_process(previous_locked, lock_count_to_restore);
        }
        return result;
    }
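
    // A minimal usage sketch (hypothetical): block the current thread on a
    // wait queue with an infinite timeout (a default-constructed BlockTimeout
    // never expires). This is essentially what wait_on() below does.
    //
    //     auto result = Thread::current()->block<Thread::QueueBlocker>({}, wait_queue, "Example"sv);
    //     if (result.was_interrupted())
    //         return EINTR;
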
    u32 unblock_from_lock(Kernel::Mutex&);
    void unblock_from_blocker(Blocker&);
    void unblock(u8 signal = 0);

    template<class... Args>
    Thread::BlockResult wait_on(WaitQueue& wait_queue, const Thread::BlockTimeout& timeout, Args&&... args)
    {
        VERIFY(this == Thread::current());
        return block<Thread::QueueBlocker>(timeout, wait_queue, forward<Args>(args)...);
    }

    BlockResult sleep(clockid_t, const Time&, Time* = nullptr);
    BlockResult sleep(const Time& duration, Time* remaining_time = nullptr)
    {
        return sleep(CLOCK_MONOTONIC_COARSE, duration, remaining_time);
    }
    BlockResult sleep_until(clockid_t, const Time&);
    BlockResult sleep_until(const Time& deadline)
    {
        return sleep_until(CLOCK_MONOTONIC_COARSE, deadline);
    }
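
    // For example, sleeping the current thread for 100 ms on the default
    // CLOCK_MONOTONIC_COARSE clock (hypothetical usage):
    //
    //     auto duration = Time::from_milliseconds(100);
    //     (void)Thread::current()->sleep(duration);
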
    // Tell this thread to unblock if needed,
    // gracefully unwind the stack and die.
    void set_should_die();
    [[nodiscard]] bool should_die() const { return m_should_die; }
    void die_if_needed();

    void exit(void* = nullptr);

    void update_time_scheduled(u64, bool, bool);
    bool tick();
    void set_ticks_left(u32 t) { m_ticks_left = t; }
    u32 ticks_left() const { return m_ticks_left; }

    FlatPtr kernel_stack_base() const { return m_kernel_stack_base; }
    FlatPtr kernel_stack_top() const { return m_kernel_stack_top; }

    void set_state(State, u8 = 0);

    [[nodiscard]] bool is_initialized() const { return m_initialized; }
    void set_initialized(bool initialized) { m_initialized = initialized; }

    void send_urgent_signal_to_self(u8 signal);
    void send_signal(u8 signal, Process* sender);

    u32 update_signal_mask(u32 signal_mask);
    u32 signal_mask_block(sigset_t signal_set, bool block);
    u32 signal_mask() const;
    void clear_signals();

    KResultOr<u32> peek_debug_register(u32 register_index);
    KResult poke_debug_register(u32 register_index, u32 data);

    void set_dump_backtrace_on_finalization() { m_dump_backtrace_on_finalization = true; }

    DispatchSignalResult dispatch_one_pending_signal();
    DispatchSignalResult try_dispatch_one_pending_signal(u8 signal);
    DispatchSignalResult dispatch_signal(u8 signal);
    void check_dispatch_pending_signal();
    [[nodiscard]] bool has_unmasked_pending_signals() const { return m_have_any_unmasked_pending_signals.load(AK::memory_order_consume); }
    [[nodiscard]] bool should_ignore_signal(u8 signal) const;
    [[nodiscard]] bool has_signal_handler(u8 signal) const;
    u32 pending_signals() const;
    u32 pending_signals_for_state() const;

    FPUState& fpu_state() { return m_fpu_state; }

    KResult make_thread_specific_region(Badge<Process>);

    unsigned syscall_count() const { return m_syscall_count; }
    void did_syscall() { ++m_syscall_count; }
    unsigned inode_faults() const { return m_inode_faults; }
    void did_inode_fault() { ++m_inode_faults; }
    unsigned zero_faults() const { return m_zero_faults; }
    void did_zero_fault() { ++m_zero_faults; }
    unsigned cow_faults() const { return m_cow_faults; }
    void did_cow_fault() { ++m_cow_faults; }

    unsigned file_read_bytes() const { return m_file_read_bytes; }
    unsigned file_write_bytes() const { return m_file_write_bytes; }

    void did_file_read(unsigned bytes)
    {
        m_file_read_bytes += bytes;
    }

    void did_file_write(unsigned bytes)
    {
        m_file_write_bytes += bytes;
    }

    unsigned unix_socket_read_bytes() const { return m_unix_socket_read_bytes; }
    unsigned unix_socket_write_bytes() const { return m_unix_socket_write_bytes; }

    void did_unix_socket_read(unsigned bytes)
    {
        m_unix_socket_read_bytes += bytes;
    }

    void did_unix_socket_write(unsigned bytes)
    {
        m_unix_socket_write_bytes += bytes;
    }

    unsigned ipv4_socket_read_bytes() const { return m_ipv4_socket_read_bytes; }
    unsigned ipv4_socket_write_bytes() const { return m_ipv4_socket_write_bytes; }

    void did_ipv4_socket_read(unsigned bytes)
    {
        m_ipv4_socket_read_bytes += bytes;
    }

    void did_ipv4_socket_write(unsigned bytes)
    {
        m_ipv4_socket_write_bytes += bytes;
    }

    void set_active(bool active) { m_is_active = active; }

    u32 saved_critical() const { return m_saved_critical; }
    void save_critical(u32 critical) { m_saved_critical = critical; }

    [[nodiscard]] bool is_active() const { return m_is_active; }

    [[nodiscard]] bool is_finalizable() const
    {
        // We can't finalize as long as this thread is still running.
        // Note that checking for Running state here isn't sufficient,
        // as the thread may not be in Running state but switching out.
        // m_is_active is set to false once the context switch is
        // complete and the thread is not executing on any processor.
        if (m_is_active.load(AK::memory_order_acquire))
            return false;
        // We can't finalize until the thread is either detached or
        // a join has started. We can't make m_is_joinable atomic
        // because that would introduce a race in try_join.
        ScopedSpinLock lock(m_lock);
        return !m_is_joinable;
    }

    RefPtr<Thread> clone(Process&);

    template<IteratorFunction<Thread&> Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<IteratorFunction<Thread&> Callback>
    static IterationDecision for_each(Callback);

    template<VoidFunction<Thread&> Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<VoidFunction<Thread&> Callback>
    static IterationDecision for_each(Callback);

    static constexpr u32 default_kernel_stack_size = 65536;
    static constexpr u32 default_userspace_stack_size = 1 * MiB;

    u64 time_in_user() const { return m_total_time_scheduled_user; }
    u64 time_in_kernel() const { return m_total_time_scheduled_kernel; }
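
    // PreviousMode records whether the thread was executing in kernel or user
    // mode before the current trap. An editorial note, inferred from the
    // accessors above: this is what lets scheduled time be attributed to
    // time_in_user() versus time_in_kernel().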
    enum class PreviousMode : u8 {
        KernelMode = 0,
        UserMode
    };
    PreviousMode previous_mode() const { return m_previous_mode; }
    bool set_previous_mode(PreviousMode mode)
    {
        if (m_previous_mode == mode)
            return false;
        m_previous_mode = mode;
        return true;
    }

    TrapFrame*& current_trap() { return m_current_trap; }

    RecursiveSpinLock& get_lock() const { return m_lock; }
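
    // When LOCK_DEBUG is enabled, every Mutex acquisition and release reports
    // to holding_lock() below with a positive or negative refs_delta, so each
    // thread can track which Mutexes it currently holds and where they were
    // taken (see the HoldingLockInfo list further down).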
#if LOCK_DEBUG
    void holding_lock(Mutex& lock, int refs_delta, const SourceLocation& location)
    {
        VERIFY(refs_delta != 0);
        m_holding_locks.fetch_add(refs_delta, AK::MemoryOrder::memory_order_relaxed);
        ScopedSpinLock list_lock(m_holding_locks_lock);
        if (refs_delta > 0) {
            bool have_existing = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    have_existing = true;
                    info.count += refs_delta;
                    break;
                }
            }
            if (!have_existing)
                m_holding_locks_list.append({ &lock, location, (unsigned)refs_delta });
        } else {
            VERIFY(refs_delta < 0);
            bool found = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    VERIFY(info.count >= (unsigned)-refs_delta);
                    info.count -= (unsigned)-refs_delta;
                    if (info.count == 0)
                        m_holding_locks_list.remove(i);
                    found = true;
                    break;
                }
            }
            VERIFY(found);
        }
    }
    u32 lock_count() const
    {
        return m_holding_locks.load(AK::MemoryOrder::memory_order_relaxed);
    }
#endif
    bool is_handling_page_fault() const
    {
        return m_handling_page_fault;
    }
    void set_handling_page_fault(bool b) { m_handling_page_fault = b; }
    void set_idle_thread() { m_is_idle_thread = true; }
    bool is_idle_thread() const { return m_is_idle_thread; }

    ALWAYS_INLINE u32 enter_profiler()
    {
        return m_nested_profiler_calls.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);
    }

    ALWAYS_INLINE u32 leave_profiler()
    {
        return m_nested_profiler_calls.fetch_sub(1, AK::MemoryOrder::memory_order_acquire);
    }

    bool is_profiling_suppressed() const { return m_is_profiling_suppressed; }
    void set_profiling_suppressed() { m_is_profiling_suppressed = true; }

    InodeIndex global_procfs_inode_index() const { return m_global_procfs_inode_index; }

    String backtrace();

private:
    Thread(NonnullRefPtr<Process>, NonnullOwnPtr<Memory::Region>, NonnullRefPtr<Timer>, OwnPtr<KString>);

    IntrusiveListNode<Thread> m_process_thread_list_node;
    int m_runnable_priority { -1 };

    friend class WaitQueue;

    class JoinBlockCondition : public BlockCondition {
    public:
        void thread_did_exit(void* exit_value)
        {
            ScopedSpinLock lock(m_lock);
            VERIFY(!m_thread_did_exit);
            m_thread_did_exit = true;
            m_exit_value.store(exit_value, AK::MemoryOrder::memory_order_release);
            do_unblock_joiner();
        }
        void thread_finalizing()
        {
            ScopedSpinLock lock(m_lock);
            do_unblock_joiner();
        }
        void* exit_value() const
        {
            VERIFY(m_thread_did_exit);
            return m_exit_value.load(AK::MemoryOrder::memory_order_acquire);
        }

        void try_unblock(JoinBlocker& blocker)
        {
            ScopedSpinLock lock(m_lock);
            if (m_thread_did_exit)
                blocker.unblock(exit_value(), false);
        }

    protected:
        virtual bool should_add_blocker(Blocker& b, void*) override
        {
            VERIFY(b.blocker_type() == Blocker::Type::Join);
            auto& blocker = static_cast<JoinBlocker&>(b);

            // NOTE: m_lock is held already!
            if (m_thread_did_exit) {
                blocker.unblock(exit_value(), true);
                return false;
            }
            return true;
        }

    private:
        void do_unblock_joiner()
        {
            do_unblock([&](Blocker& b, void*, bool&) {
                VERIFY(b.blocker_type() == Blocker::Type::Join);
                auto& blocker = static_cast<JoinBlocker&>(b);
                return blocker.unblock(exit_value(), false);
            });
        }

        Atomic<void*> m_exit_value { nullptr };
        bool m_thread_did_exit { false };
    };

    LockMode unlock_process_if_locked(u32&);
    void relock_process(LockMode, u32);
    void reset_fpu_state();

    mutable RecursiveSpinLock m_lock;
    mutable RecursiveSpinLock m_block_lock;
    NonnullRefPtr<Process> m_process;
    ThreadID m_tid { -1 };
    ThreadRegisters m_regs;
    DebugRegisterState m_debug_register_state {};
    TrapFrame* m_current_trap { nullptr };
    u32 m_saved_critical { 1 };
    IntrusiveListNode<Thread> m_ready_queue_node;
    Atomic<u32> m_cpu { 0 };
    u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
    Optional<u64> m_last_time_scheduled;
    u64 m_total_time_scheduled_user { 0 };
    u64 m_total_time_scheduled_kernel { 0 };
    u32 m_ticks_left { 0 };
    u32 m_times_scheduled { 0 };
    u32 m_ticks_in_user { 0 };
    u32 m_ticks_in_kernel { 0 };
    u32 m_pending_signals { 0 };
    u32 m_signal_mask { 0 };
    FlatPtr m_kernel_stack_base { 0 };
    FlatPtr m_kernel_stack_top { 0 };
    OwnPtr<Memory::Region> m_kernel_stack_region;
    VirtualAddress m_thread_specific_data;
    Optional<Memory::VirtualRange> m_thread_specific_range;
    Array<SignalActionData, NSIG> m_signal_action_data;
    Blocker* m_blocker { nullptr };
    Kernel::Mutex* m_blocking_lock { nullptr };
    u32 m_lock_requested_count { 0 };
    IntrusiveListNode<Thread> m_blocked_threads_list_node;

#if LOCK_DEBUG
    struct HoldingLockInfo {
        Mutex* lock;
        SourceLocation source_location;
        unsigned count;
    };
    Atomic<u32> m_holding_locks { 0 };
    SpinLock<u8> m_holding_locks_lock;
    Vector<HoldingLockInfo> m_holding_locks_list;
#endif

    JoinBlockCondition m_join_condition;
    Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_active { false };
    bool m_is_joinable { true };
    bool m_handling_page_fault { false };
    PreviousMode m_previous_mode { PreviousMode::KernelMode }; // We always start out in kernel mode

    unsigned m_syscall_count { 0 };
    unsigned m_inode_faults { 0 };
    unsigned m_zero_faults { 0 };
    unsigned m_cow_faults { 0 };

    unsigned m_file_read_bytes { 0 };
    unsigned m_file_write_bytes { 0 };

    unsigned m_unix_socket_read_bytes { 0 };
    unsigned m_unix_socket_write_bytes { 0 };

    unsigned m_ipv4_socket_read_bytes { 0 };
    unsigned m_ipv4_socket_write_bytes { 0 };

    FPUState m_fpu_state {};
    State m_state { Invalid };
    OwnPtr<KString> m_name;
    u32 m_priority { THREAD_PRIORITY_NORMAL };

    State m_stop_state { Invalid };

    bool m_dump_backtrace_on_finalization { false };
    bool m_should_die { false };
    bool m_initialized { false };
    bool m_in_block { false };
    bool m_is_idle_thread { false };
    Atomic<bool> m_have_any_unmasked_pending_signals { false };
    Atomic<u32> m_nested_profiler_calls { 0 };

    RefPtr<Timer> m_block_timer;

    // NOTE: This is needed so that when we generate thread stack inodes for ProcFS,
    // we know we already assigned a global inode index to them and can reuse it later.
    InodeIndex m_global_procfs_inode_index;

    bool m_is_profiling_suppressed { false };

    void yield_and_release_relock_big_lock();
    void yield_assuming_not_holding_big_lock();
    void drop_thread_count(bool);

public:
    using ListInProcess = IntrusiveList<Thread, RawPtr<Thread>, &Thread::m_process_thread_list_node>;
};

AK_ENUM_BITWISE_OPERATORS(Thread::FileBlocker::BlockFlags);
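
// AK_ENUM_BITWISE_OPERATORS generates the usual bitwise operators for the
// flags enum, so callers can combine and test flags directly. For example
// (hypothetical usage):
//
//     auto flags = Thread::FileBlocker::BlockFlags::Read | Thread::FileBlocker::BlockFlags::Write;
//     bool wants_read = has_flag(flags, Thread::FileBlocker::BlockFlags::Read);
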
template<IteratorFunction<Thread&> Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    ScopedSpinLock lock(g_tid_map_lock);
    for (auto& it : *g_tid_map) {
        IterationDecision decision = callback(*it.value);
        if (decision != IterationDecision::Continue)
            return decision;
    }
    return IterationDecision::Continue;
}

template<IteratorFunction<Thread&> Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    ScopedSpinLock lock(g_tid_map_lock);
    for (auto& it : *g_tid_map) {
        auto& thread = *it.value;
        if (thread.state() != state)
            continue;
        IterationDecision decision = callback(thread);
        if (decision != IterationDecision::Continue)
            return decision;
    }
    return IterationDecision::Continue;
}

template<VoidFunction<Thread&> Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    ScopedSpinLock lock(g_tid_map_lock);
    for (auto& it : *g_tid_map)
        callback(*it.value);
    return IterationDecision::Continue;
}

template<VoidFunction<Thread&> Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    return for_each_in_state(state, [&](auto& thread) {
        callback(thread);
        return IterationDecision::Continue;
    });
}

}

template<>
struct AK::Formatter<Kernel::Thread> : AK::Formatter<FormatString> {
    void format(FormatBuilder&, const Kernel::Thread&);
};
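
// The Formatter specialization above lets a Thread be passed directly to the
// kernel's formatting helpers, e.g. (hypothetical usage):
//
//     dbgln("current thread: {}", *Thread::current());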