Thread.h

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Concepts.h>
#include <AK/EnumBits.h>
#include <AK/Function.h>
#include <AK/HashMap.h>
#include <AK/IntrusiveList.h>
#include <AK/Optional.h>
#include <AK/OwnPtr.h>
#include <AK/SourceLocation.h>
#include <AK/String.h>
#include <AK/Time.h>
#include <AK/Vector.h>
#include <AK/WeakPtr.h>
#include <AK/Weakable.h>
#include <Kernel/Arch/x86/CPU.h>
#include <Kernel/Arch/x86/SafeMem.h>
#include <Kernel/Debug.h>
#include <Kernel/Forward.h>
#include <Kernel/KResult.h>
#include <Kernel/LockMode.h>
#include <Kernel/Scheduler.h>
#include <Kernel/ThreadTracer.h>
#include <Kernel/TimerQueue.h>
#include <Kernel/UnixTypes.h>
#include <LibC/fd_set.h>
#include <LibC/signal_numbers.h>

namespace Kernel {

extern RecursiveSpinLock s_mm_lock;

enum class DispatchSignalResult {
    Deferred = 0,
    Yield,
    Terminate,
    Continue
};

struct SignalActionData {
    VirtualAddress handler_or_sigaction;
    u32 mask { 0 };
    int flags { 0 };
};

struct ThreadSpecificData {
    ThreadSpecificData* self;
};

#define THREAD_PRIORITY_MIN 1
#define THREAD_PRIORITY_LOW 10
#define THREAD_PRIORITY_NORMAL 30
#define THREAD_PRIORITY_HIGH 50
#define THREAD_PRIORITY_MAX 99

#define THREAD_AFFINITY_DEFAULT 0xffffffff

class Thread
    : public RefCounted<Thread>
    , public Weakable<Thread> {
    AK_MAKE_NONCOPYABLE(Thread);
    AK_MAKE_NONMOVABLE(Thread);

    friend class Process;
    friend class ProtectedProcessBase;
    friend class Scheduler;
    friend struct ThreadReadyQueue;

    static SpinLock<u8> g_tid_map_lock;
    static HashMap<ThreadID, Thread*>* g_tid_map;

public:
    inline static Thread* current()
    {
        return Processor::current_thread();
    }

    static void initialize();

    static KResultOr<NonnullRefPtr<Thread>> try_create(NonnullRefPtr<Process>);
    ~Thread();

    static RefPtr<Thread> from_tid(ThreadID);
    static void finalize_dying_threads();

    ThreadID tid() const { return m_tid; }
    ProcessID pid() const;

    void set_priority(u32 p) { m_priority = p; }
    u32 priority() const { return m_priority; }

    void detach()
    {
        ScopedSpinLock lock(m_lock);
        m_is_joinable = false;
    }

    [[nodiscard]] bool is_joinable() const
    {
        ScopedSpinLock lock(m_lock);
        return m_is_joinable;
    }

    Process& process() { return m_process; }
    const Process& process() const { return m_process; }

    String name() const
    {
        // Because the name can be changed, we can't return a const
        // reference here. We must make a copy
        ScopedSpinLock lock(m_lock);
        return m_name;
    }
    void set_name(const StringView& s)
    {
        ScopedSpinLock lock(m_lock);
        m_name = s;
    }
    void set_name(String&& name)
    {
        ScopedSpinLock lock(m_lock);
        m_name = move(name);
    }

    void finalize();

    enum State : u8 {
        Invalid = 0,
        Runnable,
        Running,
        Dying,
        Dead,
        Stopped,
        Blocked
    };

    class [[nodiscard]] BlockResult {
    public:
        enum Type {
            WokeNormally,
            NotBlocked,
            InterruptedBySignal,
            InterruptedByDeath,
            InterruptedByTimeout,
        };
        BlockResult() = delete;
        BlockResult(Type type)
            : m_type(type)
        {
        }
        bool operator==(Type type) const
        {
            return m_type == type;
        }
        bool operator!=(Type type) const
        {
            return m_type != type;
        }
        [[nodiscard]] bool was_interrupted() const
        {
            switch (m_type) {
            case InterruptedBySignal:
            case InterruptedByDeath:
                return true;
            default:
                return false;
            }
        }
        [[nodiscard]] bool timed_out() const
        {
            return m_type == InterruptedByTimeout;
        }

    private:
        Type m_type;
    };

    class BlockTimeout {
    public:
        BlockTimeout()
            : m_infinite(true)
        {
        }
        explicit BlockTimeout(bool is_absolute, const Time* time, const Time* start_time = nullptr, clockid_t clock_id = CLOCK_MONOTONIC_COARSE);

        const Time& absolute_time() const { return m_time; }
        const Time* start_time() const { return !m_infinite ? &m_start_time : nullptr; }
        clockid_t clock_id() const { return m_clock_id; }
        bool is_infinite() const { return m_infinite; }
        bool should_block() const { return m_infinite || m_should_block; };

    private:
        Time m_time {};
        Time m_start_time {};
        clockid_t m_clock_id { CLOCK_MONOTONIC_COARSE };
        bool m_infinite { false };
        bool m_should_block { false };
    };
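
    // Usage sketch (illustrative, not part of the original header): a default-constructed
    // BlockTimeout means "block forever", while a relative timeout passes is_absolute = false
    // together with a duration:
    //
    //     Time duration = Time::from_seconds(2); // assumes AK::Time's factory helper
    //     Thread::BlockTimeout timeout(false, &duration);
    //
    // Thread::block() below checks is_infinite() and, for finite timeouts, arms a timer
    // at absolute_time().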

    class BlockCondition;

    class Blocker {
    public:
        enum class Type {
            Unknown = 0,
            File,
            Futex,
            Plan9FS,
            Join,
            Queue,
            Routing,
            Sleep,
            Wait
        };
        virtual ~Blocker();
        virtual const char* state_string() const = 0;
        virtual bool should_block() { return true; }
        virtual Type blocker_type() const = 0;
        virtual const BlockTimeout& override_timeout(const BlockTimeout& timeout) { return timeout; }
        virtual bool can_be_interrupted() const { return true; }
        virtual void not_blocking(bool) = 0;
        virtual void was_unblocked(bool did_timeout)
        {
            if (did_timeout) {
                ScopedSpinLock lock(m_lock);
                m_did_timeout = true;
            }
        }
        void set_interrupted_by_death()
        {
            ScopedSpinLock lock(m_lock);
            do_set_interrupted_by_death();
        }
        void set_interrupted_by_signal(u8 signal)
        {
            ScopedSpinLock lock(m_lock);
            do_set_interrupted_by_signal(signal);
        }
        u8 was_interrupted_by_signal() const
        {
            ScopedSpinLock lock(m_lock);
            return do_get_interrupted_by_signal();
        }
        virtual Thread::BlockResult block_result()
        {
            ScopedSpinLock lock(m_lock);
            if (m_was_interrupted_by_death)
                return Thread::BlockResult::InterruptedByDeath;
            if (m_was_interrupted_by_signal != 0)
                return Thread::BlockResult::InterruptedBySignal;
            if (m_did_timeout)
                return Thread::BlockResult::InterruptedByTimeout;
            return Thread::BlockResult::WokeNormally;
        }

        void begin_blocking(Badge<Thread>);
        BlockResult end_blocking(Badge<Thread>, bool);

    protected:
        void do_set_interrupted_by_death()
        {
            m_was_interrupted_by_death = true;
        }
        void do_set_interrupted_by_signal(u8 signal)
        {
            VERIFY(signal != 0);
            m_was_interrupted_by_signal = signal;
        }
        void do_clear_interrupted_by_signal()
        {
            m_was_interrupted_by_signal = 0;
        }
        u8 do_get_interrupted_by_signal() const
        {
            return m_was_interrupted_by_signal;
        }
        [[nodiscard]] bool was_interrupted() const
        {
            return m_was_interrupted_by_death || m_was_interrupted_by_signal != 0;
        }
        void unblock_from_blocker()
        {
            RefPtr<Thread> thread;

            {
                ScopedSpinLock lock(m_lock);
                if (m_is_blocking) {
                    m_is_blocking = false;
                    VERIFY(m_blocked_thread);
                    thread = m_blocked_thread;
                }
            }

            if (thread)
                thread->unblock_from_blocker(*this);
        }

        bool set_block_condition(BlockCondition&, void* = nullptr);
        void set_block_condition_raw_locked(BlockCondition* block_condition)
        {
            m_block_condition = block_condition;
        }

        mutable RecursiveSpinLock m_lock;

    private:
        BlockCondition* m_block_condition { nullptr };
        void* m_block_data { nullptr };
        Thread* m_blocked_thread { nullptr };
        u8 m_was_interrupted_by_signal { 0 };
        bool m_is_blocking { false };
        bool m_was_interrupted_by_death { false };
        bool m_did_timeout { false };
    };

    class BlockCondition {
        AK_MAKE_NONCOPYABLE(BlockCondition);
        AK_MAKE_NONMOVABLE(BlockCondition);

    public:
        BlockCondition() = default;

        virtual ~BlockCondition()
        {
            ScopedSpinLock lock(m_lock);
            VERIFY(m_blockers.is_empty());
        }

        bool add_blocker(Blocker& blocker, void* data)
        {
            ScopedSpinLock lock(m_lock);
            if (!should_add_blocker(blocker, data))
                return false;
            m_blockers.append({ &blocker, data });
            return true;
        }

        void remove_blocker(Blocker& blocker, void* data)
        {
            ScopedSpinLock lock(m_lock);
            // NOTE: it's possible that the blocker is no longer present
            m_blockers.remove_first_matching([&](auto& info) {
                return info.blocker == &blocker && info.data == data;
            });
        }

        bool is_empty() const
        {
            ScopedSpinLock lock(m_lock);
            return is_empty_locked();
        }

    protected:
        template<typename UnblockOne>
        bool unblock(UnblockOne unblock_one)
        {
            ScopedSpinLock lock(m_lock);
            return do_unblock(unblock_one);
        }

        template<typename UnblockOne>
        bool do_unblock(UnblockOne unblock_one)
        {
            VERIFY(m_lock.is_locked());
            bool stop_iterating = false;
            bool did_unblock = false;
            for (size_t i = 0; i < m_blockers.size() && !stop_iterating;) {
                auto& info = m_blockers[i];
                if (unblock_one(*info.blocker, info.data, stop_iterating)) {
                    m_blockers.remove(i);
                    did_unblock = true;
                    continue;
                }

                i++;
            }
            return did_unblock;
        }

        bool is_empty_locked() const
        {
            VERIFY(m_lock.is_locked());
            return m_blockers.is_empty();
        }

        virtual bool should_add_blocker(Blocker&, void*) { return true; }

        struct BlockerInfo {
            Blocker* blocker;
            void* data;
        };

        Vector<BlockerInfo, 4> do_take_blockers(size_t count)
        {
            if (m_blockers.size() <= count)
                return move(m_blockers);

            size_t move_count = (count <= m_blockers.size()) ? count : m_blockers.size();
            VERIFY(move_count > 0);

            Vector<BlockerInfo, 4> taken_blockers;
            taken_blockers.ensure_capacity(move_count);
            for (size_t i = 0; i < move_count; i++)
                taken_blockers.append(m_blockers.take(i));
            m_blockers.remove(0, move_count);
            return taken_blockers;
        }

        void do_append_blockers(Vector<BlockerInfo, 4>&& blockers_to_append)
        {
            if (blockers_to_append.is_empty())
                return;

            if (m_blockers.is_empty()) {
                m_blockers = move(blockers_to_append);
                return;
            }

            m_blockers.ensure_capacity(m_blockers.size() + blockers_to_append.size());
            for (size_t i = 0; i < blockers_to_append.size(); i++)
                m_blockers.append(blockers_to_append.take(i));
            blockers_to_append.clear();
        }

        mutable SpinLock<u8> m_lock;

    private:
        Vector<BlockerInfo, 4> m_blockers;
    };
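
    // Usage sketch (illustrative): subclasses wake their waiters through unblock(),
    // passing a callable with the shape do_unblock() expects, e.g.
    //
    //     unblock([&](Blocker& b, void* data, bool& stop_iterating) {
    //         // return true if this blocker was woken and should be removed
    //         return wake_one(b, data, stop_iterating); // wake_one is a hypothetical helper
    //     });
    //
    // JoinBlockCondition::do_unblock_joiner() further down is a concrete instance of
    // this pattern.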

    friend class JoinBlocker;
    class JoinBlocker final : public Blocker {
    public:
        explicit JoinBlocker(Thread& joinee, KResult& try_join_result, void*& joinee_exit_value);
        virtual Type blocker_type() const override { return Type::Join; }
        virtual const char* state_string() const override { return "Joining"; }
        virtual bool can_be_interrupted() const override { return false; }
        virtual bool should_block() override { return !m_join_error && m_should_block; }
        virtual void not_blocking(bool) override;

        bool unblock(void*, bool);

    private:
        NonnullRefPtr<Thread> m_joinee;
        void*& m_joinee_exit_value;
        bool m_join_error { false };
        bool m_did_unblock { false };
        bool m_should_block { true };
    };

    class QueueBlocker : public Blocker {
    public:
        explicit QueueBlocker(WaitQueue&, const char* block_reason = nullptr);
        virtual ~QueueBlocker();

        virtual Type blocker_type() const override { return Type::Queue; }
        virtual const char* state_string() const override { return m_block_reason ? m_block_reason : "Queue"; }
        virtual void not_blocking(bool) override { }

        virtual bool should_block() override
        {
            return m_should_block;
        }

        bool unblock();

    protected:
        const char* const m_block_reason;
        bool m_should_block { true };
        bool m_did_unblock { false };
    };

    class FutexBlocker : public Blocker {
    public:
        explicit FutexBlocker(FutexQueue&, u32);
        virtual ~FutexBlocker();

        virtual Type blocker_type() const override { return Type::Futex; }
        virtual const char* state_string() const override { return "Futex"; }
        virtual void not_blocking(bool) override { }

        virtual bool should_block() override
        {
            return m_should_block;
        }

        u32 bitset() const { return m_bitset; }

        void begin_requeue()
        {
            // We need to hold the lock until we moved it over
            m_relock_flags = m_lock.lock();
        }
        void finish_requeue(FutexQueue&);

        bool unblock_bitset(u32 bitset);
        bool unblock(bool force = false);

    protected:
        u32 m_bitset;
        u32 m_relock_flags { 0 };
        bool m_should_block { true };
        bool m_did_unblock { false };
    };

    class FileBlocker : public Blocker {
    public:
        enum class BlockFlags : u16 {
            None = 0,

            Read = 1 << 0,
            Write = 1 << 1,
            ReadPriority = 1 << 2,

            Accept = 1 << 3,
            Connect = 1 << 4,
            SocketFlags = Accept | Connect,

            WriteNotOpen = 1 << 5,
            WriteError = 1 << 6,
            WriteHangUp = 1 << 7,
            ReadHangUp = 1 << 8,
            Exception = WriteNotOpen | WriteError | WriteHangUp | ReadHangUp,
        };

        virtual Type blocker_type() const override { return Type::File; }

        virtual bool should_block() override
        {
            return m_should_block;
        }

        virtual bool unblock(bool, void*) = 0;

    protected:
        bool m_should_block { true };
    };
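
    // BlockFlags is a bitmask; AK_ENUM_BITWISE_OPERATORS(Thread::FileBlocker::BlockFlags)
    // at the bottom of this file enables the usual operators, e.g. (illustrative):
    //
    //     auto flags = FileBlocker::BlockFlags::Read | FileBlocker::BlockFlags::Write;
    //     if (has_flag(flags, FileBlocker::BlockFlags::Read)) { /* ... */ }
    //
    // has_flag() comes with AK's enum-bits helpers; the exact helper name is assumed here.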

    class FileDescriptionBlocker : public FileBlocker {
    public:
        const FileDescription& blocked_description() const;

        virtual bool unblock(bool, void*) override;
        virtual void not_blocking(bool) override;

    protected:
        explicit FileDescriptionBlocker(FileDescription&, BlockFlags, BlockFlags&);

    private:
        NonnullRefPtr<FileDescription> m_blocked_description;
        const BlockFlags m_flags;
        BlockFlags& m_unblocked_flags;
        bool m_did_unblock { false };
    };

    class AcceptBlocker final : public FileDescriptionBlocker {
    public:
        explicit AcceptBlocker(FileDescription&, BlockFlags&);
        virtual const char* state_string() const override { return "Accepting"; }
    };

    class ConnectBlocker final : public FileDescriptionBlocker {
    public:
        explicit ConnectBlocker(FileDescription&, BlockFlags&);
        virtual const char* state_string() const override { return "Connecting"; }
    };

    class WriteBlocker final : public FileDescriptionBlocker {
    public:
        explicit WriteBlocker(FileDescription&, BlockFlags&);
        virtual const char* state_string() const override { return "Writing"; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;

    private:
        BlockTimeout m_timeout;
    };

    class ReadBlocker final : public FileDescriptionBlocker {
    public:
        explicit ReadBlocker(FileDescription&, BlockFlags&);
        virtual const char* state_string() const override { return "Reading"; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;

    private:
        BlockTimeout m_timeout;
    };

    class SleepBlocker final : public Blocker {
    public:
        explicit SleepBlocker(const BlockTimeout&, Time* = nullptr);
        virtual const char* state_string() const override { return "Sleeping"; }
        virtual Type blocker_type() const override { return Type::Sleep; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;
        virtual void not_blocking(bool) override;
        virtual void was_unblocked(bool) override;
        virtual Thread::BlockResult block_result() override;

    private:
        void calculate_remaining();

        BlockTimeout m_deadline;
        Time* m_remaining;
    };

    class SelectBlocker final : public FileBlocker {
    public:
        struct FDInfo {
            NonnullRefPtr<FileDescription> description;
            BlockFlags block_flags { BlockFlags::None };
            BlockFlags unblocked_flags { BlockFlags::None };
        };

        typedef Vector<FDInfo, FD_SETSIZE> FDVector;
        SelectBlocker(FDVector& fds);
        virtual ~SelectBlocker();

        virtual bool unblock(bool, void*) override;
        virtual void not_blocking(bool) override;
        virtual void was_unblocked(bool) override;
        virtual const char* state_string() const override { return "Selecting"; }

    private:
        size_t collect_unblocked_flags();

        FDVector& m_fds;
        bool m_did_unblock { false };
    };

    class WaitBlocker final : public Blocker {
    public:
        enum class UnblockFlags {
            Terminated,
            Stopped,
            Continued,
            Disowned
        };

        WaitBlocker(int wait_options, idtype_t id_type, pid_t id, KResultOr<siginfo_t>& result);
        virtual const char* state_string() const override { return "Waiting"; }
        virtual Type blocker_type() const override { return Type::Wait; }
        virtual bool should_block() override { return m_should_block; }
        virtual void not_blocking(bool) override;
        virtual void was_unblocked(bool) override;

        bool unblock(Process& process, UnblockFlags flags, u8 signal, bool from_add_blocker);
        bool is_wait() const { return !(m_wait_options & WNOWAIT); }

    private:
        void do_was_disowned();
        void do_set_result(const siginfo_t&);

        const int m_wait_options;
        const idtype_t m_id_type;
        const pid_t m_waitee_id;
        KResultOr<siginfo_t>& m_result;
        RefPtr<Process> m_waitee;
        RefPtr<ProcessGroup> m_waitee_group;
        bool m_did_unblock { false };
        bool m_error { false };
        bool m_got_sigchild { false };
        bool m_should_block;
    };

    class WaitBlockCondition final : public BlockCondition {
        friend class WaitBlocker;

    public:
        WaitBlockCondition(Process& process)
            : m_process(process)
        {
        }

        void disowned_by_waiter(Process&);
        bool unblock(Process&, WaitBlocker::UnblockFlags, u8);
        void try_unblock(WaitBlocker&);
        void finalize();

    protected:
        virtual bool should_add_blocker(Blocker&, void*) override;

    private:
        struct ProcessBlockInfo {
            NonnullRefPtr<Process> process;
            WaitBlocker::UnblockFlags flags;
            u8 signal;
            bool was_waited { false };

            explicit ProcessBlockInfo(NonnullRefPtr<Process>&&, WaitBlocker::UnblockFlags, u8);
            ~ProcessBlockInfo();
        };

        Process& m_process;
        Vector<ProcessBlockInfo, 2> m_processes;
        bool m_finalized { false };
    };

    template<typename AddBlockerHandler>
    KResult try_join(AddBlockerHandler add_blocker)
    {
        if (Thread::current() == this)
            return EDEADLK;

        ScopedSpinLock lock(m_lock);
        if (!m_is_joinable || state() == Dead)
            return EINVAL;

        add_blocker();

        // From this point on the thread is no longer joinable by anyone
        // else. It also means that if the join is timed, it becomes
        // detached when a timeout happens.
        m_is_joinable = false;
        return KSuccess;
    }
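
    // Usage sketch (illustrative): the AddBlockerHandler runs with m_lock held and is
    // expected to register the caller's JoinBlocker against this thread, e.g.
    //
    //     KResult result = joinee.try_join([&]() {
    //         register_join_blocker(blocker); // hypothetical helper, not part of this header
    //     });
    //
    // In the kernel itself that registration is performed by JoinBlocker (declared above);
    // the lambda here is only a placeholder for it.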

    void did_schedule() { ++m_times_scheduled; }
    u32 times_scheduled() const { return m_times_scheduled; }

    void resume_from_stopped();

    [[nodiscard]] bool should_be_stopped() const;
    [[nodiscard]] bool is_stopped() const { return m_state == Stopped; }
    [[nodiscard]] bool is_blocked() const { return m_state == Blocked; }
    [[nodiscard]] bool is_in_block() const
    {
        ScopedSpinLock lock(m_block_lock);
        return m_in_block;
    }

    u32 cpu() const { return m_cpu.load(AK::MemoryOrder::memory_order_consume); }
    void set_cpu(u32 cpu) { m_cpu.store(cpu, AK::MemoryOrder::memory_order_release); }
    u32 affinity() const { return m_cpu_affinity; }
    void set_affinity(u32 affinity) { m_cpu_affinity = affinity; }

    RegisterState& get_register_dump_from_stack();
    const RegisterState& get_register_dump_from_stack() const { return const_cast<Thread*>(this)->get_register_dump_from_stack(); }

    DebugRegisterState& debug_register_state() { return m_debug_register_state; }
    const DebugRegisterState& debug_register_state() const { return m_debug_register_state; }

    TSS& tss() { return m_tss; }
    const TSS& tss() const { return m_tss; }
    State state() const { return m_state; }
    const char* state_string() const;

    VirtualAddress thread_specific_data() const { return m_thread_specific_data; }
    size_t thread_specific_region_size() const;
    size_t thread_specific_region_alignment() const;

    ALWAYS_INLINE void yield_if_stopped()
    {
        // If some thread stopped us, we need to yield to someone else
        // We check this when entering/exiting a system call. A thread
        // may continue to execute in user land until the next timer
        // tick or entering the next system call, or if it's in kernel
        // mode then we will intercept prior to returning back to user
        // mode.
        ScopedSpinLock lock(m_lock);
        while (state() == Thread::Stopped) {
            lock.unlock();
            // We shouldn't be holding the big lock here
            yield_while_not_holding_big_lock();
            lock.lock();
        }
    }

    template<typename BlockerType, class... Args>
    [[nodiscard]] BlockResult block(const BlockTimeout& timeout, Args&&... args)
    {
        VERIFY(!Processor::current().in_irq());
        VERIFY(this == Thread::current());
        ScopedCritical critical;
        VERIFY(!s_mm_lock.own_lock());

        ScopedSpinLock block_lock(m_block_lock);
        // We need to hold m_block_lock so that nobody can unblock a blocker as soon
        // as it is constructed and registered elsewhere
        m_in_block = true;
        BlockerType blocker(forward<Args>(args)...);

        ScopedSpinLock scheduler_lock(g_scheduler_lock);
        // Relaxed semantics are fine for timeout_unblocked because we
        // synchronize on the spin locks already.
        Atomic<bool, AK::MemoryOrder::memory_order_relaxed> timeout_unblocked(false);
        bool timer_was_added = false;
        {
            switch (state()) {
            case Thread::Stopped:
                // It's possible that we were requested to be stopped!
                break;
            case Thread::Running:
                VERIFY(m_blocker == nullptr);
                break;
            default:
                VERIFY_NOT_REACHED();
            }

            m_blocker = &blocker;

            if (!blocker.should_block()) {
                // Don't block if the wake condition is already met
                blocker.not_blocking(false);
                m_blocker = nullptr;
                m_in_block = false;
                return BlockResult::NotBlocked;
            }

            auto& block_timeout = blocker.override_timeout(timeout);
            if (!block_timeout.is_infinite()) {
                // Process::kill_all_threads may be called at any time, which will mark all
                // threads to die.
                timer_was_added = TimerQueue::the().add_timer_without_id(*m_block_timer, block_timeout.clock_id(), block_timeout.absolute_time(), [&]() {
                    VERIFY(!Processor::current().in_irq());
                    VERIFY(!g_scheduler_lock.own_lock());
                    VERIFY(!m_block_lock.own_lock());
                    // NOTE: this may execute on the same or any other processor!
                    ScopedSpinLock scheduler_lock(g_scheduler_lock);
                    ScopedSpinLock block_lock(m_block_lock);
                    if (m_blocker && timeout_unblocked.exchange(true) == false)
                        unblock();
                });
                if (!timer_was_added) {
                    // Timeout is already in the past
                    blocker.not_blocking(true);
                    m_blocker = nullptr;
                    m_in_block = false;
                    return BlockResult::InterruptedByTimeout;
                }
            }

            blocker.begin_blocking({});

            set_state(Thread::Blocked);
        }

        scheduler_lock.unlock();
        block_lock.unlock();

        dbgln_if(THREAD_DEBUG, "Thread {} blocking on {} ({}) -->", *this, &blocker, blocker.state_string());
        bool did_timeout = false;
        u32 lock_count_to_restore = 0;
        auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
        for (;;) {
            // Yield to the scheduler, and wait for us to resume unblocked.
            VERIFY(!g_scheduler_lock.own_lock());
            VERIFY(Processor::current().in_critical());
            yield_while_not_holding_big_lock();
            VERIFY(Processor::current().in_critical());

            ScopedSpinLock block_lock2(m_block_lock);
            if (should_be_stopped() || state() == Stopped) {
                dbgln("Thread should be stopped, current state: {}", state_string());
                set_state(Thread::Blocked);
                continue;
            }
            if (m_blocker && !m_blocker->can_be_interrupted() && !m_should_die) {
                block_lock2.unlock();
                dbgln("Thread should not be unblocking, current state: {}", state_string());
                set_state(Thread::Blocked);
                continue;
            }
            // Prevent the timeout from unblocking this thread if it happens to
            // be in the process of firing already
            did_timeout |= timeout_unblocked.exchange(true);
            if (m_blocker) {
                // Remove ourselves...
                VERIFY(m_blocker == &blocker);
                m_blocker = nullptr;
            }
            dbgln_if(THREAD_DEBUG, "<-- Thread {} unblocked from {} ({})", *this, &blocker, blocker.state_string());
            m_in_block = false;
            break;
        }

        if (blocker.was_interrupted_by_signal()) {
            ScopedSpinLock scheduler_lock(g_scheduler_lock);
            ScopedSpinLock lock(m_lock);
            dispatch_one_pending_signal();
        }

        // Notify the blocker that we are no longer blocking. It may need
        // to clean up now while we're still holding m_lock
        auto result = blocker.end_blocking({}, did_timeout); // calls was_unblocked internally

        if (timer_was_added && !did_timeout) {
            // Cancel the timer while not holding any locks. This allows
            // the timer function to complete before we remove it
            // (e.g. if it's on another processor)
            TimerQueue::the().cancel_timer(*m_block_timer);
        }
        if (previous_locked != LockMode::Unlocked) {
            // NOTE: this may trigger another call to Thread::block(), so
            // we need to do this after we're all done and restored m_in_block!
            relock_process(previous_locked, lock_count_to_restore);
        }
        return result;
    }

    void unblock_from_blocker(Blocker&);
    void unblock(u8 signal = 0);

    template<class... Args>
    Thread::BlockResult wait_on(WaitQueue& wait_queue, const Thread::BlockTimeout& timeout, Args&&... args)
    {
        VERIFY(this == Thread::current());
        return block<Thread::QueueBlocker>(timeout, wait_queue, forward<Args>(args)...);
    }

    BlockResult sleep(clockid_t, const Time&, Time* = nullptr);
    BlockResult sleep(const Time& duration, Time* remaining_time = nullptr)
    {
        return sleep(CLOCK_MONOTONIC_COARSE, duration, remaining_time);
    }
    BlockResult sleep_until(clockid_t, const Time&);
    BlockResult sleep_until(const Time& duration)
    {
        return sleep_until(CLOCK_MONOTONIC_COARSE, duration);
    }
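
    // Usage sketch (illustrative): in-kernel callers usually block through these
    // wrappers rather than instantiating blockers by hand, e.g.
    //
    //     auto result = Thread::current()->sleep(Time::from_seconds(1)); // assumes AK::Time::from_seconds
    //     if (result.was_interrupted())
    //         return EINTR;
    //
    // wait_on() above shows the same idea for WaitQueue-based blocking via QueueBlocker.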

    // Tell this thread to unblock if needed,
    // gracefully unwind the stack and die.
    void set_should_die();
    [[nodiscard]] bool should_die() const { return m_should_die; }
    void die_if_needed();

    void exit(void* = nullptr);

    bool tick();
    void set_ticks_left(u32 t) { m_ticks_left = t; }
    u32 ticks_left() const { return m_ticks_left; }

    u32 kernel_stack_base() const { return m_kernel_stack_base; }
    u32 kernel_stack_top() const { return m_kernel_stack_top; }

    void set_state(State, u8 = 0);

    [[nodiscard]] bool is_initialized() const { return m_initialized; }
    void set_initialized(bool initialized) { m_initialized = initialized; }

    void send_urgent_signal_to_self(u8 signal);
    void send_signal(u8 signal, Process* sender);

    u32 update_signal_mask(u32 signal_mask);
    u32 signal_mask_block(sigset_t signal_set, bool block);
    u32 signal_mask() const;
    void clear_signals();

    KResultOr<u32> peek_debug_register(u32 register_index);
    KResult poke_debug_register(u32 register_index, u32 data);

    void set_dump_backtrace_on_finalization() { m_dump_backtrace_on_finalization = true; }

    DispatchSignalResult dispatch_one_pending_signal();
    DispatchSignalResult try_dispatch_one_pending_signal(u8 signal);
    DispatchSignalResult dispatch_signal(u8 signal);
    void check_dispatch_pending_signal();
    [[nodiscard]] bool has_unmasked_pending_signals() const { return m_have_any_unmasked_pending_signals.load(AK::memory_order_consume); }
    [[nodiscard]] bool should_ignore_signal(u8 signal) const;
    [[nodiscard]] bool has_signal_handler(u8 signal) const;
    u32 pending_signals() const;
    u32 pending_signals_for_state() const;

    FPUState& fpu_state() { return *m_fpu_state; }

    KResult make_thread_specific_region(Badge<Process>);

    unsigned syscall_count() const { return m_syscall_count; }
    void did_syscall() { ++m_syscall_count; }
    unsigned inode_faults() const { return m_inode_faults; }
    void did_inode_fault() { ++m_inode_faults; }
    unsigned zero_faults() const { return m_zero_faults; }
    void did_zero_fault() { ++m_zero_faults; }
    unsigned cow_faults() const { return m_cow_faults; }
    void did_cow_fault() { ++m_cow_faults; }

    unsigned file_read_bytes() const { return m_file_read_bytes; }
    unsigned file_write_bytes() const { return m_file_write_bytes; }

    void did_file_read(unsigned bytes)
    {
        m_file_read_bytes += bytes;
    }

    void did_file_write(unsigned bytes)
    {
        m_file_write_bytes += bytes;
    }

    unsigned unix_socket_read_bytes() const { return m_unix_socket_read_bytes; }
    unsigned unix_socket_write_bytes() const { return m_unix_socket_write_bytes; }

    void did_unix_socket_read(unsigned bytes)
    {
        m_unix_socket_read_bytes += bytes;
    }

    void did_unix_socket_write(unsigned bytes)
    {
        m_unix_socket_write_bytes += bytes;
    }

    unsigned ipv4_socket_read_bytes() const { return m_ipv4_socket_read_bytes; }
    unsigned ipv4_socket_write_bytes() const { return m_ipv4_socket_write_bytes; }

    void did_ipv4_socket_read(unsigned bytes)
    {
        m_ipv4_socket_read_bytes += bytes;
    }

    void did_ipv4_socket_write(unsigned bytes)
    {
        m_ipv4_socket_write_bytes += bytes;
    }

    void set_active(bool active) { m_is_active = active; }

    u32 saved_critical() const { return m_saved_critical; }
    void save_critical(u32 critical) { m_saved_critical = critical; }

    [[nodiscard]] bool is_active() const { return m_is_active; }

    [[nodiscard]] bool is_finalizable() const
    {
        // We can't finalize as long as this thread is still running
        // Note that checking for Running state here isn't sufficient
        // as the thread may not be in Running state but switching out.
        // m_is_active is set to false once the context switch is
        // complete and the thread is not executing on any processor.
        if (m_is_active.load(AK::memory_order_acquire))
            return false;

        // We can't finalize until the thread is either detached or
        // a join has started. We can't make m_is_joinable atomic
        // because that would introduce a race in try_join.
        ScopedSpinLock lock(m_lock);
        return !m_is_joinable;
    }

    RefPtr<Thread> clone(Process&);

    template<IteratorFunction<Thread&> Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<IteratorFunction<Thread&> Callback>
    static IterationDecision for_each(Callback);

    template<VoidFunction<Thread&> Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<VoidFunction<Thread&> Callback>
    static IterationDecision for_each(Callback);

    static constexpr u32 default_kernel_stack_size = 65536;
    static constexpr u32 default_userspace_stack_size = 1 * MiB;

    u32 ticks_in_user() const { return m_ticks_in_user; }
    u32 ticks_in_kernel() const { return m_ticks_in_kernel; }

    enum class PreviousMode : u8 {
        KernelMode = 0,
        UserMode
    };
    PreviousMode previous_mode() const { return m_previous_mode; }
    void set_previous_mode(PreviousMode mode) { m_previous_mode = mode; }

    TrapFrame*& current_trap() { return m_current_trap; }

    RecursiveSpinLock& get_lock() const { return m_lock; }

#if LOCK_DEBUG
    void holding_lock(Lock& lock, int refs_delta, const SourceLocation& location)
    {
        VERIFY(refs_delta != 0);
        m_holding_locks.fetch_add(refs_delta, AK::MemoryOrder::memory_order_relaxed);
        ScopedSpinLock list_lock(m_holding_locks_lock);
        if (refs_delta > 0) {
            bool have_existing = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    have_existing = true;
                    info.count += refs_delta;
                    break;
                }
            }
            if (!have_existing)
                m_holding_locks_list.append({ &lock, location, 1 });
        } else {
            VERIFY(refs_delta < 0);
            bool found = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    VERIFY(info.count >= (unsigned)-refs_delta);
                    info.count -= (unsigned)-refs_delta;
                    if (info.count == 0)
                        m_holding_locks_list.remove(i);
                    found = true;
                    break;
                }
            }
            VERIFY(found);
        }
    }
    u32 lock_count() const
    {
        return m_holding_locks.load(AK::MemoryOrder::memory_order_relaxed);
    }
#endif

    bool is_handling_page_fault() const
    {
        return m_handling_page_fault;
    }
    void set_handling_page_fault(bool b) { m_handling_page_fault = b; }
    void set_idle_thread() { m_is_idle_thread = true; }
    bool is_idle_thread() const { return m_is_idle_thread; }

    ALWAYS_INLINE u32 enter_profiler()
    {
        return m_nested_profiler_calls.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);
    }

    ALWAYS_INLINE u32 leave_profiler()
    {
        return m_nested_profiler_calls.fetch_sub(1, AK::MemoryOrder::memory_order_acquire);
    }

private:
    Thread(NonnullRefPtr<Process>, NonnullOwnPtr<Region>, NonnullRefPtr<Timer>);

    IntrusiveListNode<Thread> m_process_thread_list_node;
    int m_runnable_priority { -1 };

    friend class WaitQueue;

    class JoinBlockCondition : public BlockCondition {
    public:
        void thread_did_exit(void* exit_value)
        {
            ScopedSpinLock lock(m_lock);
            VERIFY(!m_thread_did_exit);
            m_thread_did_exit = true;
            m_exit_value.store(exit_value, AK::MemoryOrder::memory_order_release);
            do_unblock_joiner();
        }
        void thread_finalizing()
        {
            ScopedSpinLock lock(m_lock);
            do_unblock_joiner();
        }
        void* exit_value() const
        {
            VERIFY(m_thread_did_exit);
            return m_exit_value.load(AK::MemoryOrder::memory_order_acquire);
        }

        void try_unblock(JoinBlocker& blocker)
        {
            ScopedSpinLock lock(m_lock);
            if (m_thread_did_exit)
                blocker.unblock(exit_value(), false);
        }

    protected:
        virtual bool should_add_blocker(Blocker& b, void*) override
        {
            VERIFY(b.blocker_type() == Blocker::Type::Join);
            auto& blocker = static_cast<JoinBlocker&>(b);

            // NOTE: m_lock is held already!
            if (m_thread_did_exit) {
                blocker.unblock(exit_value(), true);
                return false;
            }
            return true;
        }

    private:
        void do_unblock_joiner()
        {
            do_unblock([&](Blocker& b, void*, bool&) {
                VERIFY(b.blocker_type() == Blocker::Type::Join);
                auto& blocker = static_cast<JoinBlocker&>(b);
                return blocker.unblock(exit_value(), false);
            });
        }

        Atomic<void*> m_exit_value { nullptr };
        bool m_thread_did_exit { false };
    };

    LockMode unlock_process_if_locked(u32&);
    void relock_process(LockMode, u32);
    String backtrace();
    void reset_fpu_state();

    mutable RecursiveSpinLock m_lock;
    mutable RecursiveSpinLock m_block_lock;
    NonnullRefPtr<Process> m_process;
    ThreadID m_tid { -1 };
    TSS m_tss {};
    DebugRegisterState m_debug_register_state {};
    TrapFrame* m_current_trap { nullptr };
    u32 m_saved_critical { 1 };
    IntrusiveListNode<Thread> m_ready_queue_node;
    Atomic<u32> m_cpu { 0 };
    u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
    u32 m_ticks_left { 0 };
    u32 m_times_scheduled { 0 };
    u32 m_ticks_in_user { 0 };
    u32 m_ticks_in_kernel { 0 };
    u32 m_pending_signals { 0 };
    u32 m_signal_mask { 0 };
    u32 m_kernel_stack_base { 0 };
    u32 m_kernel_stack_top { 0 };
    OwnPtr<Region> m_kernel_stack_region;
    VirtualAddress m_thread_specific_data;
    Array<SignalActionData, NSIG> m_signal_action_data;
    Blocker* m_blocker { nullptr };

#if LOCK_DEBUG
    struct HoldingLockInfo {
        Lock* lock;
        SourceLocation source_location;
        unsigned count;
    };
    Atomic<u32> m_holding_locks { 0 };
    SpinLock<u8> m_holding_locks_lock;
    Vector<HoldingLockInfo> m_holding_locks_list;
#endif

    JoinBlockCondition m_join_condition;
    Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_active { false };
    bool m_is_joinable { true };
    bool m_handling_page_fault { false };
    PreviousMode m_previous_mode { PreviousMode::UserMode };

    unsigned m_syscall_count { 0 };
    unsigned m_inode_faults { 0 };
    unsigned m_zero_faults { 0 };
    unsigned m_cow_faults { 0 };

    unsigned m_file_read_bytes { 0 };
    unsigned m_file_write_bytes { 0 };

    unsigned m_unix_socket_read_bytes { 0 };
    unsigned m_unix_socket_write_bytes { 0 };

    unsigned m_ipv4_socket_read_bytes { 0 };
    unsigned m_ipv4_socket_write_bytes { 0 };

    FPUState* m_fpu_state { nullptr };
    State m_state { Invalid };
    String m_name;
    u32 m_priority { THREAD_PRIORITY_NORMAL };

    State m_stop_state { Invalid };

    bool m_dump_backtrace_on_finalization { false };
    bool m_should_die { false };
    bool m_initialized { false };
    bool m_in_block { false };
    bool m_is_idle_thread { false };
    Atomic<bool> m_have_any_unmasked_pending_signals { false };
    Atomic<u32> m_nested_profiler_calls { 0 };

    RefPtr<Timer> m_block_timer;

    void yield_without_holding_big_lock();
    void donate_without_holding_big_lock(RefPtr<Thread>&, const char*);
    void yield_while_not_holding_big_lock();
    void drop_thread_count(bool);
};

AK_ENUM_BITWISE_OPERATORS(Thread::FileBlocker::BlockFlags);

template<IteratorFunction<Thread&> Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    ScopedSpinLock lock(g_tid_map_lock);
    for (auto& it : *g_tid_map) {
        IterationDecision decision = callback(*it.value);
        if (decision != IterationDecision::Continue)
            return decision;
    }
    return IterationDecision::Continue;
}

template<IteratorFunction<Thread&> Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    ScopedSpinLock lock(g_tid_map_lock);
    for (auto& it : *g_tid_map) {
        auto& thread = *it.value;
        if (thread.state() != state)
            continue;
        IterationDecision decision = callback(thread);
        if (decision != IterationDecision::Continue)
            return decision;
    }
    return IterationDecision::Continue;
}

template<VoidFunction<Thread&> Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    ScopedSpinLock lock(g_tid_map_lock);
    for (auto& it : *g_tid_map)
        callback(*it.value);
    return IterationDecision::Continue;
}

template<VoidFunction<Thread&> Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    return for_each_in_state(state, [&](auto& thread) {
        callback(thread);
        return IterationDecision::Continue;
    });
}
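
// Usage sketch (illustrative): the IteratorFunction overloads let a callback stop the
// walk early, while the VoidFunction overloads always visit every thread, e.g.
//
//     Thread::for_each_in_state(Thread::Runnable, [](Thread& thread) {
//         dbgln("runnable thread: {}", thread);
//         return IterationDecision::Continue;
//     });
//
// All overloads take g_tid_map_lock, so callbacks should avoid re-acquiring it.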

}

template<>
struct AK::Formatter<Kernel::Thread> : AK::Formatter<FormatString> {
    void format(FormatBuilder&, const Kernel::Thread&);
};
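
// With this Formatter specialization a Thread can be passed directly to the format
// helpers, as the dbgln_if(THREAD_DEBUG, ...) calls in Thread::block() above already
// do, e.g. dbgln("{}", *Thread::current());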