/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include <AK/EnumBits.h>
#include <AK/Function.h>
#include <AK/HashMap.h>
#include <AK/IntrusiveList.h>
#include <AK/Optional.h>
#include <AK/OwnPtr.h>
#include <AK/String.h>
#include <AK/Time.h>
#include <AK/Vector.h>
#include <AK/WeakPtr.h>
#include <AK/Weakable.h>
#include <Kernel/Arch/x86/CPU.h>
#include <Kernel/Arch/x86/SafeMem.h>
#include <Kernel/Debug.h>
#include <Kernel/Forward.h>
#include <Kernel/KResult.h>
#include <Kernel/LockMode.h>
#include <Kernel/Scheduler.h>
#include <Kernel/ThreadTracer.h>
#include <Kernel/TimerQueue.h>
#include <Kernel/UnixTypes.h>
#include <LibC/fd_set.h>
#include <LibC/signal_numbers.h>

namespace Kernel {
extern RecursiveSpinLock s_mm_lock;

enum class DispatchSignalResult {
    Deferred = 0,
    Yield,
    Terminate,
    Continue
};

struct SignalActionData {
    VirtualAddress handler_or_sigaction;
    u32 mask { 0 };
    int flags { 0 };
};

struct ThreadSpecificData {
    ThreadSpecificData* self;
};

#define THREAD_PRIORITY_MIN 1
#define THREAD_PRIORITY_LOW 10
#define THREAD_PRIORITY_NORMAL 30
#define THREAD_PRIORITY_HIGH 50
#define THREAD_PRIORITY_MAX 99

#define THREAD_AFFINITY_DEFAULT 0xffffffff

class Thread
    : public RefCounted<Thread>
    , public Weakable<Thread> {
    AK_MAKE_NONCOPYABLE(Thread);
    AK_MAKE_NONMOVABLE(Thread);

    friend class Process;
    friend class ProtectedProcessBase;
    friend class Scheduler;
    friend class ThreadReadyQueue;

    static SpinLock<u8> g_tid_map_lock;
    static HashMap<ThreadID, Thread*>* g_tid_map;

public:
    inline static Thread* current()
    {
        return Processor::current_thread();
    }

    static void initialize();

    static KResultOr<NonnullRefPtr<Thread>> try_create(NonnullRefPtr<Process>);
    ~Thread();

    static RefPtr<Thread> from_tid(ThreadID);
    static void finalize_dying_threads();

    ThreadID tid() const { return m_tid; }
    ProcessID pid() const;

    void set_priority(u32 p) { m_priority = p; }
    u32 priority() const { return m_priority; }

    void detach()
    {
        ScopedSpinLock lock(m_lock);
        m_is_joinable = false;
    }

    [[nodiscard]] bool is_joinable() const
    {
        ScopedSpinLock lock(m_lock);
        return m_is_joinable;
    }

    Process& process() { return m_process; }
    const Process& process() const { return m_process; }
    String name() const
    {
        // Because the name can be changed, we can't return a const
        // reference here. We must make a copy.
        ScopedSpinLock lock(m_lock);
        return m_name;
    }
    void set_name(const StringView& s)
    {
        ScopedSpinLock lock(m_lock);
        m_name = s;
    }

    void set_name(String&& name)
    {
        ScopedSpinLock lock(m_lock);
        m_name = move(name);
    }

    void finalize();

    enum State : u8 {
        Invalid = 0,
        Runnable,
        Running,
        Dying,
        Dead,
        Stopped,
        Blocked
    };
    class [[nodiscard]] BlockResult {
    public:
        enum Type {
            WokeNormally,
            NotBlocked,
            InterruptedBySignal,
            InterruptedByDeath,
            InterruptedByTimeout,
        };

        BlockResult() = delete;

        BlockResult(Type type)
            : m_type(type)
        {
        }

        bool operator==(Type type) const
        {
            return m_type == type;
        }
        bool operator!=(Type type) const
        {
            return m_type != type;
        }

        [[nodiscard]] bool was_interrupted() const
        {
            switch (m_type) {
            case InterruptedBySignal:
            case InterruptedByDeath:
                return true;
            default:
                return false;
            }
        }

        [[nodiscard]] bool timed_out() const
        {
            return m_type == InterruptedByTimeout;
        }

    private:
        Type m_type;
    };
    class BlockTimeout {
    public:
        BlockTimeout()
            : m_infinite(true)
        {
        }
        explicit BlockTimeout(bool is_absolute, const Time* time, const Time* start_time = nullptr, clockid_t clock_id = CLOCK_MONOTONIC_COARSE);

        const Time& absolute_time() const { return m_time; }
        const Time* start_time() const { return !m_infinite ? &m_start_time : nullptr; }
        clockid_t clock_id() const { return m_clock_id; }
        bool is_infinite() const { return m_infinite; }
        bool should_block() const { return m_infinite || m_should_block; }

    private:
        Time m_time {};
        Time m_start_time {};
        clockid_t m_clock_id { CLOCK_MONOTONIC_COARSE };
        bool m_infinite { false };
        bool m_should_block { false };
    };
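
    // Illustrative sketch (not part of the original header): constructing
    // timeouts. Time::from_seconds() is an AK helper assumed here; a
    // default-constructed BlockTimeout blocks forever.
    //
    //     Time two_seconds = Time::from_seconds(2);
    //     Thread::BlockTimeout relative(false, &two_seconds); // expires 2s from now
    //     Thread::BlockTimeout forever;                       // infinite; never expires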
    class BlockCondition;

    class Blocker {
    public:
        enum class Type {
            Unknown = 0,
            File,
            Futex,
            Plan9FS,
            Join,
            Queue,
            Routing,
            Sleep,
            Wait
        };
        virtual ~Blocker();
        virtual const char* state_string() const = 0;
        virtual bool should_block() { return true; }
        virtual Type blocker_type() const = 0;
        virtual const BlockTimeout& override_timeout(const BlockTimeout& timeout) { return timeout; }
        virtual bool can_be_interrupted() const { return true; }
        virtual void not_blocking(bool) = 0;
        virtual void was_unblocked(bool did_timeout)
        {
            if (did_timeout) {
                ScopedSpinLock lock(m_lock);
                m_did_timeout = true;
            }
        }
        void set_interrupted_by_death()
        {
            ScopedSpinLock lock(m_lock);
            do_set_interrupted_by_death();
        }
        void set_interrupted_by_signal(u8 signal)
        {
            ScopedSpinLock lock(m_lock);
            do_set_interrupted_by_signal(signal);
        }
        u8 was_interrupted_by_signal() const
        {
            ScopedSpinLock lock(m_lock);
            return do_get_interrupted_by_signal();
        }
        virtual Thread::BlockResult block_result()
        {
            ScopedSpinLock lock(m_lock);
            if (m_was_interrupted_by_death)
                return Thread::BlockResult::InterruptedByDeath;
            if (m_was_interrupted_by_signal != 0)
                return Thread::BlockResult::InterruptedBySignal;
            if (m_did_timeout)
                return Thread::BlockResult::InterruptedByTimeout;
            return Thread::BlockResult::WokeNormally;
        }

        void begin_blocking(Badge<Thread>);
        BlockResult end_blocking(Badge<Thread>, bool);

    protected:
        void do_set_interrupted_by_death()
        {
            m_was_interrupted_by_death = true;
        }
        void do_set_interrupted_by_signal(u8 signal)
        {
            VERIFY(signal != 0);
            m_was_interrupted_by_signal = signal;
        }
        void do_clear_interrupted_by_signal()
        {
            m_was_interrupted_by_signal = 0;
        }
        u8 do_get_interrupted_by_signal() const
        {
            return m_was_interrupted_by_signal;
        }
        [[nodiscard]] bool was_interrupted() const
        {
            return m_was_interrupted_by_death || m_was_interrupted_by_signal != 0;
        }
        void unblock_from_blocker()
        {
            RefPtr<Thread> thread;
            {
                ScopedSpinLock lock(m_lock);
                if (m_is_blocking) {
                    m_is_blocking = false;
                    VERIFY(m_blocked_thread);
                    thread = m_blocked_thread;
                }
            }
            if (thread)
                thread->unblock_from_blocker(*this);
        }

        bool set_block_condition(BlockCondition&, void* = nullptr);
        void set_block_condition_raw_locked(BlockCondition* block_condition)
        {
            m_block_condition = block_condition;
        }

        mutable RecursiveSpinLock m_lock;

    private:
        BlockCondition* m_block_condition { nullptr };
        void* m_block_data { nullptr };
        Thread* m_blocked_thread { nullptr };
        u8 m_was_interrupted_by_signal { 0 };
        bool m_is_blocking { false };
        bool m_was_interrupted_by_death { false };
        bool m_did_timeout { false };
    };
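
    // A hedged sketch of the Blocker protocol implied by the virtuals above:
    // a subclass reports readiness via should_block(), is told via
    // not_blocking() when the thread never actually blocked, and via
    // was_unblocked() after a real block ends. A hypothetical minimal
    // subclass (names are illustrative, not from this header):
    //
    //     class ExampleBlocker final : public Blocker {
    //     public:
    //         virtual Type blocker_type() const override { return Type::Unknown; }
    //         virtual const char* state_string() const override { return "Example"; }
    //         virtual void not_blocking(bool) override { }
    //         virtual bool should_block() override { return m_should_block; }
    //
    //     private:
    //         bool m_should_block { true };
    //     };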
    class BlockCondition {
        AK_MAKE_NONCOPYABLE(BlockCondition);
        AK_MAKE_NONMOVABLE(BlockCondition);

    public:
        BlockCondition() = default;

        virtual ~BlockCondition()
        {
            ScopedSpinLock lock(m_lock);
            VERIFY(m_blockers.is_empty());
        }

        bool add_blocker(Blocker& blocker, void* data)
        {
            ScopedSpinLock lock(m_lock);
            if (!should_add_blocker(blocker, data))
                return false;
            m_blockers.append({ &blocker, data });
            return true;
        }

        void remove_blocker(Blocker& blocker, void* data)
        {
            ScopedSpinLock lock(m_lock);
            // NOTE: it's possible that the blocker is no longer present
            m_blockers.remove_first_matching([&](auto& info) {
                return info.blocker == &blocker && info.data == data;
            });
        }

        bool is_empty() const
        {
            ScopedSpinLock lock(m_lock);
            return is_empty_locked();
        }

    protected:
        template<typename UnblockOne>
        bool unblock(UnblockOne unblock_one)
        {
            ScopedSpinLock lock(m_lock);
            return do_unblock(unblock_one);
        }

        template<typename UnblockOne>
        bool do_unblock(UnblockOne unblock_one)
        {
            VERIFY(m_lock.is_locked());
            bool stop_iterating = false;
            bool did_unblock = false;
            for (size_t i = 0; i < m_blockers.size() && !stop_iterating;) {
                auto& info = m_blockers[i];
                if (unblock_one(*info.blocker, info.data, stop_iterating)) {
                    m_blockers.remove(i);
                    did_unblock = true;
                    continue;
                }
                i++;
            }
            return did_unblock;
        }

        bool is_empty_locked() const
        {
            VERIFY(m_lock.is_locked());
            return m_blockers.is_empty();
        }

        virtual bool should_add_blocker(Blocker&, void*) { return true; }

        struct BlockerInfo {
            Blocker* blocker;
            void* data;
        };

        Vector<BlockerInfo, 4> do_take_blockers(size_t count)
        {
            if (m_blockers.size() <= count)
                return move(m_blockers);

            size_t move_count = (count <= m_blockers.size()) ? count : m_blockers.size();
            VERIFY(move_count > 0);

            Vector<BlockerInfo, 4> taken_blockers;
            taken_blockers.ensure_capacity(move_count);
            for (size_t i = 0; i < move_count; i++)
                taken_blockers.append(m_blockers.take(i));
            m_blockers.remove(0, move_count);
            return taken_blockers;
        }

        void do_append_blockers(Vector<BlockerInfo, 4>&& blockers_to_append)
        {
            if (blockers_to_append.is_empty())
                return;

            if (m_blockers.is_empty()) {
                m_blockers = move(blockers_to_append);
                return;
            }

            m_blockers.ensure_capacity(m_blockers.size() + blockers_to_append.size());
            for (size_t i = 0; i < blockers_to_append.size(); i++)
                m_blockers.append(blockers_to_append.take(i));
            blockers_to_append.clear();
        }

        mutable SpinLock<u8> m_lock;

    private:
        Vector<BlockerInfo, 4> m_blockers;
    };
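
    // Sketch of the unblock_one contract used by do_unblock() above: the
    // callback sees each registered Blocker, its opaque data pointer, and a
    // by-reference stop_iterating flag; returning true removes that blocker
    // from the list. A BlockCondition subclass waking at most one queued
    // waiter might do (the QueueBlocker cast is illustrative):
    //
    //     do_unblock([](Blocker& b, void*, bool& stop_iterating) {
    //         stop_iterating = true; // only consider the first blocker
    //         return static_cast<QueueBlocker&>(b).unblock();
    //     });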
    friend class JoinBlocker;
    class JoinBlocker final : public Blocker {
    public:
        explicit JoinBlocker(Thread& joinee, KResult& try_join_result, void*& joinee_exit_value);
        virtual Type blocker_type() const override { return Type::Join; }
        virtual const char* state_string() const override { return "Joining"; }
        virtual bool can_be_interrupted() const override { return false; }
        virtual bool should_block() override { return !m_join_error && m_should_block; }
        virtual void not_blocking(bool) override;

        bool unblock(void*, bool);

    private:
        NonnullRefPtr<Thread> m_joinee;
        void*& m_joinee_exit_value;
        bool m_join_error { false };
        bool m_did_unblock { false };
        bool m_should_block { true };
    };

    class QueueBlocker : public Blocker {
    public:
        explicit QueueBlocker(WaitQueue&, const char* block_reason = nullptr);
        virtual ~QueueBlocker();

        virtual Type blocker_type() const override { return Type::Queue; }
        virtual const char* state_string() const override { return m_block_reason ? m_block_reason : "Queue"; }
        virtual void not_blocking(bool) override { }

        virtual bool should_block() override
        {
            return m_should_block;
        }

        bool unblock();

    protected:
        const char* const m_block_reason;
        bool m_should_block { true };
        bool m_did_unblock { false };
    };

    class FutexBlocker : public Blocker {
    public:
        explicit FutexBlocker(FutexQueue&, u32);
        virtual ~FutexBlocker();

        virtual Type blocker_type() const override { return Type::Futex; }
        virtual const char* state_string() const override { return "Futex"; }
        virtual void not_blocking(bool) override { }

        virtual bool should_block() override
        {
            return m_should_block;
        }

        u32 bitset() const { return m_bitset; }
        void begin_requeue()
        {
            // We need to hold the lock until we've moved it over.
            m_relock_flags = m_lock.lock();
        }
        void finish_requeue(FutexQueue&);

        bool unblock_bitset(u32 bitset);
        bool unblock(bool force = false);

    protected:
        u32 m_bitset;
        u32 m_relock_flags { 0 };
        bool m_should_block { true };
        bool m_did_unblock { false };
    };
    class FileBlocker : public Blocker {
    public:
        enum class BlockFlags : u16 {
            None = 0,

            Read = 1 << 0,
            Write = 1 << 1,
            ReadPriority = 1 << 2,

            Accept = 1 << 3,
            Connect = 1 << 4,
            SocketFlags = Accept | Connect,

            WriteNotOpen = 1 << 5,
            WriteError = 1 << 6,
            WriteHangUp = 1 << 7,
            ReadHangUp = 1 << 8,
            Exception = WriteNotOpen | WriteError | WriteHangUp | ReadHangUp,
        };

        virtual Type blocker_type() const override { return Type::File; }

        virtual bool should_block() override
        {
            return m_should_block;
        }

        virtual bool unblock(bool, void*) = 0;

    protected:
        bool m_should_block { true };
    };

    class FileDescriptionBlocker : public FileBlocker {
    public:
        const FileDescription& blocked_description() const;

        virtual bool unblock(bool, void*) override;
        virtual void not_blocking(bool) override;

    protected:
        explicit FileDescriptionBlocker(FileDescription&, BlockFlags, BlockFlags&);

    private:
        NonnullRefPtr<FileDescription> m_blocked_description;
        const BlockFlags m_flags;
        BlockFlags& m_unblocked_flags;
        bool m_did_unblock { false };
        bool m_should_block { true };
    };

    class AcceptBlocker final : public FileDescriptionBlocker {
    public:
        explicit AcceptBlocker(FileDescription&, BlockFlags&);
        virtual const char* state_string() const override { return "Accepting"; }
    };

    class ConnectBlocker final : public FileDescriptionBlocker {
    public:
        explicit ConnectBlocker(FileDescription&, BlockFlags&);
        virtual const char* state_string() const override { return "Connecting"; }
    };

    class WriteBlocker final : public FileDescriptionBlocker {
    public:
        explicit WriteBlocker(FileDescription&, BlockFlags&);
        virtual const char* state_string() const override { return "Writing"; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;

    private:
        BlockTimeout m_timeout;
    };

    class ReadBlocker final : public FileDescriptionBlocker {
    public:
        explicit ReadBlocker(FileDescription&, BlockFlags&);
        virtual const char* state_string() const override { return "Reading"; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;

    private:
        BlockTimeout m_timeout;
    };

    class SleepBlocker final : public Blocker {
    public:
        explicit SleepBlocker(const BlockTimeout&, Time* = nullptr);
        virtual const char* state_string() const override { return "Sleeping"; }
        virtual Type blocker_type() const override { return Type::Sleep; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;
        virtual void not_blocking(bool) override;
        virtual void was_unblocked(bool) override;
        virtual Thread::BlockResult block_result() override;

    private:
        void calculate_remaining();

        BlockTimeout m_deadline;
        Time* m_remaining;
    };

    class SelectBlocker final : public FileBlocker {
    public:
        struct FDInfo {
            NonnullRefPtr<FileDescription> description;
            BlockFlags block_flags { BlockFlags::None };
            BlockFlags unblocked_flags { BlockFlags::None };
        };

        typedef Vector<FDInfo, FD_SETSIZE> FDVector;
        SelectBlocker(FDVector& fds);
        virtual ~SelectBlocker();

        virtual bool unblock(bool, void*) override;
        virtual void not_blocking(bool) override;
        virtual void was_unblocked(bool) override;
        virtual const char* state_string() const override { return "Selecting"; }

    private:
        size_t collect_unblocked_flags();

        FDVector& m_fds;
        bool m_did_unblock { false };
    };
    class WaitBlocker final : public Blocker {
    public:
        enum class UnblockFlags {
            Terminated,
            Stopped,
            Continued,
            Disowned
        };

        WaitBlocker(int wait_options, idtype_t id_type, pid_t id, KResultOr<siginfo_t>& result);
        virtual const char* state_string() const override { return "Waiting"; }
        virtual Type blocker_type() const override { return Type::Wait; }
        virtual bool should_block() override { return m_should_block; }
        virtual void not_blocking(bool) override;
        virtual void was_unblocked(bool) override;

        bool unblock(Process& process, UnblockFlags flags, u8 signal, bool from_add_blocker);
        bool is_wait() const { return !(m_wait_options & WNOWAIT); }

    private:
        void do_was_disowned();
        void do_set_result(const siginfo_t&);

        const int m_wait_options;
        const idtype_t m_id_type;
        const pid_t m_waitee_id;
        KResultOr<siginfo_t>& m_result;
        RefPtr<Process> m_waitee;
        RefPtr<ProcessGroup> m_waitee_group;
        bool m_did_unblock { false };
        bool m_error { false };
        bool m_got_sigchild { false };
        bool m_should_block;
    };

    class WaitBlockCondition final : public BlockCondition {
        friend class WaitBlocker;

    public:
        WaitBlockCondition(Process& process)
            : m_process(process)
        {
        }

        void disowned_by_waiter(Process&);
        bool unblock(Process&, WaitBlocker::UnblockFlags, u8);
        void try_unblock(WaitBlocker&);
        void finalize();

    protected:
        virtual bool should_add_blocker(Blocker&, void*) override;

    private:
        struct ProcessBlockInfo {
            NonnullRefPtr<Process> process;
            WaitBlocker::UnblockFlags flags;
            u8 signal;
            bool was_waited { false };

            explicit ProcessBlockInfo(NonnullRefPtr<Process>&&, WaitBlocker::UnblockFlags, u8);
            ~ProcessBlockInfo();
        };

        Process& m_process;
        Vector<ProcessBlockInfo, 2> m_processes;
        bool m_finalized { false };
    };
    template<typename AddBlockerHandler>
    KResult try_join(AddBlockerHandler add_blocker)
    {
        if (Thread::current() == this)
            return EDEADLK;

        ScopedSpinLock lock(m_lock);
        if (!m_is_joinable || state() == Dead)
            return EINVAL;

        add_blocker();

        // From this point on the thread is no longer joinable by anyone
        // else. It also means that if the join is timed, it becomes
        // detached when a timeout happens.
        m_is_joinable = false;
        return KSuccess;
    }
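
    // Hedged usage sketch: a caller joins this thread by handing try_join()
    // a handler that registers its JoinBlocker while m_lock is still held
    // (the handler body below is illustrative, not from this header):
    //
    //     KResult result = thread->try_join([&]() {
    //         // e.g. register a JoinBlocker on the joinee's block condition
    //     });
    //     if (result.is_error())
    //         return result; // EDEADLK (self-join) or EINVAL (not joinable)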
    void did_schedule() { ++m_times_scheduled; }
    u32 times_scheduled() const { return m_times_scheduled; }

    void resume_from_stopped();

    [[nodiscard]] bool should_be_stopped() const;
    [[nodiscard]] bool is_stopped() const { return m_state == Stopped; }
    [[nodiscard]] bool is_blocked() const { return m_state == Blocked; }
    [[nodiscard]] bool is_in_block() const
    {
        ScopedSpinLock lock(m_block_lock);
        return m_in_block;
    }

    u32 cpu() const { return m_cpu.load(AK::MemoryOrder::memory_order_consume); }
    void set_cpu(u32 cpu) { m_cpu.store(cpu, AK::MemoryOrder::memory_order_release); }
    u32 affinity() const { return m_cpu_affinity; }
    void set_affinity(u32 affinity) { m_cpu_affinity = affinity; }

    RegisterState& get_register_dump_from_stack();
    const RegisterState& get_register_dump_from_stack() const { return const_cast<Thread*>(this)->get_register_dump_from_stack(); }

    TSS32& tss() { return m_tss; }
    const TSS32& tss() const { return m_tss; }

    State state() const { return m_state; }
    const char* state_string() const;

    VirtualAddress thread_specific_data() const { return m_thread_specific_data; }
    size_t thread_specific_region_size() const;
    size_t thread_specific_region_alignment() const;

    ALWAYS_INLINE void yield_if_stopped()
    {
        // If some thread stopped us, we need to yield to someone else.
        // We check this when entering/exiting a system call. A thread
        // may continue to execute in user land until the next timer
        // tick or entering the next system call, or if it's in kernel
        // mode then we will intercept prior to returning back to user
        // mode.
        ScopedSpinLock lock(m_lock);
        while (state() == Thread::Stopped) {
            lock.unlock();
            // We shouldn't be holding the big lock here
            yield_while_not_holding_big_lock();
            lock.lock();
        }
    }
    template<typename T, class... Args>
    [[nodiscard]] BlockResult block(const BlockTimeout& timeout, Args&&... args)
    {
        VERIFY(!Processor::current().in_irq());
        VERIFY(this == Thread::current());
        ScopedCritical critical;
        VERIFY(!s_mm_lock.own_lock());

        ScopedSpinLock block_lock(m_block_lock);
        // We need to hold m_block_lock so that nobody can unblock a blocker as soon
        // as it is constructed and registered elsewhere
        m_in_block = true;
        T t(forward<Args>(args)...);

        ScopedSpinLock scheduler_lock(g_scheduler_lock);
        // Relaxed semantics are fine for timeout_unblocked because we
        // synchronize on the spin locks already.
        Atomic<bool, AK::MemoryOrder::memory_order_relaxed> timeout_unblocked(false);
        RefPtr<Timer> timer;
        {
            switch (state()) {
            case Thread::Stopped:
                // It's possible that we were requested to be stopped!
                break;
            case Thread::Running:
                VERIFY(m_blocker == nullptr);
                break;
            default:
                VERIFY_NOT_REACHED();
            }

            m_blocker = &t;
            if (!t.should_block()) {
                // Don't block if the wake condition is already met
                t.not_blocking(false);
                m_blocker = nullptr;
                m_in_block = false;
                return BlockResult::NotBlocked;
            }

            auto& block_timeout = t.override_timeout(timeout);
            if (!block_timeout.is_infinite()) {
                // Process::kill_all_threads may be called at any time, which will mark all
                // threads to die. In that case
                timer = TimerQueue::the().add_timer_without_id(block_timeout.clock_id(), block_timeout.absolute_time(), [&]() {
                    VERIFY(!Processor::current().in_irq());
                    VERIFY(!g_scheduler_lock.own_lock());
                    VERIFY(!m_block_lock.own_lock());
                    // NOTE: this may execute on the same or any other processor!
                    ScopedSpinLock scheduler_lock(g_scheduler_lock);
                    ScopedSpinLock block_lock(m_block_lock);
                    if (m_blocker && timeout_unblocked.exchange(true) == false)
                        unblock();
                });
                if (!timer) {
                    // Timeout is already in the past
                    t.not_blocking(true);
                    m_blocker = nullptr;
                    m_in_block = false;
                    return BlockResult::InterruptedByTimeout;
                }
            }

            t.begin_blocking({});

            set_state(Thread::Blocked);
        }

        scheduler_lock.unlock();
        block_lock.unlock();

        dbgln_if(THREAD_DEBUG, "Thread {} blocking on {} ({}) -->", *this, &t, t.state_string());
        bool did_timeout = false;
        u32 lock_count_to_restore = 0;
        auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
        for (;;) {
            // Yield to the scheduler, and wait for us to resume unblocked.
            VERIFY(!g_scheduler_lock.own_lock());
            VERIFY(Processor::current().in_critical());
            yield_while_not_holding_big_lock();
            VERIFY(Processor::current().in_critical());

            ScopedSpinLock block_lock2(m_block_lock);
            if (should_be_stopped() || state() == Stopped) {
                dbgln("Thread should be stopped, current state: {}", state_string());
                set_state(Thread::Blocked);
                continue;
            }
            if (m_blocker && !m_blocker->can_be_interrupted() && !m_should_die) {
                block_lock2.unlock();
                dbgln("Thread should not be unblocking, current state: {}", state_string());
                set_state(Thread::Blocked);
                continue;
            }
            // Prevent the timeout from unblocking this thread if it happens to
            // be in the process of firing already
            did_timeout |= timeout_unblocked.exchange(true);
            if (m_blocker) {
                // Remove ourselves...
                VERIFY(m_blocker == &t);
                m_blocker = nullptr;
            }
            dbgln_if(THREAD_DEBUG, "<-- Thread {} unblocked from {} ({})", *this, &t, t.state_string());
            m_in_block = false;
            break;
        }

        if (t.was_interrupted_by_signal()) {
            ScopedSpinLock scheduler_lock(g_scheduler_lock);
            ScopedSpinLock lock(m_lock);
            dispatch_one_pending_signal();
        }

        // Notify the blocker that we are no longer blocking. It may need
        // to clean up now while we're still holding m_lock
        auto result = t.end_blocking({}, did_timeout); // calls was_unblocked internally

        if (timer && !did_timeout) {
            // Cancel the timer while not holding any locks. This allows
            // the timer function to complete before we remove it
            // (e.g. if it's on another processor)
            TimerQueue::the().cancel_timer(timer.release_nonnull());
        }
        if (previous_locked != LockMode::Unlocked) {
            // NOTE: this may trigger another call to Thread::block(), so
            // we need to do this after we're all done and restored m_in_block!
            relock_process(previous_locked, lock_count_to_restore);
        }
        return result;
    }
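
    // Hedged usage sketch of the blocking primitive above: T is constructed
    // in place from the trailing arguments, and the returned BlockResult says
    // how the block ended. Blocking on a WaitQueue, for example:
    //
    //     auto result = Thread::current()->block<Thread::QueueBlocker>(timeout, wait_queue);
    //     if (result.was_interrupted())
    //         return EINTR; // a signal or death request ended the block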
    void unblock_from_blocker(Blocker&);
    void unblock(u8 signal = 0);

    template<class... Args>
    Thread::BlockResult wait_on(WaitQueue& wait_queue, const Thread::BlockTimeout& timeout, Args&&... args)
    {
        VERIFY(this == Thread::current());
        return block<Thread::QueueBlocker>(timeout, wait_queue, forward<Args>(args)...);
    }

    BlockResult sleep(clockid_t, const Time&, Time* = nullptr);
    BlockResult sleep(const Time& duration, Time* remaining_time = nullptr)
    {
        return sleep(CLOCK_MONOTONIC_COARSE, duration, remaining_time);
    }
    BlockResult sleep_until(clockid_t, const Time&);
    BlockResult sleep_until(const Time& duration)
    {
        return sleep_until(CLOCK_MONOTONIC_COARSE, duration);
    }
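
    // For instance, a 100 ms kernel-side sleep on the default coarse
    // monotonic clock (Time::from_milliseconds() is an AK helper assumed
    // here):
    //
    //     auto result = Thread::current()->sleep(Time::from_milliseconds(100));
    //     if (result.was_interrupted())
    //         ; // a signal or death request cut the sleep short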
    // Tell this thread to unblock if needed,
    // gracefully unwind the stack and die.
    void set_should_die();
    [[nodiscard]] bool should_die() const { return m_should_die; }
    void die_if_needed();

    void exit(void* = nullptr);

    bool tick();
    void set_ticks_left(u32 t) { m_ticks_left = t; }
    u32 ticks_left() const { return m_ticks_left; }

    u32 kernel_stack_base() const { return m_kernel_stack_base; }
    u32 kernel_stack_top() const { return m_kernel_stack_top; }

    void set_state(State, u8 = 0);

    [[nodiscard]] bool is_initialized() const { return m_initialized; }
    void set_initialized(bool initialized) { m_initialized = initialized; }

    void send_urgent_signal_to_self(u8 signal);
    void send_signal(u8 signal, Process* sender);

    u32 update_signal_mask(u32 signal_mask);
    u32 signal_mask_block(sigset_t signal_set, bool block);
    u32 signal_mask() const;
    void clear_signals();

    void set_dump_backtrace_on_finalization() { m_dump_backtrace_on_finalization = true; }

    DispatchSignalResult dispatch_one_pending_signal();
    DispatchSignalResult try_dispatch_one_pending_signal(u8 signal);
    DispatchSignalResult dispatch_signal(u8 signal);
    void check_dispatch_pending_signal();
    [[nodiscard]] bool has_unmasked_pending_signals() const { return m_have_any_unmasked_pending_signals.load(AK::memory_order_consume); }
    [[nodiscard]] bool should_ignore_signal(u8 signal) const;
    [[nodiscard]] bool has_signal_handler(u8 signal) const;
    u32 pending_signals() const;
    u32 pending_signals_for_state() const;

    FPUState& fpu_state() { return *m_fpu_state; }

    KResult make_thread_specific_region(Badge<Process>);

    unsigned syscall_count() const { return m_syscall_count; }
    void did_syscall() { ++m_syscall_count; }
    unsigned inode_faults() const { return m_inode_faults; }
    void did_inode_fault() { ++m_inode_faults; }
    unsigned zero_faults() const { return m_zero_faults; }
    void did_zero_fault() { ++m_zero_faults; }
    unsigned cow_faults() const { return m_cow_faults; }
    void did_cow_fault() { ++m_cow_faults; }

    unsigned file_read_bytes() const { return m_file_read_bytes; }
    unsigned file_write_bytes() const { return m_file_write_bytes; }

    void did_file_read(unsigned bytes)
    {
        m_file_read_bytes += bytes;
    }

    void did_file_write(unsigned bytes)
    {
        m_file_write_bytes += bytes;
    }

    unsigned unix_socket_read_bytes() const { return m_unix_socket_read_bytes; }
    unsigned unix_socket_write_bytes() const { return m_unix_socket_write_bytes; }

    void did_unix_socket_read(unsigned bytes)
    {
        m_unix_socket_read_bytes += bytes;
    }

    void did_unix_socket_write(unsigned bytes)
    {
        m_unix_socket_write_bytes += bytes;
    }

    unsigned ipv4_socket_read_bytes() const { return m_ipv4_socket_read_bytes; }
    unsigned ipv4_socket_write_bytes() const { return m_ipv4_socket_write_bytes; }

    void did_ipv4_socket_read(unsigned bytes)
    {
        m_ipv4_socket_read_bytes += bytes;
    }

    void did_ipv4_socket_write(unsigned bytes)
    {
        m_ipv4_socket_write_bytes += bytes;
    }

    void set_active(bool active) { m_is_active = active; }

    u32 saved_critical() const { return m_saved_critical; }
    void save_critical(u32 critical) { m_saved_critical = critical; }

    [[nodiscard]] bool is_active() const { return m_is_active; }

    [[nodiscard]] bool is_finalizable() const
    {
        // We can't finalize as long as this thread is still running
        // Note that checking for Running state here isn't sufficient
        // as the thread may not be in Running state but switching out.
        // m_is_active is set to false once the context switch is
        // complete and the thread is not executing on any processor.
        if (m_is_active.load(AK::memory_order_acquire))
            return false;

        // We can't finalize until the thread is either detached or
        // a join has started. We can't make m_is_joinable atomic
        // because that would introduce a race in try_join.
        ScopedSpinLock lock(m_lock);
        return !m_is_joinable;
    }

    RefPtr<Thread> clone(Process&);

    template<typename Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<typename Callback>
    static IterationDecision for_each(Callback);

    static constexpr u32 default_kernel_stack_size = 65536;
    static constexpr u32 default_userspace_stack_size = 1 * MiB;

    u32 ticks_in_user() const { return m_ticks_in_user; }
    u32 ticks_in_kernel() const { return m_ticks_in_kernel; }

    enum class PreviousMode : u8 {
        KernelMode = 0,
        UserMode
    };
    PreviousMode previous_mode() const { return m_previous_mode; }
    void set_previous_mode(PreviousMode mode) { m_previous_mode = mode; }
    TrapFrame*& current_trap() { return m_current_trap; }

    RecursiveSpinLock& get_lock() const { return m_lock; }

#if LOCK_DEBUG
    void holding_lock(Lock& lock, int refs_delta, const char* file = nullptr, int line = 0)
    {
        VERIFY(refs_delta != 0);
        m_holding_locks.fetch_add(refs_delta, AK::MemoryOrder::memory_order_relaxed);
        ScopedSpinLock list_lock(m_holding_locks_lock);
        if (refs_delta > 0) {
            bool have_existing = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    have_existing = true;
                    info.count += refs_delta;
                    break;
                }
            }
            if (!have_existing)
                m_holding_locks_list.append({ &lock, file ? file : "unknown", line, 1 });
        } else {
            VERIFY(refs_delta < 0);
            bool found = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    VERIFY(info.count >= (unsigned)-refs_delta);
                    info.count -= (unsigned)-refs_delta;
                    if (info.count == 0)
                        m_holding_locks_list.remove(i);
                    found = true;
                    break;
                }
            }
            VERIFY(found);
        }
    }
    u32 lock_count() const
    {
        return m_holding_locks.load(AK::MemoryOrder::memory_order_relaxed);
    }
#endif

    bool is_handling_page_fault() const
    {
        return m_handling_page_fault;
    }
    void set_handling_page_fault(bool b) { m_handling_page_fault = b; }
private:
    Thread(NonnullRefPtr<Process>, NonnullOwnPtr<Region> kernel_stack_region);

    IntrusiveListNode m_process_thread_list_node;
    int m_runnable_priority { -1 };

    friend class WaitQueue;

    class JoinBlockCondition : public BlockCondition {
    public:
        void thread_did_exit(void* exit_value)
        {
            ScopedSpinLock lock(m_lock);
            VERIFY(!m_thread_did_exit);
            m_thread_did_exit = true;
            m_exit_value.store(exit_value, AK::MemoryOrder::memory_order_release);
            do_unblock_joiner();
        }
        void thread_finalizing()
        {
            ScopedSpinLock lock(m_lock);
            do_unblock_joiner();
        }
        void* exit_value() const
        {
            VERIFY(m_thread_did_exit);
            return m_exit_value.load(AK::MemoryOrder::memory_order_acquire);
        }

        void try_unblock(JoinBlocker& blocker)
        {
            ScopedSpinLock lock(m_lock);
            if (m_thread_did_exit)
                blocker.unblock(exit_value(), false);
        }

    protected:
        virtual bool should_add_blocker(Blocker& b, void*) override
        {
            VERIFY(b.blocker_type() == Blocker::Type::Join);
            auto& blocker = static_cast<JoinBlocker&>(b);

            // NOTE: m_lock is held already!
            if (m_thread_did_exit) {
                blocker.unblock(exit_value(), true);
                return false;
            }
            return true;
        }

    private:
        void do_unblock_joiner()
        {
            do_unblock([&](Blocker& b, void*, bool&) {
                VERIFY(b.blocker_type() == Blocker::Type::Join);
                auto& blocker = static_cast<JoinBlocker&>(b);
                return blocker.unblock(exit_value(), false);
            });
        }

        Atomic<void*> m_exit_value { nullptr };
        bool m_thread_did_exit { false };
    };

    LockMode unlock_process_if_locked(u32&);
    void relock_process(LockMode, u32);
    String backtrace();
    void reset_fpu_state();

    mutable RecursiveSpinLock m_lock;
    mutable RecursiveSpinLock m_block_lock;
    NonnullRefPtr<Process> m_process;
    ThreadID m_tid { -1 };
    TSS32 m_tss {};
    TrapFrame* m_current_trap { nullptr };
    u32 m_saved_critical { 1 };
    IntrusiveListNode m_ready_queue_node;
    Atomic<u32> m_cpu { 0 };
    u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
    u32 m_ticks_left { 0 };
    u32 m_times_scheduled { 0 };
    u32 m_ticks_in_user { 0 };
    u32 m_ticks_in_kernel { 0 };
    u32 m_pending_signals { 0 };
    u32 m_signal_mask { 0 };
    u32 m_kernel_stack_base { 0 };
    u32 m_kernel_stack_top { 0 };
    OwnPtr<Region> m_kernel_stack_region;
    VirtualAddress m_thread_specific_data;
    Array<SignalActionData, NSIG> m_signal_action_data;
    Blocker* m_blocker { nullptr };

#if LOCK_DEBUG
    struct HoldingLockInfo {
        Lock* lock;
        const char* file;
        int line;
        unsigned count;
    };
    Atomic<u32> m_holding_locks { 0 };
    SpinLock<u8> m_holding_locks_lock;
    Vector<HoldingLockInfo> m_holding_locks_list;
#endif

    JoinBlockCondition m_join_condition;
    Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_active { false };
    bool m_is_joinable { true };
    bool m_handling_page_fault { false };
    PreviousMode m_previous_mode { PreviousMode::UserMode };

    unsigned m_syscall_count { 0 };
    unsigned m_inode_faults { 0 };
    unsigned m_zero_faults { 0 };
    unsigned m_cow_faults { 0 };

    unsigned m_file_read_bytes { 0 };
    unsigned m_file_write_bytes { 0 };

    unsigned m_unix_socket_read_bytes { 0 };
    unsigned m_unix_socket_write_bytes { 0 };

    unsigned m_ipv4_socket_read_bytes { 0 };
    unsigned m_ipv4_socket_write_bytes { 0 };

    FPUState* m_fpu_state { nullptr };
    State m_state { Invalid };
    String m_name;
    u32 m_priority { THREAD_PRIORITY_NORMAL };

    State m_stop_state { Invalid };

    bool m_dump_backtrace_on_finalization { false };
    bool m_should_die { false };
    bool m_initialized { false };
    bool m_in_block { false };
    Atomic<bool> m_have_any_unmasked_pending_signals { false };

    void yield_without_holding_big_lock();
    void donate_without_holding_big_lock(RefPtr<Thread>&, const char*);
    void yield_while_not_holding_big_lock();
    void drop_thread_count(bool);
};

AK_ENUM_BITWISE_OPERATORS(Thread::FileBlocker::BlockFlags);
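
// With the operators generated above, BlockFlags values compose as a typed
// flag set; AK/EnumBits.h is assumed to also provide has_flag() for testing.
// Illustrative:
//
//     auto flags = Thread::FileBlocker::BlockFlags::Read
//         | Thread::FileBlocker::BlockFlags::Write;
//     if (has_flag(flags, Thread::FileBlocker::BlockFlags::Read))
//         ; // readable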

template<typename Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    ScopedSpinLock lock(g_tid_map_lock);
    for (auto& it : *g_tid_map) {
        IterationDecision decision = callback(*it.value);
        if (decision != IterationDecision::Continue)
            return decision;
    }
    return IterationDecision::Continue;
}

template<typename Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    ScopedSpinLock lock(g_tid_map_lock);
    for (auto& it : *g_tid_map) {
        auto& thread = *it.value;
        if (thread.state() != state)
            continue;
        IterationDecision decision = callback(thread);
        if (decision != IterationDecision::Continue)
            return decision;
    }
    return IterationDecision::Continue;
}
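
// Example use of the iteration helpers above (illustrative only): counting
// runnable threads.
//
//     size_t count = 0;
//     Thread::for_each_in_state(Thread::Runnable, [&](Thread&) {
//         ++count;
//         return IterationDecision::Continue;
//     });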

}

template<>
struct AK::Formatter<Kernel::Thread> : AK::Formatter<FormatString> {
    void format(FormatBuilder&, const Kernel::Thread&);
};