Thread.h

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include <AK/Function.h>
#include <AK/HashMap.h>
#include <AK/IntrusiveList.h>
#include <AK/Optional.h>
#include <AK/OwnPtr.h>
#include <AK/String.h>
#include <AK/Time.h>
#include <AK/Vector.h>
#include <AK/WeakPtr.h>
#include <AK/Weakable.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Arch/i386/SafeMem.h>
#include <Kernel/Debug.h>
#include <Kernel/Forward.h>
#include <Kernel/KResult.h>
#include <Kernel/LockMode.h>
#include <Kernel/Scheduler.h>
#include <Kernel/ThreadTracer.h>
#include <Kernel/TimerQueue.h>
#include <Kernel/UnixTypes.h>
#include <LibC/fd_set.h>
#include <LibC/signal_numbers.h>

namespace Kernel {

extern RecursiveSpinLock s_mm_lock;

enum class DispatchSignalResult {
    Deferred = 0,
    Yield,
    Terminate,
    Continue
};

struct SignalActionData {
    VirtualAddress handler_or_sigaction;
    u32 mask { 0 };
    int flags { 0 };
};

struct ThreadSpecificData {
    ThreadSpecificData* self;
};

#define THREAD_PRIORITY_MIN 1
#define THREAD_PRIORITY_LOW 10
#define THREAD_PRIORITY_NORMAL 30
#define THREAD_PRIORITY_HIGH 50
#define THREAD_PRIORITY_MAX 99

#define THREAD_AFFINITY_DEFAULT 0xffffffff
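
// Illustrative sketch (not part of the original header): the priority values
// and the affinity mask are plain integers, so a thread that should run only
// on the first two CPUs at low priority could be configured like this. The
// `thread` pointer is hypothetical.
//
//     thread->set_priority(THREAD_PRIORITY_LOW);
//     thread->set_affinity((1u << 0) | (1u << 1)); // CPUs 0 and 1 only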
class Thread
    : public RefCounted<Thread>
    , public Weakable<Thread> {
    AK_MAKE_NONCOPYABLE(Thread);
    AK_MAKE_NONMOVABLE(Thread);

    friend class Process;
    friend class Scheduler;
    friend class ThreadReadyQueue;

    static SpinLock<u8> g_tid_map_lock;
    static HashMap<ThreadID, Thread*>* g_tid_map;

public:
    inline static Thread* current()
    {
        return Processor::current_thread();
    }

    static void initialize();

    static KResultOr<NonnullRefPtr<Thread>> try_create(NonnullRefPtr<Process>);
    ~Thread();

    static RefPtr<Thread> from_tid(ThreadID);
    static void finalize_dying_threads();

    ThreadID tid() const { return m_tid; }
    ProcessID pid() const;

    void set_priority(u32 p) { m_priority = p; }
    u32 priority() const { return m_priority; }

    void detach()
    {
        ScopedSpinLock lock(m_lock);
        m_is_joinable = false;
    }

    [[nodiscard]] bool is_joinable() const
    {
        ScopedSpinLock lock(m_lock);
        return m_is_joinable;
    }

    Process& process() { return m_process; }
    const Process& process() const { return m_process; }
    String name() const
    {
        // Because the name can be changed, we can't return a const
        // reference here. We must make a copy.
        ScopedSpinLock lock(m_lock);
        return m_name;
    }
    void set_name(const StringView& s)
    {
        ScopedSpinLock lock(m_lock);
        m_name = s;
    }

    void set_name(String&& name)
    {
        ScopedSpinLock lock(m_lock);
        m_name = move(name);
    }

    void finalize();

    enum State : u8 {
        Invalid = 0,
        Runnable,
        Running,
        Dying,
        Dead,
        Stopped,
        Blocked
    };

    class [[nodiscard]] BlockResult {
    public:
        enum Type {
            WokeNormally,
            NotBlocked,
            InterruptedBySignal,
            InterruptedByDeath,
            InterruptedByTimeout,
        };

        BlockResult() = delete;

        BlockResult(Type type)
            : m_type(type)
        {
        }

        bool operator==(Type type) const
        {
            return m_type == type;
        }
        bool operator!=(Type type) const
        {
            return m_type != type;
        }

        [[nodiscard]] bool was_interrupted() const
        {
            switch (m_type) {
            case InterruptedBySignal:
            case InterruptedByDeath:
                return true;
            default:
                return false;
            }
        }

        [[nodiscard]] bool timed_out() const
        {
            return m_type == InterruptedByTimeout;
        }

    private:
        Type m_type;
    };
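
    // Usage sketch (illustrative, not part of the original header): callers
    // typically test a BlockResult for interruption before looking at the
    // specific type. `duration` is a hypothetical Time value.
    //
    //     auto result = Thread::current()->sleep(duration);
    //     if (result.was_interrupted())
    //         return EINTR; // woken early by a signal or by thread death
    //     if (result == Thread::BlockResult::NotBlocked)
    //         ...           // the wake condition was already met; we never blocked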
    class BlockTimeout {
    public:
        BlockTimeout()
            : m_infinite(true)
        {
        }
        explicit BlockTimeout(bool is_absolute, const Time* time, const Time* start_time = nullptr, clockid_t clock_id = CLOCK_MONOTONIC_COARSE);

        const Time& absolute_time() const { return m_time; }
        const Time* start_time() const { return !m_infinite ? &m_start_time : nullptr; }
        clockid_t clock_id() const { return m_clock_id; }
        bool is_infinite() const { return m_infinite; }
        bool should_block() const { return m_infinite || m_should_block; }

    private:
        Time m_time {};
        Time m_start_time {};
        clockid_t m_clock_id { CLOCK_MONOTONIC_COARSE };
        bool m_infinite { false };
        bool m_should_block { false };
    };
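
    // Usage sketch (illustrative only): the default constructor makes an
    // infinite timeout, while the explicit constructor takes a relative or
    // absolute Time. `duration` and `deadline` are hypothetical Time values.
    //
    //     BlockTimeout infinite;                   // never expires
    //     BlockTimeout relative(false, &duration); // expires `duration` from now
    //     BlockTimeout absolute(true, &deadline);  // expires at `deadline`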
    class BlockCondition;

    class Blocker {
    public:
        enum class Type {
            Unknown = 0,
            File,
            Futex,
            Plan9FS,
            Join,
            Queue,
            Routing,
            Sleep,
            Wait
        };
        virtual ~Blocker();
        virtual const char* state_string() const = 0;
        virtual bool should_block() { return true; }
        virtual Type blocker_type() const = 0;
        virtual const BlockTimeout& override_timeout(const BlockTimeout& timeout) { return timeout; }
        virtual bool can_be_interrupted() const { return true; }
        virtual void not_blocking(bool) = 0;
        virtual void was_unblocked(bool did_timeout)
        {
            if (did_timeout) {
                ScopedSpinLock lock(m_lock);
                m_did_timeout = true;
            }
        }
        void set_interrupted_by_death()
        {
            ScopedSpinLock lock(m_lock);
            do_set_interrupted_by_death();
        }
        void set_interrupted_by_signal(u8 signal)
        {
            ScopedSpinLock lock(m_lock);
            do_set_interrupted_by_signal(signal);
        }
        u8 was_interrupted_by_signal() const
        {
            ScopedSpinLock lock(m_lock);
            return do_get_interrupted_by_signal();
        }
        virtual Thread::BlockResult block_result()
        {
            ScopedSpinLock lock(m_lock);
            if (m_was_interrupted_by_death)
                return Thread::BlockResult::InterruptedByDeath;
            if (m_was_interrupted_by_signal != 0)
                return Thread::BlockResult::InterruptedBySignal;
            if (m_did_timeout)
                return Thread::BlockResult::InterruptedByTimeout;
            return Thread::BlockResult::WokeNormally;
        }

        void begin_blocking(Badge<Thread>);
        BlockResult end_blocking(Badge<Thread>, bool);

    protected:
        void do_set_interrupted_by_death()
        {
            m_was_interrupted_by_death = true;
        }
        void do_set_interrupted_by_signal(u8 signal)
        {
            VERIFY(signal != 0);
            m_was_interrupted_by_signal = signal;
        }
        void do_clear_interrupted_by_signal()
        {
            m_was_interrupted_by_signal = 0;
        }
        u8 do_get_interrupted_by_signal() const
        {
            return m_was_interrupted_by_signal;
        }
        [[nodiscard]] bool was_interrupted() const
        {
            return m_was_interrupted_by_death || m_was_interrupted_by_signal != 0;
        }
        void unblock_from_blocker()
        {
            RefPtr<Thread> thread;
            {
                ScopedSpinLock lock(m_lock);
                if (m_is_blocking) {
                    m_is_blocking = false;
                    VERIFY(m_blocked_thread);
                    thread = m_blocked_thread;
                }
            }
            if (thread)
                thread->unblock_from_blocker(*this);
        }

        bool set_block_condition(BlockCondition&, void* = nullptr);
        void set_block_condition_raw_locked(BlockCondition* block_condition)
        {
            m_block_condition = block_condition;
        }

        mutable RecursiveSpinLock m_lock;

    private:
        BlockCondition* m_block_condition { nullptr };
        void* m_block_data { nullptr };
        Thread* m_blocked_thread { nullptr };
        u8 m_was_interrupted_by_signal { 0 };
        bool m_is_blocking { false };
        bool m_was_interrupted_by_death { false };
        bool m_did_timeout { false };
    };
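
    // Sketch of a minimal Blocker subclass (hypothetical, for illustration):
    // a concrete blocker must implement the three pure virtuals and usually
    // gates blocking on its own state via should_block(), as the blockers
    // below do.
    //
    //     class ExampleBlocker final : public Blocker {
    //     public:
    //         virtual Type blocker_type() const override { return Type::Unknown; }
    //         virtual const char* state_string() const override { return "Example"; }
    //         virtual void not_blocking(bool) override { }
    //         virtual bool should_block() override { return m_should_block; }
    //
    //     private:
    //         bool m_should_block { true };
    //     };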
    class BlockCondition {
        AK_MAKE_NONCOPYABLE(BlockCondition);
        AK_MAKE_NONMOVABLE(BlockCondition);

    public:
        BlockCondition() = default;

        virtual ~BlockCondition()
        {
            ScopedSpinLock lock(m_lock);
            VERIFY(m_blockers.is_empty());
        }

        bool add_blocker(Blocker& blocker, void* data)
        {
            ScopedSpinLock lock(m_lock);
            if (!should_add_blocker(blocker, data))
                return false;
            m_blockers.append({ &blocker, data });
            return true;
        }

        void remove_blocker(Blocker& blocker, void* data)
        {
            ScopedSpinLock lock(m_lock);
            // NOTE: it's possible that the blocker is no longer present
            m_blockers.remove_first_matching([&](auto& info) {
                return info.blocker == &blocker && info.data == data;
            });
        }

        bool is_empty() const
        {
            ScopedSpinLock lock(m_lock);
            return is_empty_locked();
        }

    protected:
        template<typename UnblockOne>
        bool unblock(UnblockOne unblock_one)
        {
            ScopedSpinLock lock(m_lock);
            return do_unblock(unblock_one);
        }

        template<typename UnblockOne>
        bool do_unblock(UnblockOne unblock_one)
        {
            VERIFY(m_lock.is_locked());
            bool stop_iterating = false;
            bool did_unblock = false;
            for (size_t i = 0; i < m_blockers.size() && !stop_iterating;) {
                auto& info = m_blockers[i];
                if (unblock_one(*info.blocker, info.data, stop_iterating)) {
                    m_blockers.remove(i);
                    did_unblock = true;
                    continue;
                }
                i++;
            }
            return did_unblock;
        }

        bool is_empty_locked() const
        {
            VERIFY(m_lock.is_locked());
            return m_blockers.is_empty();
        }

        virtual bool should_add_blocker(Blocker&, void*) { return true; }

        struct BlockerInfo {
            Blocker* blocker;
            void* data;
        };

        Vector<BlockerInfo, 4> do_take_blockers(size_t count)
        {
            if (m_blockers.size() <= count)
                return move(m_blockers);

            size_t move_count = (count <= m_blockers.size()) ? count : m_blockers.size();
            VERIFY(move_count > 0);

            Vector<BlockerInfo, 4> taken_blockers;
            taken_blockers.ensure_capacity(move_count);
            for (size_t i = 0; i < move_count; i++)
                taken_blockers.append(m_blockers.take(i));
            m_blockers.remove(0, move_count);
            return taken_blockers;
        }

        void do_append_blockers(Vector<BlockerInfo, 4>&& blockers_to_append)
        {
            if (blockers_to_append.is_empty())
                return;

            if (m_blockers.is_empty()) {
                m_blockers = move(blockers_to_append);
                return;
            }

            m_blockers.ensure_capacity(m_blockers.size() + blockers_to_append.size());
            for (size_t i = 0; i < blockers_to_append.size(); i++)
                m_blockers.append(blockers_to_append.take(i));
            blockers_to_append.clear();
        }

        mutable SpinLock<u8> m_lock;

    private:
        Vector<BlockerInfo, 4> m_blockers;
    };
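
    // Usage sketch (illustrative): subclasses wake waiters by passing a
    // callback to unblock(). Returning true removes that blocker from the
    // list; setting stop_iterating ends the walk early. JoinBlockCondition
    // below uses exactly this pattern.
    //
    //     unblock([&](Blocker& b, void* data, bool& stop_iterating) {
    //         // Decide whether `b` can be woken, using `data` as context.
    //         return true; // unblocked: remove it from m_blockers
    //     });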
    friend class JoinBlocker;
    class JoinBlocker final : public Blocker {
    public:
        explicit JoinBlocker(Thread& joinee, KResult& try_join_result, void*& joinee_exit_value);
        virtual Type blocker_type() const override { return Type::Join; }
        virtual const char* state_string() const override { return "Joining"; }
        virtual bool can_be_interrupted() const override { return false; }
        virtual bool should_block() override { return !m_join_error && m_should_block; }
        virtual void not_blocking(bool) override;

        bool unblock(void*, bool);

    private:
        NonnullRefPtr<Thread> m_joinee;
        void*& m_joinee_exit_value;
        bool m_join_error { false };
        bool m_did_unblock { false };
        bool m_should_block { true };
    };

    class QueueBlocker : public Blocker {
    public:
        explicit QueueBlocker(WaitQueue&, const char* block_reason = nullptr);
        virtual ~QueueBlocker();

        virtual Type blocker_type() const override { return Type::Queue; }
        virtual const char* state_string() const override { return m_block_reason ? m_block_reason : "Queue"; }
        virtual void not_blocking(bool) override { }

        virtual bool should_block() override
        {
            return m_should_block;
        }

        bool unblock();

    protected:
        const char* const m_block_reason;
        bool m_should_block { true };
        bool m_did_unblock { false };
    };
    class FutexBlocker : public Blocker {
    public:
        explicit FutexBlocker(FutexQueue&, u32);
        virtual ~FutexBlocker();

        virtual Type blocker_type() const override { return Type::Futex; }
        virtual const char* state_string() const override { return "Futex"; }
        virtual void not_blocking(bool) override { }

        virtual bool should_block() override
        {
            return m_should_block;
        }

        u32 bitset() const { return m_bitset; }

        void begin_requeue()
        {
            // We need to hold the lock until we've moved it over.
            m_relock_flags = m_lock.lock();
        }
        void finish_requeue(FutexQueue&);

        bool unblock_bitset(u32 bitset);
        bool unblock(bool force = false);

    protected:
        u32 m_bitset;
        u32 m_relock_flags { 0 };
        bool m_should_block { true };
        bool m_did_unblock { false };
    };
    class FileBlocker : public Blocker {
    public:
        enum class BlockFlags : u32 {
            None = 0,

            Read = 1 << 0,
            Write = 1 << 1,
            ReadPriority = 1 << 2,

            Accept = 1 << 3,
            Connect = 1 << 4,
            SocketFlags = Accept | Connect,

            WriteNotOpen = 1 << 5,
            WriteError = 1 << 6,
            WriteHangUp = 1 << 7,
            ReadHangUp = 1 << 8,
            Exception = WriteNotOpen | WriteError | WriteHangUp | ReadHangUp,
        };

        virtual Type blocker_type() const override { return Type::File; }

        virtual bool should_block() override
        {
            return m_should_block;
        }

        virtual bool unblock(bool, void*) = 0;

    protected:
        bool m_should_block { true };
    };

    class FileDescriptionBlocker : public FileBlocker {
    public:
        const FileDescription& blocked_description() const;

        virtual bool unblock(bool, void*) override;
        virtual void not_blocking(bool) override;

    protected:
        explicit FileDescriptionBlocker(FileDescription&, BlockFlags, BlockFlags&);

    private:
        NonnullRefPtr<FileDescription> m_blocked_description;
        const BlockFlags m_flags;
        BlockFlags& m_unblocked_flags;
        bool m_did_unblock { false };
        bool m_should_block { true };
    };

    class AcceptBlocker final : public FileDescriptionBlocker {
    public:
        explicit AcceptBlocker(FileDescription&, BlockFlags&);
        virtual const char* state_string() const override { return "Accepting"; }
    };

    class ConnectBlocker final : public FileDescriptionBlocker {
    public:
        explicit ConnectBlocker(FileDescription&, BlockFlags&);
        virtual const char* state_string() const override { return "Connecting"; }
    };

    class WriteBlocker final : public FileDescriptionBlocker {
    public:
        explicit WriteBlocker(FileDescription&, BlockFlags&);
        virtual const char* state_string() const override { return "Writing"; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;

    private:
        BlockTimeout m_timeout;
    };

    class ReadBlocker final : public FileDescriptionBlocker {
    public:
        explicit ReadBlocker(FileDescription&, BlockFlags&);
        virtual const char* state_string() const override { return "Reading"; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;

    private:
        BlockTimeout m_timeout;
    };

    class SleepBlocker final : public Blocker {
    public:
        explicit SleepBlocker(const BlockTimeout&, Time* = nullptr);
        virtual const char* state_string() const override { return "Sleeping"; }
        virtual Type blocker_type() const override { return Type::Sleep; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;
        virtual void not_blocking(bool) override;
        virtual void was_unblocked(bool) override;
        virtual Thread::BlockResult block_result() override;

    private:
        void calculate_remaining();

        BlockTimeout m_deadline;
        Time* m_remaining;
    };

    class SelectBlocker final : public FileBlocker {
    public:
        struct FDInfo {
            NonnullRefPtr<FileDescription> description;
            BlockFlags block_flags;
            BlockFlags unblocked_flags { BlockFlags::None };
        };

        typedef Vector<FDInfo, FD_SETSIZE> FDVector;
        SelectBlocker(FDVector& fds);
        virtual ~SelectBlocker();

        virtual bool unblock(bool, void*) override;
        virtual void not_blocking(bool) override;
        virtual void was_unblocked(bool) override;
        virtual const char* state_string() const override { return "Selecting"; }

    private:
        size_t collect_unblocked_flags();

        FDVector& m_fds;
        bool m_did_unblock { false };
    };
    class WaitBlocker final : public Blocker {
    public:
        enum class UnblockFlags {
            Terminated,
            Stopped,
            Continued,
            Disowned
        };

        WaitBlocker(int wait_options, idtype_t id_type, pid_t id, KResultOr<siginfo_t>& result);
        virtual const char* state_string() const override { return "Waiting"; }
        virtual Type blocker_type() const override { return Type::Wait; }
        virtual bool should_block() override { return m_should_block; }
        virtual void not_blocking(bool) override;
        virtual void was_unblocked(bool) override;

        bool unblock(Process& process, UnblockFlags flags, u8 signal, bool from_add_blocker);
        bool is_wait() const { return !(m_wait_options & WNOWAIT); }

    private:
        void do_was_disowned();
        void do_set_result(const siginfo_t&);

        const int m_wait_options;
        const idtype_t m_id_type;
        const pid_t m_waitee_id;
        KResultOr<siginfo_t>& m_result;
        RefPtr<Process> m_waitee;
        RefPtr<ProcessGroup> m_waitee_group;
        bool m_did_unblock { false };
        bool m_error { false };
        bool m_got_sigchild { false };
        bool m_should_block;
    };

    class WaitBlockCondition final : public BlockCondition {
        friend class WaitBlocker;

    public:
        WaitBlockCondition(Process& process)
            : m_process(process)
        {
        }

        void disowned_by_waiter(Process&);
        bool unblock(Process&, WaitBlocker::UnblockFlags, u8);
        void try_unblock(WaitBlocker&);
        void finalize();

    protected:
        virtual bool should_add_blocker(Blocker&, void*) override;

    private:
        struct ProcessBlockInfo {
            NonnullRefPtr<Process> process;
            WaitBlocker::UnblockFlags flags;
            u8 signal;
            bool was_waited { false };

            explicit ProcessBlockInfo(NonnullRefPtr<Process>&&, WaitBlocker::UnblockFlags, u8);
            ~ProcessBlockInfo();
        };

        Process& m_process;
        Vector<ProcessBlockInfo, 2> m_processes;
        bool m_finalized { false };
    };
    template<typename AddBlockerHandler>
    KResult try_join(AddBlockerHandler add_blocker)
    {
        if (Thread::current() == this)
            return EDEADLK;

        ScopedSpinLock lock(m_lock);
        if (!m_is_joinable || state() == Dead)
            return EINVAL;

        add_blocker();

        // From this point on the thread is no longer joinable by anyone
        // else. It also means that if the join is timed, it becomes
        // detached when a timeout happens.
        m_is_joinable = false;
        return KSuccess;
    }
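
    // Usage sketch (hypothetical caller): the add_blocker callback runs under
    // m_lock after the joinability checks have passed, so that is where the
    // caller registers its JoinBlocker against the joinee.
    //
    //     KResult result = joinee.try_join([&]() {
    //         // register the blocker while m_lock is held
    //     });
    //     if (result.is_error())
    //         return result.error(); // EDEADLK (self-join) or EINVAL (not joinable)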
    void did_schedule() { ++m_times_scheduled; }
    u32 times_scheduled() const { return m_times_scheduled; }

    void resume_from_stopped();

    [[nodiscard]] bool should_be_stopped() const;
    [[nodiscard]] bool is_stopped() const { return m_state == Stopped; }
    [[nodiscard]] bool is_blocked() const { return m_state == Blocked; }
    [[nodiscard]] bool is_in_block() const
    {
        ScopedSpinLock lock(m_block_lock);
        return m_in_block;
    }

    u32 cpu() const { return m_cpu.load(AK::MemoryOrder::memory_order_consume); }
    void set_cpu(u32 cpu) { m_cpu.store(cpu, AK::MemoryOrder::memory_order_release); }
    u32 affinity() const { return m_cpu_affinity; }
    void set_affinity(u32 affinity) { m_cpu_affinity = affinity; }

    RegisterState& get_register_dump_from_stack();
    const RegisterState& get_register_dump_from_stack() const { return const_cast<Thread*>(this)->get_register_dump_from_stack(); }

    TSS32& tss() { return m_tss; }
    const TSS32& tss() const { return m_tss; }
    State state() const { return m_state; }
    const char* state_string() const;

    VirtualAddress thread_specific_data() const { return m_thread_specific_data; }
    size_t thread_specific_region_size() const;
    size_t thread_specific_region_alignment() const;
    ALWAYS_INLINE void yield_if_stopped()
    {
        // If some thread stopped us, we need to yield to someone else.
        // We check this when entering/exiting a system call. A thread
        // may continue to execute in user land until the next timer
        // tick or entering the next system call, or if it's in kernel
        // mode then we will intercept prior to returning back to user
        // mode.
        ScopedSpinLock lock(m_lock);
        while (state() == Thread::Stopped) {
            lock.unlock();
            // We shouldn't be holding the big lock here
            yield_while_not_holding_big_lock();
            lock.lock();
        }
    }
    template<typename T, class... Args>
    [[nodiscard]] BlockResult block(const BlockTimeout& timeout, Args&&... args)
    {
        VERIFY(!Processor::current().in_irq());
        VERIFY(this == Thread::current());
        ScopedCritical critical;
        VERIFY(!s_mm_lock.own_lock());

        ScopedSpinLock block_lock(m_block_lock);
        // We need to hold m_block_lock so that nobody can unblock a blocker as soon
        // as it is constructed and registered elsewhere
        m_in_block = true;
        T t(forward<Args>(args)...);

        ScopedSpinLock scheduler_lock(g_scheduler_lock);
        // Relaxed semantics are fine for timeout_unblocked because we
        // synchronize on the spin locks already.
        Atomic<bool, AK::MemoryOrder::memory_order_relaxed> timeout_unblocked(false);
        RefPtr<Timer> timer;
        {
            switch (state()) {
            case Thread::Stopped:
                // It's possible that we were requested to be stopped!
                break;
            case Thread::Running:
                VERIFY(m_blocker == nullptr);
                break;
            default:
                VERIFY_NOT_REACHED();
            }

            m_blocker = &t;
            if (!t.should_block()) {
                // Don't block if the wake condition is already met
                t.not_blocking(false);
                m_blocker = nullptr;
                m_in_block = false;
                return BlockResult::NotBlocked;
            }

            auto& block_timeout = t.override_timeout(timeout);
            if (!block_timeout.is_infinite()) {
                // Process::kill_all_threads may be called at any time, which will mark all
                // threads to die.
                timer = TimerQueue::the().add_timer_without_id(block_timeout.clock_id(), block_timeout.absolute_time(), [&]() {
                    VERIFY(!Processor::current().in_irq());
                    VERIFY(!g_scheduler_lock.own_lock());
                    VERIFY(!m_block_lock.own_lock());
                    // NOTE: this may execute on the same or any other processor!
                    ScopedSpinLock scheduler_lock(g_scheduler_lock);
                    ScopedSpinLock block_lock(m_block_lock);
                    if (m_blocker && timeout_unblocked.exchange(true) == false)
                        unblock();
                });
                if (!timer) {
                    // Timeout is already in the past
                    t.not_blocking(true);
                    m_blocker = nullptr;
                    m_in_block = false;
                    return BlockResult::InterruptedByTimeout;
                }
            }

            t.begin_blocking({});

            set_state(Thread::Blocked);
        }

        scheduler_lock.unlock();
        block_lock.unlock();

        dbgln_if(THREAD_DEBUG, "Thread {} blocking on {} ({}) -->", *this, &t, t.state_string());
        bool did_timeout = false;
        u32 lock_count_to_restore = 0;
        auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
        for (;;) {
            // Yield to the scheduler, and wait for us to resume unblocked.
            VERIFY(!g_scheduler_lock.own_lock());
            VERIFY(Processor::current().in_critical());
            yield_while_not_holding_big_lock();
            VERIFY(Processor::current().in_critical());

            ScopedSpinLock block_lock2(m_block_lock);
            if (should_be_stopped() || state() == Stopped) {
                dbgln("Thread should be stopped, current state: {}", state_string());
                set_state(Thread::Blocked);
                continue;
            }
            if (m_blocker && !m_blocker->can_be_interrupted() && !m_should_die) {
                block_lock2.unlock();
                dbgln("Thread should not be unblocking, current state: {}", state_string());
                set_state(Thread::Blocked);
                continue;
            }
            // Prevent the timeout from unblocking this thread if it happens to
            // be in the process of firing already
            did_timeout |= timeout_unblocked.exchange(true);
            if (m_blocker) {
                // Remove ourselves...
                VERIFY(m_blocker == &t);
                m_blocker = nullptr;
            }
            dbgln_if(THREAD_DEBUG, "<-- Thread {} unblocked from {} ({})", *this, &t, t.state_string());
            m_in_block = false;
            break;
        }

        if (t.was_interrupted_by_signal()) {
            ScopedSpinLock scheduler_lock(g_scheduler_lock);
            ScopedSpinLock lock(m_lock);
            dispatch_one_pending_signal();
        }

        // Notify the blocker that we are no longer blocking. It may need
        // to clean up now while we're still holding m_lock
        auto result = t.end_blocking({}, did_timeout); // calls was_unblocked internally

        if (timer && !did_timeout) {
            // Cancel the timer while not holding any locks. This allows
            // the timer function to complete before we remove it
            // (e.g. if it's on another processor)
            TimerQueue::the().cancel_timer(timer.release_nonnull());
        }
        if (previous_locked != LockMode::Unlocked) {
            // NOTE: this may trigger another call to Thread::block(), so
            // we need to do this after we're all done and restored m_in_block!
            relock_process(previous_locked, lock_count_to_restore);
        }
        return result;
    }
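
    // Usage sketch (illustrative): the blocker type is the template parameter
    // and its constructor arguments follow the timeout. `SomeBlocker` and its
    // constructor arguments are hypothetical; wait_on() and sleep() below are
    // real wrappers built exactly this way.
    //
    //     auto result = Thread::current()->block<SomeBlocker>(timeout, ctor_args...);
    //     if (result.was_interrupted())
    //         ... // a signal or pending death cut the block short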
    void unblock_from_blocker(Blocker&);
    void unblock(u8 signal = 0);

    template<class... Args>
    Thread::BlockResult wait_on(WaitQueue& wait_queue, const Thread::BlockTimeout& timeout, Args&&... args)
    {
        VERIFY(this == Thread::current());
        return block<Thread::QueueBlocker>(timeout, wait_queue, forward<Args>(args)...);
    }
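
    // Usage sketch (illustrative): waiting on a WaitQueue forever, with the
    // optional block-reason string forwarded to QueueBlocker's constructor.
    // `queue` is a hypothetical WaitQueue.
    //
    //     auto result = Thread::current()->wait_on(queue, BlockTimeout(), "ExampleReason");
    //     if (result.was_interrupted())
    //         return EINTR;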
    BlockResult sleep(clockid_t, const Time&, Time* = nullptr);
    BlockResult sleep(const Time& duration, Time* remaining_time = nullptr)
    {
        return sleep(CLOCK_MONOTONIC_COARSE, duration, remaining_time);
    }
    BlockResult sleep_until(clockid_t, const Time&);
    BlockResult sleep_until(const Time& duration)
    {
        return sleep_until(CLOCK_MONOTONIC_COARSE, duration);
    }
    // Tell this thread to unblock if needed,
    // gracefully unwind the stack and die.
    void set_should_die();
    [[nodiscard]] bool should_die() const { return m_should_die; }
    void die_if_needed();

    void exit(void* = nullptr);

    bool tick();
    void set_ticks_left(u32 t) { m_ticks_left = t; }
    u32 ticks_left() const { return m_ticks_left; }

    u32 kernel_stack_base() const { return m_kernel_stack_base; }
    u32 kernel_stack_top() const { return m_kernel_stack_top; }

    void set_state(State, u8 = 0);

    [[nodiscard]] bool is_initialized() const { return m_initialized; }
    void set_initialized(bool initialized) { m_initialized = initialized; }

    void send_urgent_signal_to_self(u8 signal);
    void send_signal(u8 signal, Process* sender);

    u32 update_signal_mask(u32 signal_mask);
    u32 signal_mask_block(sigset_t signal_set, bool block);
    u32 signal_mask() const;
    void clear_signals();

    void set_dump_backtrace_on_finalization() { m_dump_backtrace_on_finalization = true; }

    DispatchSignalResult dispatch_one_pending_signal();
    DispatchSignalResult try_dispatch_one_pending_signal(u8 signal);
    DispatchSignalResult dispatch_signal(u8 signal);
    void check_dispatch_pending_signal();
    [[nodiscard]] bool has_unmasked_pending_signals() const { return m_have_any_unmasked_pending_signals.load(AK::memory_order_consume); }
    [[nodiscard]] bool should_ignore_signal(u8 signal) const;
    [[nodiscard]] bool has_signal_handler(u8 signal) const;
    u32 pending_signals() const;
    u32 pending_signals_for_state() const;

    FPUState& fpu_state() { return *m_fpu_state; }

    KResult make_thread_specific_region(Badge<Process>);

    unsigned syscall_count() const { return m_syscall_count; }
    void did_syscall() { ++m_syscall_count; }
    unsigned inode_faults() const { return m_inode_faults; }
    void did_inode_fault() { ++m_inode_faults; }
    unsigned zero_faults() const { return m_zero_faults; }
    void did_zero_fault() { ++m_zero_faults; }
    unsigned cow_faults() const { return m_cow_faults; }
    void did_cow_fault() { ++m_cow_faults; }

    unsigned file_read_bytes() const { return m_file_read_bytes; }
    unsigned file_write_bytes() const { return m_file_write_bytes; }

    void did_file_read(unsigned bytes)
    {
        m_file_read_bytes += bytes;
    }

    void did_file_write(unsigned bytes)
    {
        m_file_write_bytes += bytes;
    }

    unsigned unix_socket_read_bytes() const { return m_unix_socket_read_bytes; }
    unsigned unix_socket_write_bytes() const { return m_unix_socket_write_bytes; }

    void did_unix_socket_read(unsigned bytes)
    {
        m_unix_socket_read_bytes += bytes;
    }

    void did_unix_socket_write(unsigned bytes)
    {
        m_unix_socket_write_bytes += bytes;
    }

    unsigned ipv4_socket_read_bytes() const { return m_ipv4_socket_read_bytes; }
    unsigned ipv4_socket_write_bytes() const { return m_ipv4_socket_write_bytes; }

    void did_ipv4_socket_read(unsigned bytes)
    {
        m_ipv4_socket_read_bytes += bytes;
    }

    void did_ipv4_socket_write(unsigned bytes)
    {
        m_ipv4_socket_write_bytes += bytes;
    }

    void set_active(bool active) { m_is_active = active; }

    u32 saved_critical() const { return m_saved_critical; }
    void save_critical(u32 critical) { m_saved_critical = critical; }

    [[nodiscard]] bool is_active() const { return m_is_active; }
    [[nodiscard]] bool is_finalizable() const
    {
        // We can't finalize as long as this thread is still running.
        // Note that checking for Running state here isn't sufficient
        // as the thread may not be in Running state but switching out.
        // m_is_active is set to false once the context switch is
        // complete and the thread is not executing on any processor.
        if (m_is_active.load(AK::memory_order_acquire))
            return false;

        // We can't finalize until the thread is either detached or
        // a join has started. We can't make m_is_joinable atomic
        // because that would introduce a race in try_join.
        ScopedSpinLock lock(m_lock);
        return !m_is_joinable;
    }
    RefPtr<Thread> clone(Process&);

    template<typename Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<typename Callback>
    static IterationDecision for_each(Callback);

    static constexpr u32 default_kernel_stack_size = 65536;
    static constexpr u32 default_userspace_stack_size = 1 * MiB;

    u32 ticks_in_user() const { return m_ticks_in_user; }
    u32 ticks_in_kernel() const { return m_ticks_in_kernel; }

    enum class PreviousMode : u8 {
        KernelMode = 0,
        UserMode
    };
    PreviousMode previous_mode() const { return m_previous_mode; }
    void set_previous_mode(PreviousMode mode) { m_previous_mode = mode; }
    TrapFrame*& current_trap() { return m_current_trap; }

    RecursiveSpinLock& get_lock() const { return m_lock; }

#if LOCK_DEBUG
    void holding_lock(Lock& lock, int refs_delta, const char* file = nullptr, int line = 0)
    {
        VERIFY(refs_delta != 0);
        m_holding_locks.fetch_add(refs_delta, AK::MemoryOrder::memory_order_relaxed);
        ScopedSpinLock list_lock(m_holding_locks_lock);
        if (refs_delta > 0) {
            bool have_existing = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    have_existing = true;
                    info.count += refs_delta;
                    break;
                }
            }
            if (!have_existing)
                m_holding_locks_list.append({ &lock, file ? file : "unknown", line, 1 });
        } else {
            VERIFY(refs_delta < 0);
            bool found = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    VERIFY(info.count >= (unsigned)-refs_delta);
                    info.count -= (unsigned)-refs_delta;
                    if (info.count == 0)
                        m_holding_locks_list.remove(i);
                    found = true;
                    break;
                }
            }
            VERIFY(found);
        }
    }
    u32 lock_count() const
    {
        return m_holding_locks.load(AK::MemoryOrder::memory_order_relaxed);
    }
#endif

    bool is_handling_page_fault() const
    {
        return m_handling_page_fault;
    }
    void set_handling_page_fault(bool b) { m_handling_page_fault = b; }
private:
    Thread(NonnullRefPtr<Process>, NonnullOwnPtr<Region> kernel_stack_region);

    IntrusiveListNode m_process_thread_list_node;
    int m_runnable_priority { -1 };

    friend class WaitQueue;

    class JoinBlockCondition : public BlockCondition {
    public:
        void thread_did_exit(void* exit_value)
        {
            ScopedSpinLock lock(m_lock);
            VERIFY(!m_thread_did_exit);
            m_thread_did_exit = true;
            m_exit_value.store(exit_value, AK::MemoryOrder::memory_order_release);
            do_unblock_joiner();
        }
        void thread_finalizing()
        {
            ScopedSpinLock lock(m_lock);
            do_unblock_joiner();
        }
        void* exit_value() const
        {
            VERIFY(m_thread_did_exit);
            return m_exit_value.load(AK::MemoryOrder::memory_order_acquire);
        }

        void try_unblock(JoinBlocker& blocker)
        {
            ScopedSpinLock lock(m_lock);
            if (m_thread_did_exit)
                blocker.unblock(exit_value(), false);
        }

    protected:
        virtual bool should_add_blocker(Blocker& b, void*) override
        {
            VERIFY(b.blocker_type() == Blocker::Type::Join);
            auto& blocker = static_cast<JoinBlocker&>(b);

            // NOTE: m_lock is held already!
            if (m_thread_did_exit) {
                blocker.unblock(exit_value(), true);
                return false;
            }
            return true;
        }

    private:
        void do_unblock_joiner()
        {
            do_unblock([&](Blocker& b, void*, bool&) {
                VERIFY(b.blocker_type() == Blocker::Type::Join);
                auto& blocker = static_cast<JoinBlocker&>(b);
                return blocker.unblock(exit_value(), false);
            });
        }

        Atomic<void*> m_exit_value { nullptr };
        bool m_thread_did_exit { false };
    };

    LockMode unlock_process_if_locked(u32&);
    void relock_process(LockMode, u32);
    String backtrace();
    void reset_fpu_state();

    mutable RecursiveSpinLock m_lock;
    mutable RecursiveSpinLock m_block_lock;
    NonnullRefPtr<Process> m_process;
    ThreadID m_tid { -1 };
    TSS32 m_tss {};
    TrapFrame* m_current_trap { nullptr };
    u32 m_saved_critical { 1 };
    IntrusiveListNode m_ready_queue_node;
    Atomic<u32> m_cpu { 0 };
    u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
    u32 m_ticks_left { 0 };
    u32 m_times_scheduled { 0 };
    u32 m_ticks_in_user { 0 };
    u32 m_ticks_in_kernel { 0 };
    u32 m_pending_signals { 0 };
    u32 m_signal_mask { 0 };
    u32 m_kernel_stack_base { 0 };
    u32 m_kernel_stack_top { 0 };
    OwnPtr<Region> m_kernel_stack_region;
    VirtualAddress m_thread_specific_data;
    Array<SignalActionData, NSIG> m_signal_action_data;
    Blocker* m_blocker { nullptr };

#if LOCK_DEBUG
    struct HoldingLockInfo {
        Lock* lock;
        const char* file;
        int line;
        unsigned count;
    };
    Atomic<u32> m_holding_locks { 0 };
    SpinLock<u8> m_holding_locks_lock;
    Vector<HoldingLockInfo> m_holding_locks_list;
#endif

    JoinBlockCondition m_join_condition;
    Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_active { false };
    bool m_is_joinable { true };
    bool m_handling_page_fault { false };
    PreviousMode m_previous_mode { PreviousMode::UserMode };

    unsigned m_syscall_count { 0 };
    unsigned m_inode_faults { 0 };
    unsigned m_zero_faults { 0 };
    unsigned m_cow_faults { 0 };

    unsigned m_file_read_bytes { 0 };
    unsigned m_file_write_bytes { 0 };

    unsigned m_unix_socket_read_bytes { 0 };
    unsigned m_unix_socket_write_bytes { 0 };

    unsigned m_ipv4_socket_read_bytes { 0 };
    unsigned m_ipv4_socket_write_bytes { 0 };

    FPUState* m_fpu_state { nullptr };
    State m_state { Invalid };
    String m_name;
    u32 m_priority { THREAD_PRIORITY_NORMAL };

    State m_stop_state { Invalid };

    bool m_dump_backtrace_on_finalization { false };
    bool m_should_die { false };
    bool m_initialized { false };
    bool m_in_block { false };
    Atomic<bool> m_have_any_unmasked_pending_signals { false };

    void yield_without_holding_big_lock();
    void donate_without_holding_big_lock(RefPtr<Thread>&, const char*);
    void yield_while_not_holding_big_lock();
    void drop_thread_count(bool);
};
template<typename Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    ScopedSpinLock lock(g_tid_map_lock);
    for (auto& it : *g_tid_map) {
        IterationDecision decision = callback(*it.value);
        if (decision != IterationDecision::Continue)
            return decision;
    }
    return IterationDecision::Continue;
}

template<typename Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    ScopedSpinLock lock(g_tid_map_lock);
    for (auto& it : *g_tid_map) {
        auto& thread = *it.value;
        if (thread.state() != state)
            continue;
        IterationDecision decision = callback(thread);
        if (decision != IterationDecision::Continue)
            return decision;
    }
    return IterationDecision::Continue;
}
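
// Usage sketch (illustrative): enumerating all runnable threads under the
// tid-map lock; returning IterationDecision::Break would stop the walk early.
//
//     Thread::for_each_in_state(Thread::Runnable, [](Thread& thread) {
//         dbgln("runnable: {}", thread);
//         return IterationDecision::Continue;
//     });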
const LogStream& operator<<(const LogStream&, const Thread&);

}

template<>
struct AK::Formatter<Kernel::Thread> : AK::Formatter<FormatString> {
    void format(FormatBuilder&, const Kernel::Thread&);
};
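
// Usage sketch (illustrative): the Formatter specialization above lets a
// Thread be passed straight to the AK format APIs, e.g.:
//
//     dbgln("current thread: {}", *Thread::current());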