/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Concepts.h>
#include <AK/EnumBits.h>
#include <AK/Error.h>
#include <AK/HashMap.h>
#include <AK/IntrusiveList.h>
#include <AK/Optional.h>
#include <AK/OwnPtr.h>
#include <AK/String.h>
#include <AK/TemporaryChange.h>
#include <AK/Time.h>
#include <AK/Variant.h>
#include <AK/Vector.h>
#include <AK/WeakPtr.h>
#include <AK/Weakable.h>
#include <Kernel/Arch/x86/SafeMem.h>
#include <Kernel/Debug.h>
#include <Kernel/FileSystem/InodeIdentifier.h>
#include <Kernel/Forward.h>
#include <Kernel/KString.h>
#include <Kernel/Library/ListedRefCounted.h>
#include <Kernel/Locking/LockLocation.h>
#include <Kernel/Locking/LockMode.h>
#include <Kernel/Locking/LockRank.h>
#include <Kernel/Locking/SpinlockProtected.h>
#include <Kernel/Memory/VirtualRange.h>
#include <Kernel/Scheduler.h>
#include <Kernel/TimerQueue.h>
#include <Kernel/UnixTypes.h>
#include <LibC/fd_set.h>
#include <LibC/signal_numbers.h>

namespace Kernel {

namespace Memory {
extern RecursiveSpinlock s_mm_lock;
}

enum class DispatchSignalResult {
    Deferred = 0,
    Yield,
    Terminate,
    Continue
};
struct SignalActionData {
    VirtualAddress handler_or_sigaction;
    u32 mask { 0 };
    int flags { 0 };
};

struct ThreadSpecificData {
    ThreadSpecificData* self;
};

#define THREAD_PRIORITY_MIN 1
#define THREAD_PRIORITY_LOW 10
#define THREAD_PRIORITY_NORMAL 30
#define THREAD_PRIORITY_HIGH 50
#define THREAD_PRIORITY_MAX 99

#define THREAD_AFFINITY_DEFAULT 0xffffffff

struct ThreadRegisters {
#if ARCH(I386)
    FlatPtr ss;
    FlatPtr gs;
    FlatPtr fs;
    FlatPtr es;
    FlatPtr ds;
    FlatPtr edi;
    FlatPtr esi;
    FlatPtr ebp;
    FlatPtr esp;
    FlatPtr ebx;
    FlatPtr edx;
    FlatPtr ecx;
    FlatPtr eax;
    FlatPtr eip;
    FlatPtr esp0;
    FlatPtr ss0;
#else
    FlatPtr rdi;
    FlatPtr rsi;
    FlatPtr rbp;
    FlatPtr rsp;
    FlatPtr rbx;
    FlatPtr rdx;
    FlatPtr rcx;
    FlatPtr rax;
    FlatPtr r8;
    FlatPtr r9;
    FlatPtr r10;
    FlatPtr r11;
    FlatPtr r12;
    FlatPtr r13;
    FlatPtr r14;
    FlatPtr r15;
    FlatPtr rip;
    FlatPtr rsp0;
#endif
    FlatPtr cs;

#if ARCH(I386)
    FlatPtr eflags;
    FlatPtr flags() const { return eflags; }
    void set_flags(FlatPtr value) { eflags = value; }
    void set_sp(FlatPtr value) { esp = value; }
    void set_sp0(FlatPtr value) { esp0 = value; }
    void set_ip(FlatPtr value) { eip = value; }
#else
    FlatPtr rflags;
    FlatPtr flags() const { return rflags; }
    void set_flags(FlatPtr value) { rflags = value; }
    void set_sp(FlatPtr value) { rsp = value; }
    void set_sp0(FlatPtr value) { rsp0 = value; }
    void set_ip(FlatPtr value) { rip = value; }
#endif

    FlatPtr cr3;

    FlatPtr ip() const
    {
#if ARCH(I386)
        return eip;
#else
        return rip;
#endif
    }

    FlatPtr sp() const
    {
#if ARCH(I386)
        return esp;
#else
        return rsp;
#endif
    }
};
class Thread
    : public ListedRefCounted<Thread, LockType::Spinlock>
    , public Weakable<Thread> {
    AK_MAKE_NONCOPYABLE(Thread);
    AK_MAKE_NONMOVABLE(Thread);

    friend class Mutex;
    friend class Process;
    friend class Scheduler;
    friend struct ThreadReadyQueue;

public:
    inline static Thread* current()
    {
        return Processor::current_thread();
    }

    static ErrorOr<NonnullRefPtr<Thread>> try_create(NonnullRefPtr<Process>);
    ~Thread();

    static RefPtr<Thread> from_tid(ThreadID);
    static void finalize_dying_threads();

    ThreadID tid() const { return m_tid; }
    ProcessID pid() const;

    void set_priority(u32 p) { m_priority = p; }
    u32 priority() const { return m_priority; }

    void detach()
    {
        SpinlockLocker lock(m_lock);
        m_is_joinable = false;
    }

    [[nodiscard]] bool is_joinable() const
    {
        SpinlockLocker lock(m_lock);
        return m_is_joinable;
    }

    Process& process() { return m_process; }
    const Process& process() const { return m_process; }

    // NOTE: This returns a null-terminated string.
    StringView name() const
    {
        // NOTE: Whoever is calling this needs to be holding our lock while reading the name.
        VERIFY(m_lock.is_locked_by_current_processor());
        return m_name->view();
    }

    void set_name(NonnullOwnPtr<KString> name)
    {
        SpinlockLocker lock(m_lock);
        m_name = move(name);
    }

    void finalize();

    enum State : u8 {
        Invalid = 0,
        Runnable,
        Running,
        Dying,
        Dead,
        Stopped,
        Blocked
    };
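
    // A rough sketch of the lifecycle, derived from how these states are used
    // below: a thread starts out Invalid, becomes Runnable once initialized,
    // and alternates between Runnable and Running under the scheduler. It may
    // enter Blocked (via block<BlockerType>() below) or Stopped (e.g. on
    // SIGSTOP), and finally goes Dying -> Dead when it exits and is finalized.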

    class [[nodiscard]] BlockResult {
    public:
        enum Type {
            WokeNormally,
            NotBlocked,
            InterruptedBySignal,
            InterruptedByDeath,
            InterruptedByTimeout,
        };

        BlockResult() = delete;

        BlockResult(Type type)
            : m_type(type)
        {
        }

        bool operator==(Type type) const
        {
            return m_type == type;
        }
        bool operator!=(Type type) const
        {
            return m_type != type;
        }

        [[nodiscard]] bool was_interrupted() const
        {
            switch (m_type) {
            case InterruptedBySignal:
            case InterruptedByDeath:
                return true;
            default:
                return false;
            }
        }

    private:
        Type m_type;
    };
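
    // Illustrative only: a typical caller pattern for BlockResult, e.g. in a
    // syscall implementation (the names here are hypothetical):
    //
    //     auto result = Thread::current()->sleep(duration);
    //     if (result.was_interrupted())
    //         return EINTR; // a signal or impending death woke us early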

    class BlockTimeout {
    public:
        BlockTimeout()
            : m_infinite(true)
        {
        }
        explicit BlockTimeout(bool is_absolute, const Time* time, const Time* start_time = nullptr, clockid_t clock_id = CLOCK_MONOTONIC_COARSE);

        const Time& absolute_time() const { return m_time; }
        const Time* start_time() const { return !m_infinite ? &m_start_time : nullptr; }
        clockid_t clock_id() const { return m_clock_id; }
        bool is_infinite() const { return m_infinite; }

    private:
        Time m_time {};
        Time m_start_time {};
        clockid_t m_clock_id { CLOCK_MONOTONIC_COARSE };
        bool m_infinite { false };
    };
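
    // Illustrative only: a default-constructed BlockTimeout blocks forever,
    // while the explicit constructor takes either a relative duration or an
    // absolute deadline on the given clock, e.g.:
    //
    //     auto duration = Time::from_milliseconds(500);
    //     Thread::BlockTimeout timeout(false, &duration); // relative: "up to 500 ms from now"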

    class BlockerSet;

    class Blocker {
        AK_MAKE_NONMOVABLE(Blocker);
        AK_MAKE_NONCOPYABLE(Blocker);

    public:
        enum class Type {
            Unknown = 0,
            File,
            Futex,
            Plan9FS,
            Join,
            Queue,
            Routing,
            Sleep,
            Signal,
            Wait
        };
        virtual ~Blocker();
        virtual StringView state_string() const = 0;
        virtual Type blocker_type() const = 0;
        virtual const BlockTimeout& override_timeout(const BlockTimeout& timeout) { return timeout; }
        virtual bool can_be_interrupted() const { return true; }
        virtual bool setup_blocker();

        Thread& thread() { return m_thread; }

        enum class UnblockImmediatelyReason {
            UnblockConditionAlreadyMet,
            TimeoutInThePast,
        };

        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) = 0;

        virtual void was_unblocked(bool did_timeout)
        {
            if (did_timeout) {
                SpinlockLocker lock(m_lock);
                m_did_timeout = true;
            }
        }
        void set_interrupted_by_death()
        {
            SpinlockLocker lock(m_lock);
            do_set_interrupted_by_death();
        }
        void set_interrupted_by_signal(u8 signal)
        {
            SpinlockLocker lock(m_lock);
            do_set_interrupted_by_signal(signal);
        }
        u8 was_interrupted_by_signal() const
        {
            SpinlockLocker lock(m_lock);
            return do_get_interrupted_by_signal();
        }
        virtual Thread::BlockResult block_result()
        {
            SpinlockLocker lock(m_lock);
            if (m_was_interrupted_by_death)
                return Thread::BlockResult::InterruptedByDeath;
            if (m_was_interrupted_by_signal != 0)
                return Thread::BlockResult::InterruptedBySignal;
            if (m_did_timeout)
                return Thread::BlockResult::InterruptedByTimeout;
            return Thread::BlockResult::WokeNormally;
        }

        void begin_blocking(Badge<Thread>);
        BlockResult end_blocking(Badge<Thread>, bool);

    protected:
        Blocker()
            : m_thread(*Thread::current())
        {
        }

        void do_set_interrupted_by_death()
        {
            m_was_interrupted_by_death = true;
        }
        void do_set_interrupted_by_signal(u8 signal)
        {
            VERIFY(signal != 0);
            m_was_interrupted_by_signal = signal;
        }
        void do_clear_interrupted_by_signal()
        {
            m_was_interrupted_by_signal = 0;
        }
        u8 do_get_interrupted_by_signal() const
        {
            return m_was_interrupted_by_signal;
        }
        [[nodiscard]] bool was_interrupted() const
        {
            return m_was_interrupted_by_death || m_was_interrupted_by_signal != 0;
        }
        void unblock_from_blocker()
        {
            {
                SpinlockLocker lock(m_lock);
                if (!m_is_blocking)
                    return;
                m_is_blocking = false;
            }
            m_thread->unblock_from_blocker(*this);
        }

        bool add_to_blocker_set(BlockerSet&, void* = nullptr);
        void set_blocker_set_raw_locked(BlockerSet* blocker_set) { m_blocker_set = blocker_set; }

        mutable RecursiveSpinlock m_lock;

    private:
        BlockerSet* m_blocker_set { nullptr };
        NonnullRefPtr<Thread> m_thread;
        u8 m_was_interrupted_by_signal { 0 };
        bool m_is_blocking { false };
        bool m_was_interrupted_by_death { false };
        bool m_did_timeout { false };
    };
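
    // A sketch of the Blocker protocol as driven by Thread::block() below:
    // setup_blocker() registers the blocker with whatever BlockerSet will wake
    // it (returning false if the wake condition is already met), then
    // begin_blocking()/end_blocking() bracket the actual wait, and
    // block_result() folds the interruption flags into a BlockResult.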

    class BlockerSet {
        AK_MAKE_NONCOPYABLE(BlockerSet);
        AK_MAKE_NONMOVABLE(BlockerSet);

    public:
        BlockerSet() = default;

        virtual ~BlockerSet()
        {
            VERIFY(!m_lock.is_locked());
            VERIFY(m_blockers.is_empty());
        }

        bool add_blocker(Blocker& blocker, void* data)
        {
            SpinlockLocker lock(m_lock);
            if (!should_add_blocker(blocker, data))
                return false;
            m_blockers.append({ &blocker, data });
            return true;
        }

        void remove_blocker(Blocker& blocker)
        {
            SpinlockLocker lock(m_lock);
            // NOTE: it's possible that the blocker is no longer present
            m_blockers.remove_all_matching([&](auto& info) {
                return info.blocker == &blocker;
            });
        }

        bool is_empty() const
        {
            SpinlockLocker lock(m_lock);
            return is_empty_locked();
        }

    protected:
        template<typename Callback>
        bool unblock_all_blockers_whose_conditions_are_met(Callback try_to_unblock_one)
        {
            SpinlockLocker lock(m_lock);
            return unblock_all_blockers_whose_conditions_are_met_locked(try_to_unblock_one);
        }

        template<typename Callback>
        bool unblock_all_blockers_whose_conditions_are_met_locked(Callback try_to_unblock_one)
        {
            VERIFY(m_lock.is_locked());
            bool stop_iterating = false;
            bool did_unblock_any = false;
            for (size_t i = 0; i < m_blockers.size() && !stop_iterating;) {
                auto& info = m_blockers[i];
                if (bool did_unblock = try_to_unblock_one(*info.blocker, info.data, stop_iterating)) {
                    m_blockers.remove(i);
                    did_unblock_any = true;
                    continue;
                }
                i++;
            }
            return did_unblock_any;
        }

        bool is_empty_locked() const
        {
            VERIFY(m_lock.is_locked());
            return m_blockers.is_empty();
        }

        virtual bool should_add_blocker(Blocker&, void*) { return true; }

        struct BlockerInfo {
            Blocker* blocker;
            void* data;
        };

        Vector<BlockerInfo, 4> do_take_blockers(size_t count)
        {
            if (m_blockers.size() <= count)
                return move(m_blockers);

            size_t move_count = (count <= m_blockers.size()) ? count : m_blockers.size();
            VERIFY(move_count > 0);

            Vector<BlockerInfo, 4> taken_blockers;
            taken_blockers.ensure_capacity(move_count);
            for (size_t i = 0; i < move_count; i++)
                taken_blockers.append(m_blockers.take(i));
            m_blockers.remove(0, move_count);
            return taken_blockers;
        }

        void do_append_blockers(Vector<BlockerInfo, 4>&& blockers_to_append)
        {
            if (blockers_to_append.is_empty())
                return;
            if (m_blockers.is_empty()) {
                m_blockers = move(blockers_to_append);
                return;
            }
            m_blockers.ensure_capacity(m_blockers.size() + blockers_to_append.size());
            for (size_t i = 0; i < blockers_to_append.size(); i++)
                m_blockers.append(blockers_to_append.take(i));
            blockers_to_append.clear();
        }

        mutable Spinlock m_lock;

    private:
        Vector<BlockerInfo, 4> m_blockers;
    };
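
    // Illustrative only (hypothetical subclass): a concrete BlockerSet decides
    // in should_add_blocker() whether a newly arriving blocker must actually
    // wait, and wakes sleepers via unblock_all_blockers_whose_conditions_are_met():
    //
    //     class EventBlockerSet final : public BlockerSet {
    //     public:
    //         void event_happened()
    //         {
    //             unblock_all_blockers_whose_conditions_are_met([&](auto& blocker, void* data, bool&) {
    //                 return try_wake_one(blocker, data); // hypothetical helper
    //             });
    //         }
    //     };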

    friend class JoinBlocker;
    class JoinBlocker final : public Blocker {
    public:
        explicit JoinBlocker(Thread& joinee, ErrorOr<void>& try_join_result, void*& joinee_exit_value);
        virtual Type blocker_type() const override { return Type::Join; }
        virtual StringView state_string() const override { return "Joining"sv; }
        virtual bool can_be_interrupted() const override { return false; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual bool setup_blocker() override;

        bool unblock(void*, bool);

    private:
        NonnullRefPtr<Thread> m_joinee;
        void*& m_joinee_exit_value;
        ErrorOr<void>& m_try_join_result;
        bool m_did_unblock { false };
    };

    class WaitQueueBlocker final : public Blocker {
    public:
        explicit WaitQueueBlocker(WaitQueue&, StringView block_reason = {});
        virtual ~WaitQueueBlocker();

        virtual Type blocker_type() const override { return Type::Queue; }
        virtual StringView state_string() const override { return m_block_reason.is_null() ? "Queue"sv : m_block_reason; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override { }
        virtual bool setup_blocker() override;

        bool unblock();

    protected:
        WaitQueue& m_wait_queue;
        StringView m_block_reason;
        bool m_did_unblock { false };
    };

    class FutexBlocker final : public Blocker {
    public:
        explicit FutexBlocker(FutexQueue&, u32);
        virtual ~FutexBlocker();

        virtual Type blocker_type() const override { return Type::Futex; }
        virtual StringView state_string() const override { return "Futex"sv; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override { }
        virtual bool setup_blocker() override;

        u32 bitset() const { return m_bitset; }

        void begin_requeue()
        {
            // We need to hold the lock until we moved it over
            m_relock_flags = m_lock.lock();
        }
        void finish_requeue(FutexQueue&);

        bool unblock_bitset(u32 bitset);
        bool unblock(bool force = false);

    protected:
        FutexQueue& m_futex_queue;
        u32 m_bitset { 0 };
        u32 m_relock_flags { 0 };
        bool m_did_unblock { false };
    };

    class FileBlocker : public Blocker {
    public:
        enum class BlockFlags : u16 {
            None = 0,

            Read = 1 << 0,
            Write = 1 << 1,
            ReadPriority = 1 << 2,
            WritePriority = 1 << 3,

            Accept = 1 << 4,
            Connect = 1 << 5,
            SocketFlags = Accept | Connect,

            WriteNotOpen = 1 << 6,
            WriteError = 1 << 7,
            WriteHangUp = 1 << 8,
            ReadHangUp = 1 << 9,
            Exception = WriteNotOpen | WriteError | WriteHangUp | ReadHangUp,
        };

        virtual Type blocker_type() const override { return Type::File; }

        virtual bool unblock_if_conditions_are_met(bool, void*) = 0;
    };
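
    // Illustrative only: BlockFlags is a bitmask (AK_ENUM_BITWISE_OPERATORS is
    // applied to it at the bottom of this file), so conditions combine with
    // the usual bitwise operators, e.g.:
    //
    //     auto flags = FileBlocker::BlockFlags::Read | FileBlocker::BlockFlags::ReadHangUp;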

    class OpenFileDescriptionBlocker : public FileBlocker {
    public:
        const OpenFileDescription& blocked_description() const;

        virtual bool unblock_if_conditions_are_met(bool, void*) override;
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual bool setup_blocker() override;

    protected:
        explicit OpenFileDescriptionBlocker(OpenFileDescription&, BlockFlags, BlockFlags&);

    private:
        NonnullRefPtr<OpenFileDescription> m_blocked_description;
        const BlockFlags m_flags;
        BlockFlags& m_unblocked_flags;
        bool m_did_unblock { false };
    };

    class AcceptBlocker final : public OpenFileDescriptionBlocker {
    public:
        explicit AcceptBlocker(OpenFileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Accepting"sv; }
    };

    class ConnectBlocker final : public OpenFileDescriptionBlocker {
    public:
        explicit ConnectBlocker(OpenFileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Connecting"sv; }
    };

    class WriteBlocker final : public OpenFileDescriptionBlocker {
    public:
        explicit WriteBlocker(OpenFileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Writing"sv; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;

    private:
        BlockTimeout m_timeout;
    };

    class ReadBlocker final : public OpenFileDescriptionBlocker {
    public:
        explicit ReadBlocker(OpenFileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Reading"sv; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;

    private:
        BlockTimeout m_timeout;
    };

    class SleepBlocker final : public Blocker {
    public:
        explicit SleepBlocker(const BlockTimeout&, Time* = nullptr);
        virtual StringView state_string() const override { return "Sleeping"sv; }
        virtual Type blocker_type() const override { return Type::Sleep; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual void was_unblocked(bool) override;
        virtual Thread::BlockResult block_result() override;

    private:
        void calculate_remaining();

        BlockTimeout m_deadline;
        Time* m_remaining;
    };

    class SelectBlocker final : public FileBlocker {
    public:
        struct FDInfo {
            NonnullRefPtr<OpenFileDescription> description;
            BlockFlags block_flags { BlockFlags::None };
            BlockFlags unblocked_flags { BlockFlags::None };
        };

        using FDVector = Vector<FDInfo, FD_SETSIZE>;

        explicit SelectBlocker(FDVector&);
        virtual ~SelectBlocker();

        virtual bool unblock_if_conditions_are_met(bool, void*) override;
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual void was_unblocked(bool) override;
        virtual StringView state_string() const override { return "Selecting"sv; }
        virtual bool setup_blocker() override;

    private:
        size_t collect_unblocked_flags();

        FDVector& m_fds;
        bool m_did_unblock { false };
    };

    class SignalBlocker final : public Blocker {
    public:
        explicit SignalBlocker(sigset_t pending_set, siginfo_t& result);
        virtual StringView state_string() const override { return "Pending Signal"sv; }
        virtual Type blocker_type() const override { return Type::Signal; }
        void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual bool setup_blocker() override;
        bool check_pending_signals(bool from_add_blocker);

    private:
        sigset_t m_pending_set { 0 };
        siginfo_t& m_result;
        bool m_did_unblock { false };
    };

    class SignalBlockerSet final : public BlockerSet {
    public:
        void unblock_all_blockers_whose_conditions_are_met()
        {
            BlockerSet::unblock_all_blockers_whose_conditions_are_met([&](auto& b, void*, bool&) {
                VERIFY(b.blocker_type() == Blocker::Type::Signal);
                auto& blocker = static_cast<Thread::SignalBlocker&>(b);
                return blocker.check_pending_signals(false);
            });
        }

    private:
        bool should_add_blocker(Blocker& b, void*) override
        {
            VERIFY(b.blocker_type() == Blocker::Type::Signal);
            auto& blocker = static_cast<Thread::SignalBlocker&>(b);
            return !blocker.check_pending_signals(true);
        }
    };

    class WaitBlocker final : public Blocker {
    public:
        enum class UnblockFlags {
            Terminated,
            Stopped,
            Continued,
            Disowned
        };

        WaitBlocker(int wait_options, Variant<Empty, NonnullRefPtr<Process>, NonnullRefPtr<ProcessGroup>> waitee, ErrorOr<siginfo_t>& result);
        virtual StringView state_string() const override { return "Waiting"sv; }
        virtual Type blocker_type() const override { return Type::Wait; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual void was_unblocked(bool) override;
        virtual bool setup_blocker() override;

        bool unblock(Process& process, UnblockFlags flags, u8 signal, bool from_add_blocker);
        bool is_wait() const { return (m_wait_options & WNOWAIT) != WNOWAIT; }

    private:
        void do_was_disowned();
        void do_set_result(const siginfo_t&);

        const int m_wait_options;
        ErrorOr<siginfo_t>& m_result;
        Variant<Empty, NonnullRefPtr<Process>, NonnullRefPtr<ProcessGroup>> m_waitee;
        bool m_did_unblock { false };
        bool m_got_sigchild { false };
    };

    class WaitBlockerSet final : public BlockerSet {
        friend class WaitBlocker;

    public:
        explicit WaitBlockerSet(Process& process)
            : m_process(process)
        {
        }

        void disowned_by_waiter(Process&);
        bool unblock(Process&, WaitBlocker::UnblockFlags, u8);
        void try_unblock(WaitBlocker&);
        void finalize();

    protected:
        virtual bool should_add_blocker(Blocker&, void*) override;

    private:
        struct ProcessBlockInfo {
            NonnullRefPtr<Process> process;
            WaitBlocker::UnblockFlags flags;
            u8 signal;
            bool was_waited { false };

            explicit ProcessBlockInfo(NonnullRefPtr<Process>&&, WaitBlocker::UnblockFlags, u8);
            ~ProcessBlockInfo();
        };

        Process& m_process;
        Vector<ProcessBlockInfo, 2> m_processes;
        bool m_finalized { false };
    };

    template<typename AddBlockerHandler>
    ErrorOr<void> try_join(AddBlockerHandler add_blocker)
    {
        if (Thread::current() == this)
            return EDEADLK;

        SpinlockLocker lock(m_lock);
        if (!m_is_joinable || state() == Dead)
            return EINVAL;

        add_blocker();

        // From this point on the thread is no longer joinable by anyone
        // else. It also means that if the join is timed, it becomes
        // detached when a timeout happens.
        m_is_joinable = false;
        return {};
    }
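
    // Illustrative only: callers don't normally invoke try_join() directly;
    // JoinBlocker::setup_blocker() goes through it so that marking the thread
    // non-joinable and registering the blocker happen under a single m_lock
    // hold. A joining thread would do something like (hypothetical sketch):
    //
    //     ErrorOr<void> try_join_result;
    //     void* joinee_exit_value = nullptr;
    //     auto result = Thread::current()->block<Thread::JoinBlocker>(timeout, joinee, try_join_result, joinee_exit_value);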

    void did_schedule() { ++m_times_scheduled; }
    u32 times_scheduled() const { return m_times_scheduled; }

    void resume_from_stopped();

    [[nodiscard]] bool should_be_stopped() const;
    [[nodiscard]] bool is_stopped() const { return m_state == Stopped; }
    [[nodiscard]] bool is_blocked() const { return m_state == Blocked; }
    [[nodiscard]] bool is_in_block() const
    {
        SpinlockLocker lock(m_block_lock);
        return m_in_block;
    }

    u32 cpu() const { return m_cpu.load(AK::MemoryOrder::memory_order_consume); }
    void set_cpu(u32 cpu) { m_cpu.store(cpu, AK::MemoryOrder::memory_order_release); }

    u32 affinity() const { return m_cpu_affinity; }
    void set_affinity(u32 affinity) { m_cpu_affinity = affinity; }

    RegisterState& get_register_dump_from_stack();
    const RegisterState& get_register_dump_from_stack() const { return const_cast<Thread*>(this)->get_register_dump_from_stack(); }

    DebugRegisterState& debug_register_state() { return m_debug_register_state; }
    const DebugRegisterState& debug_register_state() const { return m_debug_register_state; }

    ThreadRegisters& regs() { return m_regs; }
    ThreadRegisters const& regs() const { return m_regs; }

    State state() const { return m_state; }
    StringView state_string() const;

    VirtualAddress thread_specific_data() const { return m_thread_specific_data; }
    size_t thread_specific_region_size() const;
    size_t thread_specific_region_alignment() const;

    ALWAYS_INLINE void yield_if_stopped()
    {
        // If some thread stopped us, we need to yield to someone else
        // We check this when entering/exiting a system call. A thread
        // may continue to execute in user land until the next timer
        // tick or entering the next system call, or if it's in kernel
        // mode then we will intercept prior to returning back to user
        // mode.
        SpinlockLocker lock(m_lock);
        while (state() == Thread::Stopped) {
            lock.unlock();
            // We shouldn't be holding the big lock here
            yield_without_releasing_big_lock();
            lock.lock();
        }
    }

    void block(Kernel::Mutex&, SpinlockLocker<Spinlock>&, u32);

    template<typename BlockerType, class... Args>
    [[nodiscard]] BlockResult block(const BlockTimeout& timeout, Args&&... args)
    {
        VERIFY(!Processor::current_in_irq());
        VERIFY(this == Thread::current());
        ScopedCritical critical;
        VERIFY(!Memory::s_mm_lock.is_locked_by_current_processor());

        SpinlockLocker block_lock(m_block_lock);
        // We need to hold m_block_lock so that nobody can unblock a blocker as soon
        // as it is constructed and registered elsewhere
        VERIFY(!m_in_block);
        TemporaryChange in_block_change(m_in_block, true);

        BlockerType blocker(forward<Args>(args)...);

        if (!blocker.setup_blocker()) {
            blocker.will_unblock_immediately_without_blocking(Blocker::UnblockImmediatelyReason::UnblockConditionAlreadyMet);
            return BlockResult::NotBlocked;
        }

        SpinlockLocker scheduler_lock(g_scheduler_lock);
        // Relaxed semantics are fine for timeout_unblocked because we
        // synchronize on the spin locks already.
        Atomic<bool, AK::MemoryOrder::memory_order_relaxed> timeout_unblocked(false);
        bool timer_was_added = false;

        switch (state()) {
        case Thread::Stopped:
            // It's possible that we were requested to be stopped!
            break;
        case Thread::Running:
            VERIFY(m_blocker == nullptr);
            break;
        default:
            VERIFY_NOT_REACHED();
        }

        m_blocker = &blocker;

        if (auto& block_timeout = blocker.override_timeout(timeout); !block_timeout.is_infinite()) {
            // Process::kill_all_threads may be called at any time, which will mark all
            // threads to die. In that case
            timer_was_added = TimerQueue::the().add_timer_without_id(*m_block_timer, block_timeout.clock_id(), block_timeout.absolute_time(), [&]() {
                VERIFY(!Processor::current_in_irq());
                VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
                VERIFY(!m_block_lock.is_locked_by_current_processor());
                // NOTE: this may execute on the same or any other processor!
                SpinlockLocker scheduler_lock(g_scheduler_lock);
                SpinlockLocker block_lock(m_block_lock);
                if (m_blocker && !timeout_unblocked.exchange(true))
                    unblock();
            });
            if (!timer_was_added) {
                // Timeout is already in the past
                blocker.will_unblock_immediately_without_blocking(Blocker::UnblockImmediatelyReason::TimeoutInThePast);
                m_blocker = nullptr;
                return BlockResult::InterruptedByTimeout;
            }
        }

        blocker.begin_blocking({});

        set_state(Thread::Blocked);

        scheduler_lock.unlock();
        block_lock.unlock();

        dbgln_if(THREAD_DEBUG, "Thread {} blocking on {} ({}) -->", *this, &blocker, blocker.state_string());
        bool did_timeout = false;
        u32 lock_count_to_restore = 0;
        auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
        for (;;) {
            // Yield to the scheduler, and wait for us to resume unblocked.
            VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
            VERIFY(Processor::in_critical());
            yield_without_releasing_big_lock();
            VERIFY(Processor::in_critical());

            SpinlockLocker block_lock2(m_block_lock);
            if (m_blocker && !m_blocker->can_be_interrupted() && !m_should_die) {
                block_lock2.unlock();
                dbgln("Thread should not be unblocking, current state: {}", state_string());
                set_state(Thread::Blocked);
                continue;
            }
            // Prevent the timeout from unblocking this thread if it happens to
            // be in the process of firing already
            did_timeout |= timeout_unblocked.exchange(true);
            if (m_blocker) {
                // Remove ourselves...
                VERIFY(m_blocker == &blocker);
                m_blocker = nullptr;
            }
            dbgln_if(THREAD_DEBUG, "<-- Thread {} unblocked from {} ({})", *this, &blocker, blocker.state_string());
            break;
        }

        if (blocker.was_interrupted_by_signal()) {
            SpinlockLocker scheduler_lock(g_scheduler_lock);
            SpinlockLocker lock(m_lock);
            dispatch_one_pending_signal();
        }

        // Notify the blocker that we are no longer blocking. It may need
        // to clean up now while we're still holding m_lock
        auto result = blocker.end_blocking({}, did_timeout); // calls was_unblocked internally

        if (timer_was_added && !did_timeout) {
            // Cancel the timer while not holding any locks. This allows
            // the timer function to complete before we remove it
            // (e.g. if it's on another processor)
            TimerQueue::the().cancel_timer(*m_block_timer);
        }
        if (previous_locked != LockMode::Unlocked) {
            // NOTE: this may trigger another call to Thread::block(), so
            // we need to do this after we're all done and restored m_in_block!
            relock_process(previous_locked, lock_count_to_restore);
        }
        return result;
    }

    u32 unblock_from_lock(Kernel::Mutex&);
    void unblock_from_blocker(Blocker&);
    void unblock(u8 signal = 0);

    template<class... Args>
    Thread::BlockResult wait_on(WaitQueue& wait_queue, const Thread::BlockTimeout& timeout, Args&&... args)
    {
        VERIFY(this == Thread::current());
        return block<Thread::WaitQueueBlocker>(timeout, wait_queue, forward<Args>(args)...);
    }
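
    // Illustrative only: wait_on() is the common way to sleep on a WaitQueue,
    // e.g. waiting for a condition with no deadline ("queue" is hypothetical):
    //
    //     auto result = Thread::current()->wait_on(queue, Thread::BlockTimeout());
    //     if (result.was_interrupted())
    //         return EINTR;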

    BlockResult sleep(clockid_t, const Time&, Time* = nullptr);
    BlockResult sleep(const Time& duration, Time* remaining_time = nullptr)
    {
        return sleep(CLOCK_MONOTONIC_COARSE, duration, remaining_time);
    }
    BlockResult sleep_until(clockid_t, const Time&);
    BlockResult sleep_until(const Time& duration)
    {
        return sleep_until(CLOCK_MONOTONIC_COARSE, duration);
    }
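
    // Illustrative only: the overloads without a clockid_t default to
    // CLOCK_MONOTONIC_COARSE, so a relative 100 ms sleep is simply:
    //
    //     auto duration = Time::from_milliseconds(100);
    //     Time remaining {};
    //     auto result = Thread::current()->sleep(duration, &remaining);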

    // Tell this thread to unblock if needed,
    // gracefully unwind the stack and die.
    void set_should_die();
    [[nodiscard]] bool should_die() const { return m_should_die; }
    void die_if_needed();

    void exit(void* = nullptr);

    void update_time_scheduled(u64, bool, bool);
    bool tick();
    void set_ticks_left(u32 t) { m_ticks_left = t; }
    u32 ticks_left() const { return m_ticks_left; }

    FlatPtr kernel_stack_base() const { return m_kernel_stack_base; }
    FlatPtr kernel_stack_top() const { return m_kernel_stack_top; }

    void set_state(State, u8 = 0);

    [[nodiscard]] bool is_initialized() const { return m_initialized; }
    void set_initialized(bool initialized) { m_initialized = initialized; }

    void send_urgent_signal_to_self(u8 signal);
    void send_signal(u8 signal, Process* sender);

    u32 update_signal_mask(u32 signal_mask);
    u32 signal_mask_block(sigset_t signal_set, bool block);
    u32 signal_mask() const;
    void reset_signals_for_exec();

    ErrorOr<FlatPtr> peek_debug_register(u32 register_index);
    ErrorOr<void> poke_debug_register(u32 register_index, FlatPtr data);

    void set_dump_backtrace_on_finalization() { m_dump_backtrace_on_finalization = true; }

    DispatchSignalResult dispatch_one_pending_signal();
    DispatchSignalResult try_dispatch_one_pending_signal(u8 signal);
    DispatchSignalResult dispatch_signal(u8 signal);
    void check_dispatch_pending_signal();
    [[nodiscard]] bool has_unmasked_pending_signals() const { return m_have_any_unmasked_pending_signals.load(AK::memory_order_consume); }
    [[nodiscard]] bool should_ignore_signal(u8 signal) const;
    [[nodiscard]] bool has_signal_handler(u8 signal) const;
    [[nodiscard]] bool is_signal_masked(u8 signal) const;
    u32 pending_signals() const;
    u32 pending_signals_for_state() const;

    [[nodiscard]] bool has_alternative_signal_stack() const;
    [[nodiscard]] bool is_in_alternative_signal_stack() const;

    FPUState& fpu_state() { return m_fpu_state; }

    ErrorOr<void> make_thread_specific_region(Badge<Process>);

    unsigned syscall_count() const { return m_syscall_count; }
    void did_syscall() { ++m_syscall_count; }
    unsigned inode_faults() const { return m_inode_faults; }
    void did_inode_fault() { ++m_inode_faults; }
    unsigned zero_faults() const { return m_zero_faults; }
    void did_zero_fault() { ++m_zero_faults; }
    unsigned cow_faults() const { return m_cow_faults; }
    void did_cow_fault() { ++m_cow_faults; }

    unsigned file_read_bytes() const { return m_file_read_bytes; }
    unsigned file_write_bytes() const { return m_file_write_bytes; }

    void did_file_read(unsigned bytes)
    {
        m_file_read_bytes += bytes;
    }

    void did_file_write(unsigned bytes)
    {
        m_file_write_bytes += bytes;
    }

    unsigned unix_socket_read_bytes() const { return m_unix_socket_read_bytes; }
    unsigned unix_socket_write_bytes() const { return m_unix_socket_write_bytes; }

    void did_unix_socket_read(unsigned bytes)
    {
        m_unix_socket_read_bytes += bytes;
    }

    void did_unix_socket_write(unsigned bytes)
    {
        m_unix_socket_write_bytes += bytes;
    }

    unsigned ipv4_socket_read_bytes() const { return m_ipv4_socket_read_bytes; }
    unsigned ipv4_socket_write_bytes() const { return m_ipv4_socket_write_bytes; }

    void did_ipv4_socket_read(unsigned bytes)
    {
        m_ipv4_socket_read_bytes += bytes;
    }

    void did_ipv4_socket_write(unsigned bytes)
    {
        m_ipv4_socket_write_bytes += bytes;
    }

    void set_active(bool active) { m_is_active = active; }

    u32 saved_critical() const { return m_saved_critical; }
    void save_critical(u32 critical) { m_saved_critical = critical; }

    void track_lock_acquire(LockRank rank);
    void track_lock_release(LockRank rank);

    [[nodiscard]] bool is_active() const { return m_is_active; }

    [[nodiscard]] bool is_finalizable() const
    {
        // We can't finalize as long as this thread is still running
        // Note that checking for Running state here isn't sufficient
        // as the thread may not be in Running state but switching out.
        // m_is_active is set to false once the context switch is
        // complete and the thread is not executing on any processor.
        if (m_is_active.load(AK::memory_order_acquire))
            return false;
        // We can't finalize until the thread is either detached or
        // a join has started. We can't make m_is_joinable atomic
        // because that would introduce a race in try_join.
        SpinlockLocker lock(m_lock);
        return !m_is_joinable;
    }

    ErrorOr<NonnullRefPtr<Thread>> try_clone(Process&);

    template<IteratorFunction<Thread&> Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<IteratorFunction<Thread&> Callback>
    static IterationDecision for_each(Callback);

    template<VoidFunction<Thread&> Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<VoidFunction<Thread&> Callback>
    static IterationDecision for_each(Callback);

    static constexpr u32 default_kernel_stack_size = 65536;
    static constexpr u32 default_userspace_stack_size = 1 * MiB;

    u64 time_in_user() const { return m_total_time_scheduled_user; }
    u64 time_in_kernel() const { return m_total_time_scheduled_kernel; }

    enum class PreviousMode : u8 {
        KernelMode = 0,
        UserMode
    };
    PreviousMode previous_mode() const { return m_previous_mode; }
    bool set_previous_mode(PreviousMode mode)
    {
        if (m_previous_mode == mode)
            return false;
        m_previous_mode = mode;
        return true;
    }

    TrapFrame*& current_trap() { return m_current_trap; }
    TrapFrame const* const& current_trap() const { return m_current_trap; }

    RecursiveSpinlock& get_lock() const { return m_lock; }

#if LOCK_DEBUG
    void holding_lock(Mutex& lock, int refs_delta, LockLocation const& location)
    {
        VERIFY(refs_delta != 0);
        m_holding_locks.fetch_add(refs_delta, AK::MemoryOrder::memory_order_relaxed);
        SpinlockLocker list_lock(m_holding_locks_lock);
        if (refs_delta > 0) {
            bool have_existing = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    have_existing = true;
                    info.count += refs_delta;
                    break;
                }
            }
            if (!have_existing)
                m_holding_locks_list.append({ &lock, location, 1 });
        } else {
            VERIFY(refs_delta < 0);
            bool found = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    VERIFY(info.count >= (unsigned)-refs_delta);
                    info.count -= (unsigned)-refs_delta;
                    if (info.count == 0)
                        m_holding_locks_list.remove(i);
                    found = true;
                    break;
                }
            }
            VERIFY(found);
        }
    }
    u32 lock_count() const
    {
        return m_holding_locks.load(AK::MemoryOrder::memory_order_relaxed);
    }
#endif

    bool is_handling_page_fault() const
    {
        return m_handling_page_fault;
    }
    void set_handling_page_fault(bool b) { m_handling_page_fault = b; }
    void set_idle_thread() { m_is_idle_thread = true; }
    bool is_idle_thread() const { return m_is_idle_thread; }

    void set_crashing() { m_is_crashing = true; }
    [[nodiscard]] bool is_crashing() const { return m_is_crashing; }

    ALWAYS_INLINE u32 enter_profiler()
    {
        return m_nested_profiler_calls.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);
    }

    ALWAYS_INLINE u32 leave_profiler()
    {
        return m_nested_profiler_calls.fetch_sub(1, AK::MemoryOrder::memory_order_acquire);
    }

    bool is_profiling_suppressed() const { return m_is_profiling_suppressed; }
    void set_profiling_suppressed() { m_is_profiling_suppressed = true; }

    bool is_promise_violation_pending() const { return m_is_promise_violation_pending; }
    void set_promise_violation_pending(bool value) { m_is_promise_violation_pending = value; }

    String backtrace();

private:
    Thread(NonnullRefPtr<Process>, NonnullOwnPtr<Memory::Region>, NonnullRefPtr<Timer>, NonnullOwnPtr<KString>);

    IntrusiveListNode<Thread> m_process_thread_list_node;
    int m_runnable_priority { -1 };

    friend class WaitQueue;

    class JoinBlockerSet final : public BlockerSet {
    public:
        void thread_did_exit(void* exit_value)
        {
            SpinlockLocker lock(m_lock);
            VERIFY(!m_thread_did_exit);
            m_thread_did_exit = true;
            m_exit_value.store(exit_value, AK::MemoryOrder::memory_order_release);
            do_unblock_joiner();
        }
        void thread_finalizing()
        {
            SpinlockLocker lock(m_lock);
            do_unblock_joiner();
        }
        void* exit_value() const
        {
            VERIFY(m_thread_did_exit);
            return m_exit_value.load(AK::MemoryOrder::memory_order_acquire);
        }

        void try_unblock(JoinBlocker& blocker)
        {
            SpinlockLocker lock(m_lock);
            if (m_thread_did_exit)
                blocker.unblock(exit_value(), false);
        }

    protected:
        virtual bool should_add_blocker(Blocker& b, void*) override
        {
            VERIFY(b.blocker_type() == Blocker::Type::Join);
            auto& blocker = static_cast<JoinBlocker&>(b);

            // NOTE: m_lock is held already!
            if (m_thread_did_exit) {
                blocker.unblock(exit_value(), true);
                return false;
            }
            return true;
        }

    private:
        void do_unblock_joiner()
        {
            unblock_all_blockers_whose_conditions_are_met_locked([&](Blocker& b, void*, bool&) {
                VERIFY(b.blocker_type() == Blocker::Type::Join);
                auto& blocker = static_cast<JoinBlocker&>(b);
                return blocker.unblock(exit_value(), false);
            });
        }

        Atomic<void*> m_exit_value { nullptr };
        bool m_thread_did_exit { false };
    };

    LockMode unlock_process_if_locked(u32&);
    void relock_process(LockMode, u32);
    void reset_fpu_state();

    mutable RecursiveSpinlock m_lock { LockRank::Thread };
    mutable RecursiveSpinlock m_block_lock;
    NonnullRefPtr<Process> m_process;
    ThreadID m_tid { -1 };
    ThreadRegisters m_regs {};
    DebugRegisterState m_debug_register_state {};
    TrapFrame* m_current_trap { nullptr };
    u32 m_saved_critical { 1 };
    IntrusiveListNode<Thread> m_ready_queue_node;
    Atomic<u32> m_cpu { 0 };
    u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
    Optional<u64> m_last_time_scheduled;
    u64 m_total_time_scheduled_user { 0 };
    u64 m_total_time_scheduled_kernel { 0 };
    u32 m_ticks_left { 0 };
    u32 m_times_scheduled { 0 };
    u32 m_ticks_in_user { 0 };
    u32 m_ticks_in_kernel { 0 };
    u32 m_pending_signals { 0 };
    u32 m_signal_mask { 0 };
    FlatPtr m_alternative_signal_stack { 0 };
    FlatPtr m_alternative_signal_stack_size { 0 };
    SignalBlockerSet m_signal_blocker_set;
    FlatPtr m_kernel_stack_base { 0 };
    FlatPtr m_kernel_stack_top { 0 };
    NonnullOwnPtr<Memory::Region> m_kernel_stack_region;
    VirtualAddress m_thread_specific_data;
    Optional<Memory::VirtualRange> m_thread_specific_range;
    Array<SignalActionData, NSIG> m_signal_action_data;
    Blocker* m_blocker { nullptr };
    Kernel::Mutex* m_blocking_lock { nullptr };
    u32 m_lock_requested_count { 0 };
    IntrusiveListNode<Thread> m_blocked_threads_list_node;
    LockRank m_lock_rank_mask { LockRank::None };

#if LOCK_DEBUG
    struct HoldingLockInfo {
        Mutex* lock;
        LockLocation lock_location;
        unsigned count;
    };
    Atomic<u32> m_holding_locks { 0 };
    Spinlock m_holding_locks_lock;
    Vector<HoldingLockInfo> m_holding_locks_list;
#endif

    JoinBlockerSet m_join_blocker_set;

    Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_active { false };
    bool m_is_joinable { true };
    bool m_handling_page_fault { false };
    PreviousMode m_previous_mode { PreviousMode::KernelMode }; // We always start out in kernel mode

    unsigned m_syscall_count { 0 };
    unsigned m_inode_faults { 0 };
    unsigned m_zero_faults { 0 };
    unsigned m_cow_faults { 0 };

    unsigned m_file_read_bytes { 0 };
    unsigned m_file_write_bytes { 0 };

    unsigned m_unix_socket_read_bytes { 0 };
    unsigned m_unix_socket_write_bytes { 0 };

    unsigned m_ipv4_socket_read_bytes { 0 };
    unsigned m_ipv4_socket_write_bytes { 0 };

    FPUState m_fpu_state {};
    State m_state { Invalid };
    NonnullOwnPtr<KString> m_name;
    u32 m_priority { THREAD_PRIORITY_NORMAL };

    State m_stop_state { Invalid };

    bool m_dump_backtrace_on_finalization { false };
    bool m_should_die { false };
    bool m_initialized { false };
    bool m_in_block { false };
    bool m_is_idle_thread { false };
    bool m_is_crashing { false };
    bool m_is_promise_violation_pending { false };
    Atomic<bool> m_have_any_unmasked_pending_signals { false };
    Atomic<u32> m_nested_profiler_calls { 0 };

    NonnullRefPtr<Timer> m_block_timer;

    bool m_is_profiling_suppressed { false };

    void yield_and_release_relock_big_lock();

    enum class VerifyLockNotHeld {
        Yes,
        No
    };

    void yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_held = VerifyLockNotHeld::Yes);
    void drop_thread_count(bool);

    mutable IntrusiveListNode<Thread> m_global_thread_list_node;

public:
    using ListInProcess = IntrusiveList<&Thread::m_process_thread_list_node>;
    using GlobalList = IntrusiveList<&Thread::m_global_thread_list_node>;

    static SpinlockProtected<GlobalList>& all_instances();
};

AK_ENUM_BITWISE_OPERATORS(Thread::FileBlocker::BlockFlags);

template<IteratorFunction<Thread&> Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    return Thread::all_instances().with([&](auto& list) -> IterationDecision {
        for (auto& thread : list) {
            IterationDecision decision = callback(thread);
            if (decision != IterationDecision::Continue)
                return decision;
        }
        return IterationDecision::Continue;
    });
}

template<IteratorFunction<Thread&> Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    return Thread::all_instances().with([&](auto& list) -> IterationDecision {
        for (auto& thread : list) {
            if (thread.state() != state)
                continue;
            IterationDecision decision = callback(thread);
            if (decision != IterationDecision::Continue)
                return decision;
        }
        return IterationDecision::Continue;
    });
}

template<VoidFunction<Thread&> Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    // The callback returns void here, so there is no decision to inspect;
    // iteration always runs to completion.
    return Thread::all_instances().with([&](auto& list) {
        for (auto& thread : list)
            callback(thread);
        return IterationDecision::Continue;
    });
}

template<VoidFunction<Thread&> Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    return for_each_in_state(state, [&](auto& thread) {
        callback(thread);
        return IterationDecision::Continue;
    });
}
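
// Illustrative only: iterating all runnable threads with the VoidFunction
// overload (the void-returning callback cannot stop iteration early):
//
//     Thread::for_each_in_state(Thread::Runnable, [](Thread& thread) {
//         dbgln("runnable: {}", thread);
//     });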

}

template<>
struct AK::Formatter<Kernel::Thread> : AK::Formatter<FormatString> {
    ErrorOr<void> format(FormatBuilder&, Kernel::Thread const&);
};