Thread.h

/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Concepts.h>
#include <AK/EnumBits.h>
#include <AK/HashMap.h>
#include <AK/IntrusiveList.h>
#include <AK/Optional.h>
#include <AK/OwnPtr.h>
#include <AK/String.h>
#include <AK/TemporaryChange.h>
#include <AK/Time.h>
#include <AK/Variant.h>
#include <AK/Vector.h>
#include <AK/WeakPtr.h>
#include <AK/Weakable.h>
#include <Kernel/API/KResult.h>
#include <Kernel/Arch/x86/RegisterState.h>
#include <Kernel/Arch/x86/SafeMem.h>
#include <Kernel/Debug.h>
#include <Kernel/FileSystem/InodeIdentifier.h>
#include <Kernel/Forward.h>
#include <Kernel/KString.h>
#include <Kernel/Library/ListedRefCounted.h>
#include <Kernel/Locking/LockLocation.h>
#include <Kernel/Locking/LockMode.h>
#include <Kernel/Locking/SpinlockProtected.h>
#include <Kernel/Memory/VirtualRange.h>
#include <Kernel/Scheduler.h>
#include <Kernel/TimerQueue.h>
#include <Kernel/UnixTypes.h>
#include <LibC/fd_set.h>
#include <LibC/signal_numbers.h>

namespace Kernel {

namespace Memory {
extern RecursiveSpinlock s_mm_lock;
}

enum class DispatchSignalResult {
    Deferred = 0,
    Yield,
    Terminate,
    Continue
};

struct SignalActionData {
    VirtualAddress handler_or_sigaction;
    u32 mask { 0 };
    int flags { 0 };
};

struct ThreadSpecificData {
    ThreadSpecificData* self;
};

#define THREAD_PRIORITY_MIN 1
#define THREAD_PRIORITY_LOW 10
#define THREAD_PRIORITY_NORMAL 30
#define THREAD_PRIORITY_HIGH 50
#define THREAD_PRIORITY_MAX 99

#define THREAD_AFFINITY_DEFAULT 0xffffffff

struct ThreadRegisters {
#if ARCH(I386)
    FlatPtr ss;
    FlatPtr gs;
    FlatPtr fs;
    FlatPtr es;
    FlatPtr ds;
    FlatPtr edi;
    FlatPtr esi;
    FlatPtr ebp;
    FlatPtr esp;
    FlatPtr ebx;
    FlatPtr edx;
    FlatPtr ecx;
    FlatPtr eax;
    FlatPtr eip;
    FlatPtr esp0;
    FlatPtr ss0;
#else
    FlatPtr rdi;
    FlatPtr rsi;
    FlatPtr rbp;
    FlatPtr rsp;
    FlatPtr rbx;
    FlatPtr rdx;
    FlatPtr rcx;
    FlatPtr rax;
    FlatPtr r8;
    FlatPtr r9;
    FlatPtr r10;
    FlatPtr r11;
    FlatPtr r12;
    FlatPtr r13;
    FlatPtr r14;
    FlatPtr r15;
    FlatPtr rip;
    FlatPtr rsp0;
#endif
    FlatPtr cs;

#if ARCH(I386)
    FlatPtr eflags;
    FlatPtr flags() const { return eflags; }
    void set_flags(FlatPtr value) { eflags = value; }
    void set_sp(FlatPtr value) { esp = value; }
    void set_sp0(FlatPtr value) { esp0 = value; }
    void set_ip(FlatPtr value) { eip = value; }
#else
    FlatPtr rflags;
    FlatPtr flags() const { return rflags; }
    void set_flags(FlatPtr value) { rflags = value; }
    void set_sp(FlatPtr value) { rsp = value; }
    void set_sp0(FlatPtr value) { rsp0 = value; }
    void set_ip(FlatPtr value) { rip = value; }
#endif

    FlatPtr cr3;

    FlatPtr ip() const
    {
#if ARCH(I386)
        return eip;
#else
        return rip;
#endif
    }

    FlatPtr sp() const
    {
#if ARCH(I386)
        return esp;
#else
        return rsp;
#endif
    }
};
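
// Illustrative sketch (not part of the original header): how the
// architecture-neutral accessors above might be used when preparing a new
// thread's initial context. `entry` and `stack_top` are hypothetical locals.
//
//     ThreadRegisters regs {};
//     regs.set_ip(entry);     // writes eip on i386, rip on x86_64
//     regs.set_sp(stack_top); // writes esp on i386, rsp on x86_64
//     FlatPtr resume_at = regs.ip();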
class Thread
    : public ListedRefCounted<Thread>
    , public Weakable<Thread> {
    AK_MAKE_NONCOPYABLE(Thread);
    AK_MAKE_NONMOVABLE(Thread);

    friend class Mutex;
    friend class Process;
    friend class Scheduler;
    friend struct ThreadReadyQueue;

public:
    inline static Thread* current()
    {
        return Processor::current_thread();
    }

    static KResultOr<NonnullRefPtr<Thread>> try_create(NonnullRefPtr<Process>);
    ~Thread();

    static RefPtr<Thread> from_tid(ThreadID);
    static void finalize_dying_threads();

    ThreadID tid() const { return m_tid; }
    ProcessID pid() const;

    void set_priority(u32 p) { m_priority = p; }
    u32 priority() const { return m_priority; }

    void detach()
    {
        SpinlockLocker lock(m_lock);
        m_is_joinable = false;
    }

    [[nodiscard]] bool is_joinable() const
    {
        SpinlockLocker lock(m_lock);
        return m_is_joinable;
    }

    Process& process() { return m_process; }
    const Process& process() const { return m_process; }

    // NOTE: This returns a null-terminated string.
    StringView name() const
    {
        // NOTE: Whoever is calling this needs to be holding our lock while reading the name.
        VERIFY(m_lock.is_locked_by_current_processor());
        return m_name ? m_name->view() : StringView {};
    }

    void set_name(OwnPtr<KString> name)
    {
        SpinlockLocker lock(m_lock);
        m_name = move(name);
    }

    void finalize();

    enum State : u8 {
        Invalid = 0,
        Runnable,
        Running,
        Dying,
        Dead,
        Stopped,
        Blocked
    };

    class [[nodiscard]] BlockResult {
    public:
        enum Type {
            WokeNormally,
            NotBlocked,
            InterruptedBySignal,
            InterruptedByDeath,
            InterruptedByTimeout,
        };

        BlockResult() = delete;

        BlockResult(Type type)
            : m_type(type)
        {
        }

        bool operator==(Type type) const
        {
            return m_type == type;
        }
        bool operator!=(Type type) const
        {
            return m_type != type;
        }

        [[nodiscard]] bool was_interrupted() const
        {
            switch (m_type) {
            case InterruptedBySignal:
            case InterruptedByDeath:
                return true;
            default:
                return false;
            }
        }

    private:
        Type m_type;
    };

    class BlockTimeout {
    public:
        BlockTimeout()
            : m_infinite(true)
        {
        }
        explicit BlockTimeout(bool is_absolute, const Time* time, const Time* start_time = nullptr, clockid_t clock_id = CLOCK_MONOTONIC_COARSE);

        const Time& absolute_time() const { return m_time; }
        const Time* start_time() const { return !m_infinite ? &m_start_time : nullptr; }
        clockid_t clock_id() const { return m_clock_id; }
        bool is_infinite() const { return m_infinite; }

    private:
        Time m_time {};
        Time m_start_time {};
        clockid_t m_clock_id { CLOCK_MONOTONIC_COARSE };
        bool m_infinite { false };
    };
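
    // Illustrative sketch (not part of the original header): constructing
    // timeouts. Assumes AK::Time's static constructors; `duration` is a
    // hypothetical local.
    //
    //     Time duration = Time::from_milliseconds(500);
    //     BlockTimeout relative(false, &duration); // fires 500 ms from now
    //     BlockTimeout forever;                    // is_infinite() == true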
    class BlockerSet;

    class Blocker {
        AK_MAKE_NONMOVABLE(Blocker);
        AK_MAKE_NONCOPYABLE(Blocker);

    public:
        enum class Type {
            Unknown = 0,
            File,
            Futex,
            Plan9FS,
            Join,
            Queue,
            Routing,
            Sleep,
            Wait
        };
        virtual ~Blocker();
        virtual StringView state_string() const = 0;
        virtual Type blocker_type() const = 0;
        virtual const BlockTimeout& override_timeout(const BlockTimeout& timeout) { return timeout; }
        virtual bool can_be_interrupted() const { return true; }
        virtual bool setup_blocker();

        Thread& thread() { return m_thread; }

        enum class UnblockImmediatelyReason {
            UnblockConditionAlreadyMet,
            TimeoutInThePast,
        };

        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) = 0;

        virtual void was_unblocked(bool did_timeout)
        {
            if (did_timeout) {
                SpinlockLocker lock(m_lock);
                m_did_timeout = true;
            }
        }
        void set_interrupted_by_death()
        {
            SpinlockLocker lock(m_lock);
            do_set_interrupted_by_death();
        }
        void set_interrupted_by_signal(u8 signal)
        {
            SpinlockLocker lock(m_lock);
            do_set_interrupted_by_signal(signal);
        }
        u8 was_interrupted_by_signal() const
        {
            SpinlockLocker lock(m_lock);
            return do_get_interrupted_by_signal();
        }
        virtual Thread::BlockResult block_result()
        {
            SpinlockLocker lock(m_lock);
            if (m_was_interrupted_by_death)
                return Thread::BlockResult::InterruptedByDeath;
            if (m_was_interrupted_by_signal != 0)
                return Thread::BlockResult::InterruptedBySignal;
            if (m_did_timeout)
                return Thread::BlockResult::InterruptedByTimeout;
            return Thread::BlockResult::WokeNormally;
        }

        void begin_blocking(Badge<Thread>);
        BlockResult end_blocking(Badge<Thread>, bool);

    protected:
        Blocker()
            : m_thread(*Thread::current())
        {
        }

        void do_set_interrupted_by_death()
        {
            m_was_interrupted_by_death = true;
        }
        void do_set_interrupted_by_signal(u8 signal)
        {
            VERIFY(signal != 0);
            m_was_interrupted_by_signal = signal;
        }
        void do_clear_interrupted_by_signal()
        {
            m_was_interrupted_by_signal = 0;
        }
        u8 do_get_interrupted_by_signal() const
        {
            return m_was_interrupted_by_signal;
        }
        [[nodiscard]] bool was_interrupted() const
        {
            return m_was_interrupted_by_death || m_was_interrupted_by_signal != 0;
        }
        void unblock_from_blocker()
        {
            {
                SpinlockLocker lock(m_lock);
                if (!m_is_blocking)
                    return;
                m_is_blocking = false;
            }

            m_thread->unblock_from_blocker(*this);
        }

        bool add_to_blocker_set(BlockerSet&, void* = nullptr);
        void set_blocker_set_raw_locked(BlockerSet* blocker_set) { m_blocker_set = blocker_set; }

        mutable RecursiveSpinlock m_lock;

    private:
        BlockerSet* m_blocker_set { nullptr };
        NonnullRefPtr<Thread> m_thread;
        u8 m_was_interrupted_by_signal { 0 };
        bool m_is_blocking { false };
        bool m_was_interrupted_by_death { false };
        bool m_did_timeout { false };
    };
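
    // Illustrative sketch (not part of the original header): the minimal shape
    // of a concrete Blocker, modeled on the subclasses below. `ExampleBlocker`
    // and `m_example_set` are hypothetical.
    //
    //     class ExampleBlocker final : public Blocker {
    //     public:
    //         virtual Type blocker_type() const override { return Type::Unknown; }
    //         virtual StringView state_string() const override { return "Example"sv; }
    //         virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override { }
    //         // Returning false means the condition is already met and block() won't block.
    //         virtual bool setup_blocker() override { return add_to_blocker_set(m_example_set); }
    //         bool unblock() { unblock_from_blocker(); return true; }
    //     };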
    class BlockerSet {
        AK_MAKE_NONCOPYABLE(BlockerSet);
        AK_MAKE_NONMOVABLE(BlockerSet);

    public:
        BlockerSet() = default;

        virtual ~BlockerSet()
        {
            VERIFY(!m_lock.is_locked());
            VERIFY(m_blockers.is_empty());
        }

        bool add_blocker(Blocker& blocker, void* data)
        {
            SpinlockLocker lock(m_lock);
            if (!should_add_blocker(blocker, data))
                return false;
            m_blockers.append({ &blocker, data });
            return true;
        }

        void remove_blocker(Blocker& blocker)
        {
            SpinlockLocker lock(m_lock);
            // NOTE: it's possible that the blocker is no longer present
            m_blockers.remove_all_matching([&](auto& info) {
                return info.blocker == &blocker;
            });
        }

        bool is_empty() const
        {
            SpinlockLocker lock(m_lock);
            return is_empty_locked();
        }

    protected:
        template<typename Callback>
        bool unblock_all_blockers_whose_conditions_are_met(Callback try_to_unblock_one)
        {
            SpinlockLocker lock(m_lock);
            return unblock_all_blockers_whose_conditions_are_met_locked(try_to_unblock_one);
        }

        template<typename Callback>
        bool unblock_all_blockers_whose_conditions_are_met_locked(Callback try_to_unblock_one)
        {
            VERIFY(m_lock.is_locked());
            bool stop_iterating = false;
            bool did_unblock_any = false;
            for (size_t i = 0; i < m_blockers.size() && !stop_iterating;) {
                auto& info = m_blockers[i];
                if (bool did_unblock = try_to_unblock_one(*info.blocker, info.data, stop_iterating)) {
                    m_blockers.remove(i);
                    did_unblock_any = true;
                    continue;
                }
                i++;
            }
            return did_unblock_any;
        }

        bool is_empty_locked() const
        {
            VERIFY(m_lock.is_locked());
            return m_blockers.is_empty();
        }

        virtual bool should_add_blocker(Blocker&, void*) { return true; }

        struct BlockerInfo {
            Blocker* blocker;
            void* data;
        };

        Vector<BlockerInfo, 4> do_take_blockers(size_t count)
        {
            if (m_blockers.size() <= count)
                return move(m_blockers);

            size_t move_count = (count <= m_blockers.size()) ? count : m_blockers.size();
            VERIFY(move_count > 0);

            Vector<BlockerInfo, 4> taken_blockers;
            taken_blockers.ensure_capacity(move_count);
            for (size_t i = 0; i < move_count; i++)
                taken_blockers.append(m_blockers.take(i));
            m_blockers.remove(0, move_count);
            return taken_blockers;
        }

        void do_append_blockers(Vector<BlockerInfo, 4>&& blockers_to_append)
        {
            if (blockers_to_append.is_empty())
                return;

            if (m_blockers.is_empty()) {
                m_blockers = move(blockers_to_append);
                return;
            }

            m_blockers.ensure_capacity(m_blockers.size() + blockers_to_append.size());
            for (size_t i = 0; i < blockers_to_append.size(); i++)
                m_blockers.append(blockers_to_append.take(i));
            blockers_to_append.clear();
        }

        mutable Spinlock<u8> m_lock;

    private:
        Vector<BlockerInfo, 4> m_blockers;
    };
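
    // Illustrative sketch (not part of the original header): how a BlockerSet
    // subclass typically wakes its waiters, following the JoinBlockerSet and
    // WaitBlockerSet pattern below. `ExampleBlocker` is hypothetical.
    //
    //     bool wake_all_ready()
    //     {
    //         return unblock_all_blockers_whose_conditions_are_met(
    //             [&](Blocker& b, void*, bool&) {
    //                 // Returning true removes the blocker from the set.
    //                 return static_cast<ExampleBlocker&>(b).unblock();
    //             });
    //     }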
    friend class JoinBlocker;
    class JoinBlocker final : public Blocker {
    public:
        explicit JoinBlocker(Thread& joinee, KResult& try_join_result, void*& joinee_exit_value);
        virtual Type blocker_type() const override { return Type::Join; }
        virtual StringView state_string() const override { return "Joining"sv; }
        virtual bool can_be_interrupted() const override { return false; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual bool setup_blocker() override;

        bool unblock(void*, bool);

    private:
        NonnullRefPtr<Thread> m_joinee;
        void*& m_joinee_exit_value;
        KResult& m_try_join_result;
        bool m_did_unblock { false };
    };

    class WaitQueueBlocker final : public Blocker {
    public:
        explicit WaitQueueBlocker(WaitQueue&, StringView block_reason = {});
        virtual ~WaitQueueBlocker();

        virtual Type blocker_type() const override { return Type::Queue; }
        virtual StringView state_string() const override { return m_block_reason.is_null() ? "Queue"sv : m_block_reason; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override { }
        virtual bool setup_blocker() override;

        bool unblock();

    protected:
        WaitQueue& m_wait_queue;
        StringView m_block_reason;
        bool m_did_unblock { false };
    };

    class FutexBlocker final : public Blocker {
    public:
        explicit FutexBlocker(FutexQueue&, u32);
        virtual ~FutexBlocker();

        virtual Type blocker_type() const override { return Type::Futex; }
        virtual StringView state_string() const override { return "Futex"sv; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override { }
        virtual bool setup_blocker() override;

        u32 bitset() const { return m_bitset; }

        void begin_requeue()
        {
            // We need to hold the lock until we moved it over
            m_relock_flags = m_lock.lock();
        }

        void finish_requeue(FutexQueue&);

        bool unblock_bitset(u32 bitset);
        bool unblock(bool force = false);

    protected:
        FutexQueue& m_futex_queue;
        u32 m_bitset { 0 };
        u32 m_relock_flags { 0 };
        bool m_did_unblock { false };
    };

    class FileBlocker : public Blocker {
    public:
        enum class BlockFlags : u16 {
            None = 0,

            Read = 1 << 0,
            Write = 1 << 1,
            ReadPriority = 1 << 2,

            Accept = 1 << 3,
            Connect = 1 << 4,
            SocketFlags = Accept | Connect,

            WriteNotOpen = 1 << 5,
            WriteError = 1 << 6,
            WriteHangUp = 1 << 7,
            ReadHangUp = 1 << 8,
            Exception = WriteNotOpen | WriteError | WriteHangUp | ReadHangUp,
        };

        virtual Type blocker_type() const override { return Type::File; }

        virtual bool unblock_if_conditions_are_met(bool, void*) = 0;
    };

    class FileDescriptionBlocker : public FileBlocker {
    public:
        const FileDescription& blocked_description() const;

        virtual bool unblock_if_conditions_are_met(bool, void*) override;
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual bool setup_blocker() override;

    protected:
        explicit FileDescriptionBlocker(FileDescription&, BlockFlags, BlockFlags&);

    private:
        NonnullRefPtr<FileDescription> m_blocked_description;
        const BlockFlags m_flags;
        BlockFlags& m_unblocked_flags;
        bool m_did_unblock { false };
    };

    class AcceptBlocker final : public FileDescriptionBlocker {
    public:
        explicit AcceptBlocker(FileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Accepting"sv; }
    };

    class ConnectBlocker final : public FileDescriptionBlocker {
    public:
        explicit ConnectBlocker(FileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Connecting"sv; }
    };

    class WriteBlocker final : public FileDescriptionBlocker {
    public:
        explicit WriteBlocker(FileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Writing"sv; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;

    private:
        BlockTimeout m_timeout;
    };

    class ReadBlocker final : public FileDescriptionBlocker {
    public:
        explicit ReadBlocker(FileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Reading"sv; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;

    private:
        BlockTimeout m_timeout;
    };

    class SleepBlocker final : public Blocker {
    public:
        explicit SleepBlocker(const BlockTimeout&, Time* = nullptr);
        virtual StringView state_string() const override { return "Sleeping"sv; }
        virtual Type blocker_type() const override { return Type::Sleep; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual void was_unblocked(bool) override;
        virtual Thread::BlockResult block_result() override;

    private:
        void calculate_remaining();

        BlockTimeout m_deadline;
        Time* m_remaining;
    };

    class SelectBlocker final : public FileBlocker {
    public:
        struct FDInfo {
            NonnullRefPtr<FileDescription> description;
            BlockFlags block_flags { BlockFlags::None };
            BlockFlags unblocked_flags { BlockFlags::None };
        };

        using FDVector = Vector<FDInfo, FD_SETSIZE>;
        explicit SelectBlocker(FDVector&);
        virtual ~SelectBlocker();

        virtual bool unblock_if_conditions_are_met(bool, void*) override;
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual void was_unblocked(bool) override;
        virtual StringView state_string() const override { return "Selecting"sv; }
        virtual bool setup_blocker() override;

    private:
        size_t collect_unblocked_flags();

        FDVector& m_fds;
        bool m_did_unblock { false };
    };

    class WaitBlocker final : public Blocker {
    public:
        enum class UnblockFlags {
            Terminated,
            Stopped,
            Continued,
            Disowned
        };

        WaitBlocker(int wait_options, Variant<Empty, NonnullRefPtr<Process>, NonnullRefPtr<ProcessGroup>> waitee, KResultOr<siginfo_t>& result);
        virtual StringView state_string() const override { return "Waiting"sv; }
        virtual Type blocker_type() const override { return Type::Wait; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual void was_unblocked(bool) override;
        virtual bool setup_blocker() override;

        bool unblock(Process& process, UnblockFlags flags, u8 signal, bool from_add_blocker);
        bool is_wait() const { return !(m_wait_options & WNOWAIT); }

    private:
        void do_was_disowned();
        void do_set_result(const siginfo_t&);

        const int m_wait_options;
        KResultOr<siginfo_t>& m_result;
        Variant<Empty, NonnullRefPtr<Process>, NonnullRefPtr<ProcessGroup>> m_waitee;
        bool m_did_unblock { false };
        bool m_got_sigchild { false };
    };

    class WaitBlockerSet final : public BlockerSet {
        friend class WaitBlocker;

    public:
        explicit WaitBlockerSet(Process& process)
            : m_process(process)
        {
        }

        void disowned_by_waiter(Process&);
        bool unblock(Process&, WaitBlocker::UnblockFlags, u8);
        void try_unblock(WaitBlocker&);
        void finalize();

    protected:
        virtual bool should_add_blocker(Blocker&, void*) override;

    private:
        struct ProcessBlockInfo {
            NonnullRefPtr<Process> process;
            WaitBlocker::UnblockFlags flags;
            u8 signal;
            bool was_waited { false };

            explicit ProcessBlockInfo(NonnullRefPtr<Process>&&, WaitBlocker::UnblockFlags, u8);
            ~ProcessBlockInfo();
        };

        Process& m_process;
        Vector<ProcessBlockInfo, 2> m_processes;
        bool m_finalized { false };
    };

    template<typename AddBlockerHandler>
    KResult try_join(AddBlockerHandler add_blocker)
    {
        if (Thread::current() == this)
            return EDEADLK;

        SpinlockLocker lock(m_lock);
        if (!m_is_joinable || state() == Dead)
            return EINVAL;

        add_blocker();

        // From this point on the thread is no longer joinable by anyone
        // else. It also means that if the join is timed, it becomes
        // detached when a timeout happens.
        m_is_joinable = false;
        return KSuccess;
    }
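
    // Illustrative sketch (not part of the original header): how a joiner
    // pairs try_join() with blocker registration. The handler runs under the
    // joinee's m_lock, so registering there cannot race with the joinee dying.
    // `joinee` is a hypothetical RefPtr<Thread>.
    //
    //     auto result = joinee->try_join([&]() {
    //         // e.g. add a JoinBlocker to the joinee's blocker set here
    //     });
    //     if (result.is_error())
    //         return result; // EDEADLK (self-join) or EINVAL (not joinable)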
    void did_schedule() { ++m_times_scheduled; }
    u32 times_scheduled() const { return m_times_scheduled; }

    void resume_from_stopped();

    [[nodiscard]] bool should_be_stopped() const;
    [[nodiscard]] bool is_stopped() const { return m_state == Stopped; }
    [[nodiscard]] bool is_blocked() const { return m_state == Blocked; }
    [[nodiscard]] bool is_in_block() const
    {
        SpinlockLocker lock(m_block_lock);
        return m_in_block;
    }

    u32 cpu() const { return m_cpu.load(AK::MemoryOrder::memory_order_consume); }
    void set_cpu(u32 cpu) { m_cpu.store(cpu, AK::MemoryOrder::memory_order_release); }

    u32 affinity() const { return m_cpu_affinity; }
    void set_affinity(u32 affinity) { m_cpu_affinity = affinity; }

    RegisterState& get_register_dump_from_stack();
    const RegisterState& get_register_dump_from_stack() const { return const_cast<Thread*>(this)->get_register_dump_from_stack(); }

    DebugRegisterState& debug_register_state() { return m_debug_register_state; }
    const DebugRegisterState& debug_register_state() const { return m_debug_register_state; }

    ThreadRegisters& regs() { return m_regs; }
    ThreadRegisters const& regs() const { return m_regs; }

    State state() const { return m_state; }
    StringView state_string() const;

    VirtualAddress thread_specific_data() const { return m_thread_specific_data; }
    size_t thread_specific_region_size() const;
    size_t thread_specific_region_alignment() const;

    ALWAYS_INLINE void yield_if_stopped()
    {
        // If some thread stopped us, we need to yield to someone else
        // We check this when entering/exiting a system call. A thread
        // may continue to execute in user land until the next timer
        // tick or entering the next system call, or if it's in kernel
        // mode then we will intercept prior to returning back to user
        // mode.
        SpinlockLocker lock(m_lock);
        while (state() == Thread::Stopped) {
            lock.unlock();
            // We shouldn't be holding the big lock here
            yield_without_releasing_big_lock();
            lock.lock();
        }
    }

    void block(Kernel::Mutex&, SpinlockLocker<Spinlock<u8>>&, u32);

    template<typename BlockerType, class... Args>
    [[nodiscard]] BlockResult block(const BlockTimeout& timeout, Args&&... args)
    {
        VERIFY(!Processor::current_in_irq());
        VERIFY(this == Thread::current());
        ScopedCritical critical;
        VERIFY(!Memory::s_mm_lock.is_locked_by_current_processor());

        SpinlockLocker block_lock(m_block_lock);
        // We need to hold m_block_lock so that nobody can unblock a blocker as soon
        // as it is constructed and registered elsewhere
        VERIFY(!m_in_block);
        TemporaryChange in_block_change(m_in_block, true);

        BlockerType blocker(forward<Args>(args)...);

        if (!blocker.setup_blocker()) {
            blocker.will_unblock_immediately_without_blocking(Blocker::UnblockImmediatelyReason::UnblockConditionAlreadyMet);
            return BlockResult::NotBlocked;
        }

        SpinlockLocker scheduler_lock(g_scheduler_lock);
        // Relaxed semantics are fine for timeout_unblocked because we
        // synchronize on the spin locks already.
        Atomic<bool, AK::MemoryOrder::memory_order_relaxed> timeout_unblocked(false);
        bool timer_was_added = false;

        switch (state()) {
        case Thread::Stopped:
            // It's possible that we were requested to be stopped!
            break;
        case Thread::Running:
            VERIFY(m_blocker == nullptr);
            break;
        default:
            VERIFY_NOT_REACHED();
        }

        m_blocker = &blocker;

        if (auto& block_timeout = blocker.override_timeout(timeout); !block_timeout.is_infinite()) {
            // Process::kill_all_threads may be called at any time, which will mark all
            // threads to die. In that case
            timer_was_added = TimerQueue::the().add_timer_without_id(*m_block_timer, block_timeout.clock_id(), block_timeout.absolute_time(), [&]() {
                VERIFY(!Processor::current_in_irq());
                VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
                VERIFY(!m_block_lock.is_locked_by_current_processor());
                // NOTE: this may execute on the same or any other processor!
                SpinlockLocker scheduler_lock(g_scheduler_lock);
                SpinlockLocker block_lock(m_block_lock);
                if (m_blocker && timeout_unblocked.exchange(true) == false)
                    unblock();
            });
            if (!timer_was_added) {
                // Timeout is already in the past
                blocker.will_unblock_immediately_without_blocking(Blocker::UnblockImmediatelyReason::TimeoutInThePast);
                m_blocker = nullptr;
                return BlockResult::InterruptedByTimeout;
            }
        }

        blocker.begin_blocking({});

        set_state(Thread::Blocked);

        scheduler_lock.unlock();
        block_lock.unlock();

        dbgln_if(THREAD_DEBUG, "Thread {} blocking on {} ({}) -->", *this, &blocker, blocker.state_string());
        bool did_timeout = false;
        u32 lock_count_to_restore = 0;
        auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
        for (;;) {
            // Yield to the scheduler, and wait for us to resume unblocked.
            VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
            VERIFY(Processor::in_critical());
            yield_without_releasing_big_lock();
            VERIFY(Processor::in_critical());

            SpinlockLocker block_lock2(m_block_lock);
            if (should_be_stopped() || state() == Stopped) {
                dbgln("Thread should be stopped, current state: {}", state_string());
                set_state(Thread::Blocked);
                continue;
            }
            if (m_blocker && !m_blocker->can_be_interrupted() && !m_should_die) {
                block_lock2.unlock();
                dbgln("Thread should not be unblocking, current state: {}", state_string());
                set_state(Thread::Blocked);
                continue;
            }
            // Prevent the timeout from unblocking this thread if it happens to
            // be in the process of firing already
            did_timeout |= timeout_unblocked.exchange(true);
            if (m_blocker) {
                // Remove ourselves...
                VERIFY(m_blocker == &blocker);
                m_blocker = nullptr;
            }
            dbgln_if(THREAD_DEBUG, "<-- Thread {} unblocked from {} ({})", *this, &blocker, blocker.state_string());
            break;
        }

        if (blocker.was_interrupted_by_signal()) {
            SpinlockLocker scheduler_lock(g_scheduler_lock);
            SpinlockLocker lock(m_lock);
            dispatch_one_pending_signal();
        }

        // Notify the blocker that we are no longer blocking. It may need
        // to clean up now while we're still holding m_lock
        auto result = blocker.end_blocking({}, did_timeout); // calls was_unblocked internally

        if (timer_was_added && !did_timeout) {
            // Cancel the timer while not holding any locks. This allows
            // the timer function to complete before we remove it
            // (e.g. if it's on another processor)
            TimerQueue::the().cancel_timer(*m_block_timer);
        }
        if (previous_locked != LockMode::Unlocked) {
            // NOTE: this may trigger another call to Thread::block(), so
            // we need to do this after we're all done and restored m_in_block!
            relock_process(previous_locked, lock_count_to_restore);
        }
        return result;
    }
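
    // Illustrative sketch (not part of the original header): a typical call
    // into block<>(), mirroring what the sleep() helpers below do. The
    // infinite first argument is block()'s own timeout; SleepBlocker supplies
    // the real deadline via override_timeout(). `duration` is hypothetical.
    //
    //     Time duration = Time::from_seconds(1);
    //     auto result = Thread::current()->block<SleepBlocker>(
    //         {}, BlockTimeout(false, &duration), nullptr);
    //     if (result.was_interrupted())
    //         return EINTR;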
    u32 unblock_from_lock(Kernel::Mutex&);
    void unblock_from_blocker(Blocker&);
    void unblock(u8 signal = 0);

    template<class... Args>
    Thread::BlockResult wait_on(WaitQueue& wait_queue, const Thread::BlockTimeout& timeout, Args&&... args)
    {
        VERIFY(this == Thread::current());
        return block<Thread::WaitQueueBlocker>(timeout, wait_queue, forward<Args>(args)...);
    }

    BlockResult sleep(clockid_t, const Time&, Time* = nullptr);
    BlockResult sleep(const Time& duration, Time* remaining_time = nullptr)
    {
        return sleep(CLOCK_MONOTONIC_COARSE, duration, remaining_time);
    }
    BlockResult sleep_until(clockid_t, const Time&);
    BlockResult sleep_until(const Time& duration)
    {
        return sleep_until(CLOCK_MONOTONIC_COARSE, duration);
    }
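
    // Illustrative sketch (not part of the original header): parking on a
    // WaitQueue with a relative deadline. `queue` and `deadline` are
    // hypothetical; another thread's queue.wake_one() / wake_all() unblocks us.
    //
    //     auto result = Thread::current()->wait_on(
    //         queue, BlockTimeout(false, &deadline), "Example"sv);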
    // Tell this thread to unblock if needed,
    // gracefully unwind the stack and die.
    void set_should_die();
    [[nodiscard]] bool should_die() const { return m_should_die; }
    void die_if_needed();

    void exit(void* = nullptr);

    void update_time_scheduled(u64, bool, bool);
    bool tick();
    void set_ticks_left(u32 t) { m_ticks_left = t; }
    u32 ticks_left() const { return m_ticks_left; }

    FlatPtr kernel_stack_base() const { return m_kernel_stack_base; }
    FlatPtr kernel_stack_top() const { return m_kernel_stack_top; }

    void set_state(State, u8 = 0);

    [[nodiscard]] bool is_initialized() const { return m_initialized; }
    void set_initialized(bool initialized) { m_initialized = initialized; }

    void send_urgent_signal_to_self(u8 signal);
    void send_signal(u8 signal, Process* sender);

    u32 update_signal_mask(u32 signal_mask);
    u32 signal_mask_block(sigset_t signal_set, bool block);
    u32 signal_mask() const;
    void clear_signals();

    KResultOr<u32> peek_debug_register(u32 register_index);
    KResult poke_debug_register(u32 register_index, u32 data);

    void set_dump_backtrace_on_finalization() { m_dump_backtrace_on_finalization = true; }

    DispatchSignalResult dispatch_one_pending_signal();
    DispatchSignalResult try_dispatch_one_pending_signal(u8 signal);
    DispatchSignalResult dispatch_signal(u8 signal);
    void check_dispatch_pending_signal();
    [[nodiscard]] bool has_unmasked_pending_signals() const { return m_have_any_unmasked_pending_signals.load(AK::memory_order_consume); }
    [[nodiscard]] bool should_ignore_signal(u8 signal) const;
    [[nodiscard]] bool has_signal_handler(u8 signal) const;
    u32 pending_signals() const;
    u32 pending_signals_for_state() const;

    FPUState& fpu_state() { return m_fpu_state; }

    KResult make_thread_specific_region(Badge<Process>);

    unsigned syscall_count() const { return m_syscall_count; }
    void did_syscall() { ++m_syscall_count; }
    unsigned inode_faults() const { return m_inode_faults; }
    void did_inode_fault() { ++m_inode_faults; }
    unsigned zero_faults() const { return m_zero_faults; }
    void did_zero_fault() { ++m_zero_faults; }
    unsigned cow_faults() const { return m_cow_faults; }
    void did_cow_fault() { ++m_cow_faults; }

    unsigned file_read_bytes() const { return m_file_read_bytes; }
    unsigned file_write_bytes() const { return m_file_write_bytes; }

    void did_file_read(unsigned bytes)
    {
        m_file_read_bytes += bytes;
    }

    void did_file_write(unsigned bytes)
    {
        m_file_write_bytes += bytes;
    }

    unsigned unix_socket_read_bytes() const { return m_unix_socket_read_bytes; }
    unsigned unix_socket_write_bytes() const { return m_unix_socket_write_bytes; }

    void did_unix_socket_read(unsigned bytes)
    {
        m_unix_socket_read_bytes += bytes;
    }

    void did_unix_socket_write(unsigned bytes)
    {
        m_unix_socket_write_bytes += bytes;
    }

    unsigned ipv4_socket_read_bytes() const { return m_ipv4_socket_read_bytes; }
    unsigned ipv4_socket_write_bytes() const { return m_ipv4_socket_write_bytes; }

    void did_ipv4_socket_read(unsigned bytes)
    {
        m_ipv4_socket_read_bytes += bytes;
    }

    void did_ipv4_socket_write(unsigned bytes)
    {
        m_ipv4_socket_write_bytes += bytes;
    }

    void set_active(bool active) { m_is_active = active; }

    u32 saved_critical() const { return m_saved_critical; }
    void save_critical(u32 critical) { m_saved_critical = critical; }

    [[nodiscard]] bool is_active() const { return m_is_active; }

    [[nodiscard]] bool is_finalizable() const
    {
        // We can't finalize as long as this thread is still running
        // Note that checking for Running state here isn't sufficient
        // as the thread may not be in Running state but switching out.
        // m_is_active is set to false once the context switch is
        // complete and the thread is not executing on any processor.
        if (m_is_active.load(AK::memory_order_acquire))
            return false;

        // We can't finalize until the thread is either detached or
        // a join has started. We can't make m_is_joinable atomic
        // because that would introduce a race in try_join.
        SpinlockLocker lock(m_lock);
        return !m_is_joinable;
    }

    KResultOr<NonnullRefPtr<Thread>> try_clone(Process&);

    template<IteratorFunction<Thread&> Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<IteratorFunction<Thread&> Callback>
    static IterationDecision for_each(Callback);

    template<VoidFunction<Thread&> Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<VoidFunction<Thread&> Callback>
    static IterationDecision for_each(Callback);

    static constexpr u32 default_kernel_stack_size = 65536;
    static constexpr u32 default_userspace_stack_size = 1 * MiB;

    u64 time_in_user() const { return m_total_time_scheduled_user; }
    u64 time_in_kernel() const { return m_total_time_scheduled_kernel; }

    enum class PreviousMode : u8 {
        KernelMode = 0,
        UserMode
    };
    PreviousMode previous_mode() const { return m_previous_mode; }
    bool set_previous_mode(PreviousMode mode)
    {
        if (m_previous_mode == mode)
            return false;
        m_previous_mode = mode;
        return true;
    }

    TrapFrame*& current_trap() { return m_current_trap; }
    TrapFrame const* const& current_trap() const { return m_current_trap; }

    RecursiveSpinlock& get_lock() const { return m_lock; }

#if LOCK_DEBUG
    void holding_lock(Mutex& lock, int refs_delta, LockLocation const& location)
    {
        VERIFY(refs_delta != 0);
        m_holding_locks.fetch_add(refs_delta, AK::MemoryOrder::memory_order_relaxed);
        SpinlockLocker list_lock(m_holding_locks_lock);
        if (refs_delta > 0) {
            bool have_existing = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    have_existing = true;
                    info.count += refs_delta;
                    break;
                }
            }
            if (!have_existing)
                m_holding_locks_list.append({ &lock, location, 1 });
        } else {
            VERIFY(refs_delta < 0);
            bool found = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    VERIFY(info.count >= (unsigned)-refs_delta);
                    info.count -= (unsigned)-refs_delta;
                    if (info.count == 0)
                        m_holding_locks_list.remove(i);
                    found = true;
                    break;
                }
            }
            VERIFY(found);
        }
    }
    u32 lock_count() const
    {
        return m_holding_locks.load(AK::MemoryOrder::memory_order_relaxed);
    }
#endif

    bool is_handling_page_fault() const
    {
        return m_handling_page_fault;
    }
    void set_handling_page_fault(bool b) { m_handling_page_fault = b; }
    void set_idle_thread() { m_is_idle_thread = true; }
    bool is_idle_thread() const { return m_is_idle_thread; }

    ALWAYS_INLINE u32 enter_profiler()
    {
        return m_nested_profiler_calls.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);
    }

    ALWAYS_INLINE u32 leave_profiler()
    {
        return m_nested_profiler_calls.fetch_sub(1, AK::MemoryOrder::memory_order_acquire);
    }

    bool is_profiling_suppressed() const { return m_is_profiling_suppressed; }
    void set_profiling_suppressed() { m_is_profiling_suppressed = true; }

    String backtrace();

private:
    Thread(NonnullRefPtr<Process>, NonnullOwnPtr<Memory::Region>, NonnullRefPtr<Timer>, OwnPtr<KString>);

    IntrusiveListNode<Thread> m_process_thread_list_node;
    int m_runnable_priority { -1 };

    friend class WaitQueue;

    class JoinBlockerSet final : public BlockerSet {
    public:
        void thread_did_exit(void* exit_value)
        {
            SpinlockLocker lock(m_lock);
            VERIFY(!m_thread_did_exit);
            m_thread_did_exit = true;
            m_exit_value.store(exit_value, AK::MemoryOrder::memory_order_release);
            do_unblock_joiner();
        }
        void thread_finalizing()
        {
            SpinlockLocker lock(m_lock);
            do_unblock_joiner();
        }
        void* exit_value() const
        {
            VERIFY(m_thread_did_exit);
            return m_exit_value.load(AK::MemoryOrder::memory_order_acquire);
        }

        void try_unblock(JoinBlocker& blocker)
        {
            SpinlockLocker lock(m_lock);
            if (m_thread_did_exit)
                blocker.unblock(exit_value(), false);
        }

    protected:
        virtual bool should_add_blocker(Blocker& b, void*) override
        {
            VERIFY(b.blocker_type() == Blocker::Type::Join);
            auto& blocker = static_cast<JoinBlocker&>(b);

            // NOTE: m_lock is held already!
            if (m_thread_did_exit) {
                blocker.unblock(exit_value(), true);
                return false;
            }
            return true;
        }

    private:
        void do_unblock_joiner()
        {
            unblock_all_blockers_whose_conditions_are_met_locked([&](Blocker& b, void*, bool&) {
                VERIFY(b.blocker_type() == Blocker::Type::Join);
                auto& blocker = static_cast<JoinBlocker&>(b);
                return blocker.unblock(exit_value(), false);
            });
        }

        Atomic<void*> m_exit_value { nullptr };
        bool m_thread_did_exit { false };
    };

    LockMode unlock_process_if_locked(u32&);
    void relock_process(LockMode, u32);
    void reset_fpu_state();

    mutable RecursiveSpinlock m_lock;
    mutable RecursiveSpinlock m_block_lock;
    NonnullRefPtr<Process> m_process;
    ThreadID m_tid { -1 };
    ThreadRegisters m_regs {};
    DebugRegisterState m_debug_register_state {};
    TrapFrame* m_current_trap { nullptr };
    u32 m_saved_critical { 1 };
    IntrusiveListNode<Thread> m_ready_queue_node;
    Atomic<u32> m_cpu { 0 };
    u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
    Optional<u64> m_last_time_scheduled;
    u64 m_total_time_scheduled_user { 0 };
    u64 m_total_time_scheduled_kernel { 0 };
    u32 m_ticks_left { 0 };
    u32 m_times_scheduled { 0 };
    u32 m_ticks_in_user { 0 };
    u32 m_ticks_in_kernel { 0 };
    u32 m_pending_signals { 0 };
    u32 m_signal_mask { 0 };
    FlatPtr m_kernel_stack_base { 0 };
    FlatPtr m_kernel_stack_top { 0 };
    OwnPtr<Memory::Region> m_kernel_stack_region;
    VirtualAddress m_thread_specific_data;
    Optional<Memory::VirtualRange> m_thread_specific_range;
    Array<SignalActionData, NSIG> m_signal_action_data;
    Blocker* m_blocker { nullptr };
    Kernel::Mutex* m_blocking_lock { nullptr };
    u32 m_lock_requested_count { 0 };
    IntrusiveListNode<Thread> m_blocked_threads_list_node;

#if LOCK_DEBUG
    struct HoldingLockInfo {
        Mutex* lock;
        LockLocation lock_location;
        unsigned count;
    };
    Atomic<u32> m_holding_locks { 0 };
    Spinlock<u8> m_holding_locks_lock;
    Vector<HoldingLockInfo> m_holding_locks_list;
#endif

    JoinBlockerSet m_join_blocker_set;

    Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_active { false };
    bool m_is_joinable { true };
    bool m_handling_page_fault { false };
    PreviousMode m_previous_mode { PreviousMode::KernelMode }; // We always start out in kernel mode

    unsigned m_syscall_count { 0 };
    unsigned m_inode_faults { 0 };
    unsigned m_zero_faults { 0 };
    unsigned m_cow_faults { 0 };

    unsigned m_file_read_bytes { 0 };
    unsigned m_file_write_bytes { 0 };

    unsigned m_unix_socket_read_bytes { 0 };
    unsigned m_unix_socket_write_bytes { 0 };

    unsigned m_ipv4_socket_read_bytes { 0 };
    unsigned m_ipv4_socket_write_bytes { 0 };

    FPUState m_fpu_state {};
    State m_state { Invalid };
    OwnPtr<KString> m_name;
    u32 m_priority { THREAD_PRIORITY_NORMAL };

    State m_stop_state { Invalid };

    bool m_dump_backtrace_on_finalization { false };
    bool m_should_die { false };
    bool m_initialized { false };
    bool m_in_block { false };
    bool m_is_idle_thread { false };
    Atomic<bool> m_have_any_unmasked_pending_signals { false };
    Atomic<u32> m_nested_profiler_calls { 0 };

    NonnullRefPtr<Timer> m_block_timer;

    bool m_is_profiling_suppressed { false };

    void yield_and_release_relock_big_lock();

    enum class VerifyLockNotHeld {
        Yes,
        No
    };

    void yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_held = VerifyLockNotHeld::Yes);
    void drop_thread_count(bool);

    mutable IntrusiveListNode<Thread> m_global_thread_list_node;

public:
    using ListInProcess = IntrusiveList<Thread, RawPtr<Thread>, &Thread::m_process_thread_list_node>;
    using GlobalList = IntrusiveList<Thread, RawPtr<Thread>, &Thread::m_global_thread_list_node>;

    static SpinlockProtected<GlobalList>& all_instances();
};

AK_ENUM_BITWISE_OPERATORS(Thread::FileBlocker::BlockFlags);
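
// Illustrative sketch (not part of the original header): with the bitwise
// operators defined above, BlockFlags compose like any AK flags enum, and
// has_flag() from AK/EnumBits.h tests membership.
//
//     auto flags = Thread::FileBlocker::BlockFlags::Read
//         | Thread::FileBlocker::BlockFlags::Write;
//     if (has_flag(flags, Thread::FileBlocker::BlockFlags::Read)) {
//         // ...
//     }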

template<IteratorFunction<Thread&> Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    return Thread::all_instances().with([&](auto& list) -> IterationDecision {
        for (auto& thread : list) {
            IterationDecision decision = callback(thread);
            if (decision != IterationDecision::Continue)
                return decision;
        }
        return IterationDecision::Continue;
    });
}

template<IteratorFunction<Thread&> Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    return Thread::all_instances().with([&](auto& list) -> IterationDecision {
        for (auto& thread : list) {
            if (thread.state() != state)
                continue;
            IterationDecision decision = callback(thread);
            if (decision != IterationDecision::Continue)
                return decision;
        }
        return IterationDecision::Continue;
    });
}

template<VoidFunction<Thread&> Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    // A VoidFunction callback returns void, so there is no decision to
    // inspect; just invoke it for every thread.
    return Thread::all_instances().with([&](auto& list) {
        for (auto& thread : list)
            callback(thread);
        return IterationDecision::Continue;
    });
}

template<VoidFunction<Thread&> Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    return for_each_in_state(state, [&](auto& thread) {
        callback(thread);
        return IterationDecision::Continue;
    });
}
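
// Illustrative sketch (not part of the original header): iterating threads in
// a given state with the VoidFunction overload above.
//
//     Thread::for_each_in_state(Thread::Runnable, [](Thread& thread) {
//         dbgln("runnable: {}", thread);
//     });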

}

template<>
struct AK::Formatter<Kernel::Thread> : AK::Formatter<FormatString> {
    void format(FormatBuilder&, const Kernel::Thread&);
};
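
// Illustrative sketch (not part of the original header): the formatter above
// lets a Thread be passed directly to the formatting APIs, as the dbgln_if()
// calls in Thread::block() already do.
//
//     dbgln("Current thread: {}", *Thread::current());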