Thread.h

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include <AK/Function.h>
#include <AK/HashMap.h>
#include <AK/IntrusiveList.h>
#include <AK/Optional.h>
#include <AK/OwnPtr.h>
#include <AK/String.h>
#include <AK/Time.h>
#include <AK/Vector.h>
#include <AK/WeakPtr.h>
#include <AK/Weakable.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Arch/i386/SafeMem.h>
#include <Kernel/Debug.h>
#include <Kernel/Forward.h>
#include <Kernel/KResult.h>
#include <Kernel/LockMode.h>
#include <Kernel/Scheduler.h>
#include <Kernel/ThreadTracer.h>
#include <Kernel/TimerQueue.h>
#include <Kernel/UnixTypes.h>
#include <LibC/fd_set.h>
#include <LibELF/AuxiliaryVector.h>

namespace Kernel {

extern RecursiveSpinLock s_mm_lock;

enum class DispatchSignalResult {
    Deferred = 0,
    Yield,
    Terminate,
    Continue
};

struct SignalActionData {
    VirtualAddress handler_or_sigaction;
    u32 mask { 0 };
    int flags { 0 };
};

struct ThreadSpecificData {
    ThreadSpecificData* self;
};

#define THREAD_PRIORITY_MIN 1
#define THREAD_PRIORITY_LOW 10
#define THREAD_PRIORITY_NORMAL 30
#define THREAD_PRIORITY_HIGH 50
#define THREAD_PRIORITY_MAX 99

#define THREAD_AFFINITY_DEFAULT 0xffffffff

class Thread
    : public RefCounted<Thread>
    , public Weakable<Thread> {
    AK_MAKE_NONCOPYABLE(Thread);
    AK_MAKE_NONMOVABLE(Thread);

    friend class Process;
    friend class Scheduler;
    friend class ThreadReadyQueue;

    static SpinLock<u8> g_tid_map_lock;
    static HashMap<ThreadID, Thread*>* g_tid_map;

public:
    inline static Thread* current()
    {
        return Processor::current_thread();
    }

    static void initialize();

    explicit Thread(NonnullRefPtr<Process>);
    ~Thread();

    static RefPtr<Thread> from_tid(ThreadID);
    static void finalize_dying_threads();

    ThreadID tid() const { return m_tid; }
    ProcessID pid() const;

    void set_priority(u32 p) { m_priority = p; }
    u32 priority() const { return m_priority; }

    void detach()
    {
        ScopedSpinLock lock(m_lock);
        m_is_joinable = false;
    }

    [[nodiscard]] bool is_joinable() const
    {
        ScopedSpinLock lock(m_lock);
        return m_is_joinable;
    }

    Process& process() { return m_process; }
    const Process& process() const { return m_process; }

    String backtrace();

    String name() const
    {
        // Because the name can be changed, we can't return a const
        // reference here. We must make a copy.
        ScopedSpinLock lock(m_lock);
        return m_name;
    }
    void set_name(const StringView& s)
    {
        ScopedSpinLock lock(m_lock);
        m_name = s;
    }
    void set_name(String&& name)
    {
        ScopedSpinLock lock(m_lock);
        m_name = move(name);
    }

    void finalize();

    enum State : u8 {
        Invalid = 0,
        Runnable,
        Running,
        Dying,
        Dead,
        Stopped,
        Blocked
    };

    class BlockResult {
    public:
        enum Type {
            WokeNormally,
            NotBlocked,
            InterruptedBySignal,
            InterruptedByDeath,
            InterruptedByTimeout,
        };
        BlockResult() = delete;
        BlockResult(Type type)
            : m_type(type)
        {
        }
        bool operator==(Type type) const
        {
            return m_type == type;
        }
        bool operator!=(Type type) const
        {
            return m_type != type;
        }
        [[nodiscard]] bool was_interrupted() const
        {
            switch (m_type) {
            case InterruptedBySignal:
            case InterruptedByDeath:
                return true;
            default:
                return false;
            }
        }
        [[nodiscard]] bool timed_out() const
        {
            return m_type == InterruptedByTimeout;
        }

    private:
        Type m_type;
    };
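
    // Usage sketch (illustrative only, not part of the original header):
    // callers inspect the BlockResult returned by a blocking call, e.g.:
    //
    //     auto result = Thread::current()->sleep(duration); // `duration` is a caller-supplied timespec
    //     if (result.was_interrupted())
    //         return EINTR;
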
    class BlockTimeout {
    public:
        BlockTimeout()
            : m_infinite(true)
        {
        }
        explicit BlockTimeout(bool is_absolute, const timeval* time, const timespec* start_time = nullptr, clockid_t clock_id = CLOCK_MONOTONIC_COARSE)
            : m_clock_id(clock_id)
            , m_infinite(!time)
        {
            if (!m_infinite) {
                if (time->tv_sec > 0 || time->tv_usec > 0) {
                    timeval_to_timespec(*time, m_time);
                    m_should_block = true;
                }
                m_start_time = start_time ? *start_time : TimeManagement::the().current_time(clock_id).value();
                if (!is_absolute)
                    timespec_add(m_time, m_start_time, m_time);
            }
        }
        explicit BlockTimeout(bool is_absolute, const timespec* time, const timespec* start_time = nullptr, clockid_t clock_id = CLOCK_MONOTONIC_COARSE)
            : m_clock_id(clock_id)
            , m_infinite(!time)
        {
            if (!m_infinite) {
                if (time->tv_sec > 0 || time->tv_nsec > 0) {
                    m_time = *time;
                    m_should_block = true;
                }
                m_start_time = start_time ? *start_time : TimeManagement::the().current_time(clock_id).value();
                if (!is_absolute)
                    timespec_add(m_time, m_start_time, m_time);
            }
        }

        const timespec& absolute_time() const { return m_time; }
        const timespec* start_time() const { return !m_infinite ? &m_start_time : nullptr; }
        clockid_t clock_id() const { return m_clock_id; }
        bool is_infinite() const { return m_infinite; }
        bool should_block() const { return m_infinite || m_should_block; }

    private:
        timespec m_time { 0, 0 };
        timespec m_start_time { 0, 0 };
        clockid_t m_clock_id { CLOCK_MONOTONIC_COARSE };
        bool m_infinite { false };
        bool m_should_block { false };
    };
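
    // Sketch (not in the original header): a relative one-second timeout
    // against the default coarse monotonic clock could be built like this and
    // then handed to Thread::block():
    //
    //     timespec one_second { 1, 0 };
    //     Thread::BlockTimeout timeout(false, &one_second); // false = relative
    //
    // Passing a null time pointer yields an infinite timeout, as does the
    // default constructor.
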
    class BlockCondition;

    class Blocker {
    public:
        enum class Type {
            Unknown = 0,
            File,
            Futex,
            Plan9FS,
            Join,
            Queue,
            Routing,
            Sleep,
            Wait
        };
        virtual ~Blocker();
        virtual const char* state_string() const = 0;
        virtual bool should_block() { return true; }
        virtual Type blocker_type() const = 0;
        virtual const BlockTimeout& override_timeout(const BlockTimeout& timeout) { return timeout; }
        virtual bool can_be_interrupted() const { return true; }
        virtual void not_blocking(bool) = 0;
        virtual void was_unblocked(bool did_timeout)
        {
            if (did_timeout) {
                ScopedSpinLock lock(m_lock);
                m_did_timeout = true;
            }
        }
        void set_interrupted_by_death()
        {
            ScopedSpinLock lock(m_lock);
            do_set_interrupted_by_death();
        }
        void set_interrupted_by_signal(u8 signal)
        {
            ScopedSpinLock lock(m_lock);
            do_set_interrupted_by_signal(signal);
        }
        u8 was_interrupted_by_signal() const
        {
            ScopedSpinLock lock(m_lock);
            return do_get_interrupted_by_signal();
        }
        virtual Thread::BlockResult block_result()
        {
            ScopedSpinLock lock(m_lock);
            if (m_was_interrupted_by_death)
                return Thread::BlockResult::InterruptedByDeath;
            if (m_was_interrupted_by_signal != 0)
                return Thread::BlockResult::InterruptedBySignal;
            if (m_did_timeout)
                return Thread::BlockResult::InterruptedByTimeout;
            return Thread::BlockResult::WokeNormally;
        }

        void begin_blocking(Badge<Thread>);
        BlockResult end_blocking(Badge<Thread>, bool);

    protected:
        void do_set_interrupted_by_death()
        {
            m_was_interrupted_by_death = true;
        }
        void do_set_interrupted_by_signal(u8 signal)
        {
            ASSERT(signal != 0);
            m_was_interrupted_by_signal = signal;
        }
        void do_clear_interrupted_by_signal()
        {
            m_was_interrupted_by_signal = 0;
        }
        u8 do_get_interrupted_by_signal() const
        {
            return m_was_interrupted_by_signal;
        }
        [[nodiscard]] bool was_interrupted() const
        {
            return m_was_interrupted_by_death || m_was_interrupted_by_signal != 0;
        }
        void unblock_from_blocker()
        {
            RefPtr<Thread> thread;
            {
                ScopedSpinLock lock(m_lock);
                if (m_is_blocking) {
                    m_is_blocking = false;
                    ASSERT(m_blocked_thread);
                    thread = m_blocked_thread;
                }
            }
            if (thread)
                thread->unblock_from_blocker(*this);
        }

        bool set_block_condition(BlockCondition&, void* = nullptr);
        void set_block_condition_raw_locked(BlockCondition* block_condition)
        {
            m_block_condition = block_condition;
        }

        mutable RecursiveSpinLock m_lock;

    private:
        BlockCondition* m_block_condition { nullptr };
        void* m_block_data { nullptr };
        Thread* m_blocked_thread { nullptr };
        u8 m_was_interrupted_by_signal { 0 };
        bool m_is_blocking { false };
        bool m_was_interrupted_by_death { false };
        bool m_did_timeout { false };
    };
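
    // Minimal custom blocker sketch (hypothetical, for illustration only): a
    // subclass supplies a type, a human-readable state string, and the
    // not_blocking() hook; everything else has usable defaults.
    //
    //     class ExampleBlocker final : public Thread::Blocker {
    //     public:
    //         virtual Type blocker_type() const override { return Type::Unknown; }
    //         virtual const char* state_string() const override { return "Example"; }
    //         virtual void not_blocking(bool) override { }
    //     };
    //
    // A thread would then block on it via Thread::current()->block<ExampleBlocker>(timeout).
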
    class BlockCondition {
        AK_MAKE_NONCOPYABLE(BlockCondition);
        AK_MAKE_NONMOVABLE(BlockCondition);

    public:
        BlockCondition() = default;

        virtual ~BlockCondition()
        {
            ScopedSpinLock lock(m_lock);
            ASSERT(m_blockers.is_empty());
        }

        bool add_blocker(Blocker& blocker, void* data)
        {
            ScopedSpinLock lock(m_lock);
            if (!should_add_blocker(blocker, data))
                return false;
            m_blockers.append({ &blocker, data });
            return true;
        }

        void remove_blocker(Blocker& blocker, void* data)
        {
            ScopedSpinLock lock(m_lock);
            // NOTE: it's possible that the blocker is no longer present
            m_blockers.remove_first_matching([&](auto& info) {
                return info.blocker == &blocker && info.data == data;
            });
        }

        bool is_empty() const
        {
            ScopedSpinLock lock(m_lock);
            return is_empty_locked();
        }

    protected:
        template<typename UnblockOne>
        bool unblock(UnblockOne unblock_one)
        {
            ScopedSpinLock lock(m_lock);
            return do_unblock(unblock_one);
        }

        template<typename UnblockOne>
        bool do_unblock(UnblockOne unblock_one)
        {
            ASSERT(m_lock.is_locked());
            bool stop_iterating = false;
            bool did_unblock = false;
            for (size_t i = 0; i < m_blockers.size() && !stop_iterating;) {
                auto& info = m_blockers[i];
                if (unblock_one(*info.blocker, info.data, stop_iterating)) {
                    m_blockers.remove(i);
                    did_unblock = true;
                    continue;
                }
                i++;
            }
            return did_unblock;
        }

        bool is_empty_locked() const
        {
            ASSERT(m_lock.is_locked());
            return m_blockers.is_empty();
        }

        virtual bool should_add_blocker(Blocker&, void*) { return true; }

        struct BlockerInfo {
            Blocker* blocker;
            void* data;
        };

        Vector<BlockerInfo, 4> do_take_blockers(size_t count)
        {
            if (m_blockers.size() <= count)
                return move(m_blockers);

            size_t move_count = (count <= m_blockers.size()) ? count : m_blockers.size();
            ASSERT(move_count > 0);

            Vector<BlockerInfo, 4> taken_blockers;
            taken_blockers.ensure_capacity(move_count);
            for (size_t i = 0; i < move_count; i++)
                taken_blockers.append(m_blockers.take(i));
            m_blockers.remove(0, move_count);
            return taken_blockers;
        }

        void do_append_blockers(Vector<BlockerInfo, 4>&& blockers_to_append)
        {
            if (blockers_to_append.is_empty())
                return;

            if (m_blockers.is_empty()) {
                m_blockers = move(blockers_to_append);
                return;
            }

            m_blockers.ensure_capacity(m_blockers.size() + blockers_to_append.size());
            for (size_t i = 0; i < blockers_to_append.size(); i++)
                m_blockers.append(blockers_to_append.take(i));
            blockers_to_append.clear();
        }

        mutable SpinLock<u8> m_lock;

    private:
        Vector<BlockerInfo, 4> m_blockers;
    };
    friend class JoinBlocker;
    class JoinBlocker final : public Blocker {
    public:
        explicit JoinBlocker(Thread& joinee, KResult& try_join_result, void*& joinee_exit_value);
        virtual Type blocker_type() const override { return Type::Join; }
        virtual const char* state_string() const override { return "Joining"; }
        virtual bool can_be_interrupted() const override { return false; }
        virtual bool should_block() override { return !m_join_error && m_should_block; }
        virtual void not_blocking(bool) override;

        bool unblock(void*, bool);

    private:
        NonnullRefPtr<Thread> m_joinee;
        void*& m_joinee_exit_value;
        bool m_join_error { false };
        bool m_did_unblock { false };
        bool m_should_block { true };
    };

    class QueueBlocker : public Blocker {
    public:
        explicit QueueBlocker(WaitQueue&, const char* block_reason = nullptr);
        virtual ~QueueBlocker();

        virtual Type blocker_type() const override { return Type::Queue; }
        virtual const char* state_string() const override { return m_block_reason ? m_block_reason : "Queue"; }
        virtual void not_blocking(bool) override { }

        virtual bool should_block() override
        {
            return m_should_block;
        }

        bool unblock();

    protected:
        const char* const m_block_reason;
        bool m_should_block { true };
        bool m_did_unblock { false };
    };

    class FutexBlocker : public Blocker {
    public:
        explicit FutexBlocker(FutexQueue&, u32);
        virtual ~FutexBlocker();

        virtual Type blocker_type() const override { return Type::Futex; }
        virtual const char* state_string() const override { return "Futex"; }
        virtual void not_blocking(bool) override { }

        virtual bool should_block() override
        {
            return m_should_block;
        }

        u32 bitset() const { return m_bitset; }

        void begin_requeue()
        {
            // We need to hold the lock until we moved it over
            m_relock_flags = m_lock.lock();
        }

        void finish_requeue(FutexQueue&);

        bool unblock_bitset(u32 bitset);
        bool unblock(bool force = false);

    protected:
        u32 m_bitset;
        u32 m_relock_flags { 0 };
        bool m_should_block { true };
        bool m_did_unblock { false };
    };

    class FileBlocker : public Blocker {
    public:
        enum class BlockFlags : u32 {
            None = 0,

            Read = 1 << 0,
            Write = 1 << 1,
            ReadPriority = 1 << 2,

            Accept = 1 << 3,
            Connect = 1 << 4,
            SocketFlags = Accept | Connect,

            WriteNotOpen = 1 << 5,
            WriteError = 1 << 6,
            WriteHangUp = 1 << 7,
            ReadHangUp = 1 << 8,
            Exception = WriteNotOpen | WriteError | WriteHangUp | ReadHangUp,
        };

        virtual Type blocker_type() const override { return Type::File; }

        virtual bool should_block() override
        {
            return m_should_block;
        }

        virtual bool unblock(bool, void*) = 0;

    protected:
        bool m_should_block { true };
    };

    class FileDescriptionBlocker : public FileBlocker {
    public:
        const FileDescription& blocked_description() const;

        virtual bool unblock(bool, void*) override;
        virtual void not_blocking(bool) override;

    protected:
        explicit FileDescriptionBlocker(FileDescription&, BlockFlags, BlockFlags&);

    private:
        NonnullRefPtr<FileDescription> m_blocked_description;
        const BlockFlags m_flags;
        BlockFlags& m_unblocked_flags;
        bool m_did_unblock { false };
        bool m_should_block { true };
    };

    class AcceptBlocker final : public FileDescriptionBlocker {
    public:
        explicit AcceptBlocker(FileDescription&, BlockFlags&);
        virtual const char* state_string() const override { return "Accepting"; }
    };

    class ConnectBlocker final : public FileDescriptionBlocker {
    public:
        explicit ConnectBlocker(FileDescription&, BlockFlags&);
        virtual const char* state_string() const override { return "Connecting"; }
    };

    class WriteBlocker final : public FileDescriptionBlocker {
    public:
        explicit WriteBlocker(FileDescription&, BlockFlags&);
        virtual const char* state_string() const override { return "Writing"; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;

    private:
        BlockTimeout m_timeout;
    };

    class ReadBlocker final : public FileDescriptionBlocker {
    public:
        explicit ReadBlocker(FileDescription&, BlockFlags&);
        virtual const char* state_string() const override { return "Reading"; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;

    private:
        BlockTimeout m_timeout;
    };

    class SleepBlocker final : public Blocker {
    public:
        explicit SleepBlocker(const BlockTimeout&, timespec* = nullptr);
        virtual const char* state_string() const override { return "Sleeping"; }
        virtual Type blocker_type() const override { return Type::Sleep; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;
        virtual void not_blocking(bool) override;
        virtual void was_unblocked(bool) override;
        virtual Thread::BlockResult block_result() override;

    private:
        void calculate_remaining();

        BlockTimeout m_deadline;
        timespec* m_remaining;
    };

    class SelectBlocker final : public FileBlocker {
    public:
        struct FDInfo {
            NonnullRefPtr<FileDescription> description;
            BlockFlags block_flags;
            BlockFlags unblocked_flags { BlockFlags::None };
        };

        typedef Vector<FDInfo, FD_SETSIZE> FDVector;
        SelectBlocker(FDVector& fds);
        virtual ~SelectBlocker();

        virtual bool unblock(bool, void*) override;
        virtual void not_blocking(bool) override;
        virtual void was_unblocked(bool) override;
        virtual const char* state_string() const override { return "Selecting"; }

    private:
        size_t collect_unblocked_flags();

        FDVector& m_fds;
        bool m_did_unblock { false };
    };

    class WaitBlocker final : public Blocker {
    public:
        enum class UnblockFlags {
            Terminated,
            Stopped,
            Continued,
            Disowned
        };

        WaitBlocker(int wait_options, idtype_t id_type, pid_t id, KResultOr<siginfo_t>& result);
        virtual const char* state_string() const override { return "Waiting"; }
        virtual Type blocker_type() const override { return Type::Wait; }
        virtual bool should_block() override { return m_should_block; }
        virtual void not_blocking(bool) override;
        virtual void was_unblocked(bool) override;

        bool unblock(Process& process, UnblockFlags flags, u8 signal, bool from_add_blocker);
        bool is_wait() const { return !(m_wait_options & WNOWAIT); }

    private:
        void do_was_disowned();
        void do_set_result(const siginfo_t&);

        const int m_wait_options;
        const idtype_t m_id_type;
        const pid_t m_waitee_id;
        KResultOr<siginfo_t>& m_result;
        RefPtr<Process> m_waitee;
        RefPtr<ProcessGroup> m_waitee_group;
        bool m_did_unblock { false };
        bool m_error { false };
        bool m_got_sigchild { false };
        bool m_should_block;
    };

    class WaitBlockCondition final : public BlockCondition {
        friend class WaitBlocker;

    public:
        WaitBlockCondition(Process& process)
            : m_process(process)
        {
        }

        void disowned_by_waiter(Process&);
        bool unblock(Process&, WaitBlocker::UnblockFlags, u8);
        void try_unblock(WaitBlocker&);
        void finalize();

    protected:
        virtual bool should_add_blocker(Blocker&, void*) override;

    private:
        struct ProcessBlockInfo {
            NonnullRefPtr<Process> process;
            WaitBlocker::UnblockFlags flags;
            u8 signal;
            bool was_waited { false };

            explicit ProcessBlockInfo(NonnullRefPtr<Process>&&, WaitBlocker::UnblockFlags, u8);
            ~ProcessBlockInfo();
        };

        Process& m_process;
        Vector<ProcessBlockInfo, 2> m_processes;
        bool m_finalized { false };
    };

    template<typename AddBlockerHandler>
    KResult try_join(AddBlockerHandler add_blocker)
    {
        if (Thread::current() == this)
            return EDEADLK;

        ScopedSpinLock lock(m_lock);
        if (!m_is_joinable || state() == Dead)
            return EINVAL;

        add_blocker();

        // From this point on the thread is no longer joinable by anyone
        // else. It also means that if the join is timed, it becomes
        // detached when a timeout happens.
        m_is_joinable = false;
        return KSuccess;
    }
    void did_schedule() { ++m_times_scheduled; }
    u32 times_scheduled() const { return m_times_scheduled; }

    void resume_from_stopped();

    [[nodiscard]] bool should_be_stopped() const;
    [[nodiscard]] bool is_stopped() const { return m_state == Stopped; }
    [[nodiscard]] bool is_blocked() const { return m_state == Blocked; }
    [[nodiscard]] bool is_in_block() const
    {
        ScopedSpinLock lock(m_block_lock);
        return m_in_block;
    }

    u32 cpu() const { return m_cpu.load(AK::MemoryOrder::memory_order_consume); }
    void set_cpu(u32 cpu) { m_cpu.store(cpu, AK::MemoryOrder::memory_order_release); }
    u32 affinity() const { return m_cpu_affinity; }
    void set_affinity(u32 affinity) { m_cpu_affinity = affinity; }

    u32 stack_ptr() const { return m_tss.esp; }

    RegisterState& get_register_dump_from_stack();
    const RegisterState& get_register_dump_from_stack() const { return const_cast<Thread*>(this)->get_register_dump_from_stack(); }

    TSS32& tss() { return m_tss; }
    const TSS32& tss() const { return m_tss; }

    State state() const { return m_state; }
    const char* state_string() const;

    VirtualAddress thread_specific_data() const { return m_thread_specific_data; }
    size_t thread_specific_region_size() const;
    size_t thread_specific_region_alignment() const;

    ALWAYS_INLINE void yield_if_stopped()
    {
        // If some thread stopped us, we need to yield to someone else.
        // We check this when entering/exiting a system call. A thread
        // may continue to execute in user land until the next timer
        // tick or entering the next system call, or if it's in kernel
        // mode then we will intercept prior to returning back to user
        // mode.
        ScopedSpinLock lock(m_lock);
        while (state() == Thread::Stopped) {
            lock.unlock();
            // We shouldn't be holding the big lock here
            yield_while_not_holding_big_lock();
            lock.lock();
        }
    }

    template<typename T, class... Args>
    [[nodiscard]] BlockResult block(const BlockTimeout& timeout, Args&&... args)
    {
        ASSERT(!Processor::current().in_irq());
        ASSERT(this == Thread::current());
        ScopedCritical critical;
        ASSERT(!s_mm_lock.own_lock());

        ScopedSpinLock block_lock(m_block_lock);
        // We need to hold m_block_lock so that nobody can unblock a blocker as soon
        // as it is constructed and registered elsewhere
        m_in_block = true;
        T t(forward<Args>(args)...);

        ScopedSpinLock scheduler_lock(g_scheduler_lock);
        // Relaxed semantics are fine for timeout_unblocked because we
        // synchronize on the spin locks already.
        Atomic<bool, AK::MemoryOrder::memory_order_relaxed> timeout_unblocked(false);
        RefPtr<Timer> timer;
        {
            switch (state()) {
            case Thread::Stopped:
                // It's possible that we were requested to be stopped!
                break;
            case Thread::Running:
                ASSERT(m_blocker == nullptr);
                break;
            default:
                ASSERT_NOT_REACHED();
            }

            m_blocker = &t;
            if (!t.should_block()) {
                // Don't block if the wake condition is already met
                t.not_blocking(false);
                m_blocker = nullptr;
                m_in_block = false;
                return BlockResult::NotBlocked;
            }

            auto& block_timeout = t.override_timeout(timeout);
            if (!block_timeout.is_infinite()) {
                // Process::kill_all_threads may be called at any time, which will mark all
                // threads to die. In that case
                timer = TimerQueue::the().add_timer_without_id(block_timeout.clock_id(), block_timeout.absolute_time(), [&]() {
                    ASSERT(!Processor::current().in_irq());
                    ASSERT(!g_scheduler_lock.own_lock());
                    ASSERT(!m_block_lock.own_lock());
                    // NOTE: this may execute on the same or any other processor!
                    ScopedSpinLock scheduler_lock(g_scheduler_lock);
                    ScopedSpinLock block_lock(m_block_lock);
                    if (m_blocker && timeout_unblocked.exchange(true) == false)
                        unblock();
                });
                if (!timer) {
                    // Timeout is already in the past
                    t.not_blocking(true);
                    m_blocker = nullptr;
                    m_in_block = false;
                    return BlockResult::InterruptedByTimeout;
                }
            }

            t.begin_blocking({});

            set_state(Thread::Blocked);
        }

        scheduler_lock.unlock();
        block_lock.unlock();

        dbgln<THREAD_DEBUG>("Thread {} blocking on {} ({}) -->", *this, &t, t.state_string());
        bool did_timeout = false;
        u32 lock_count_to_restore = 0;
        auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
        for (;;) {
            // Yield to the scheduler, and wait for us to resume unblocked.
            ASSERT(!g_scheduler_lock.own_lock());
            ASSERT(Processor::current().in_critical());
            yield_while_not_holding_big_lock();
            ASSERT(Processor::current().in_critical());

            ScopedSpinLock block_lock2(m_block_lock);
            if (should_be_stopped() || state() == Stopped) {
                dbgln("Thread should be stopped, current state: {}", state_string());
                set_state(Thread::Blocked);
                continue;
            }
            if (m_blocker && !m_blocker->can_be_interrupted() && !m_should_die) {
                block_lock2.unlock();
                dbgln("Thread should not be unblocking, current state: {}", state_string());
                set_state(Thread::Blocked);
                continue;
            }
            // Prevent the timeout from unblocking this thread if it happens to
            // be in the process of firing already
            did_timeout |= timeout_unblocked.exchange(true);
            if (m_blocker) {
                // Remove ourselves...
                ASSERT(m_blocker == &t);
                m_blocker = nullptr;
            }
            dbgln<THREAD_DEBUG>("<-- Thread {} unblocked from {} ({})", *this, &t, t.state_string());
            m_in_block = false;
            break;
        }

        if (t.was_interrupted_by_signal()) {
            ScopedSpinLock scheduler_lock(g_scheduler_lock);
            ScopedSpinLock lock(m_lock);
            dispatch_one_pending_signal();
        }

        // Notify the blocker that we are no longer blocking. It may need
        // to clean up now while we're still holding m_lock
        auto result = t.end_blocking({}, did_timeout); // calls was_unblocked internally

        if (timer && !did_timeout) {
            // Cancel the timer while not holding any locks. This allows
            // the timer function to complete before we remove it
            // (e.g. if it's on another processor)
            TimerQueue::the().cancel_timer(timer.release_nonnull());
        }
        if (previous_locked != LockMode::Unlocked) {
            // NOTE: this may trigger another call to Thread::block(), so
            // we need to do this after we're all done and restored m_in_block!
            relock_process(previous_locked, lock_count_to_restore);
        }
        return result;
    }
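
    // Usage sketch (hedged; the exact call site lives in Thread.cpp and its
    // shape is an assumption here): all blocking is expressed through a
    // Blocker subclass, e.g. Thread::sleep() is built on this primitive
    // roughly as
    //
    //     return block<Thread::SleepBlocker>({}, Thread::BlockTimeout(false, &duration), remaining_time);
    //
    // The first argument is the outer timeout ({} = infinite); SleepBlocker
    // supplies its own deadline via override_timeout().
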
    void unblock_from_blocker(Blocker&);
    void unblock(u8 signal = 0);

    template<class... Args>
    Thread::BlockResult wait_on(WaitQueue& wait_queue, const Thread::BlockTimeout& timeout, Args&&... args)
    {
        ASSERT(this == Thread::current());
        return block<Thread::QueueBlocker>(timeout, wait_queue, forward<Args>(args)...);
    }

    BlockResult sleep(clockid_t, const timespec&, timespec* = nullptr);
    BlockResult sleep(const timespec& duration, timespec* remaining_time = nullptr)
    {
        return sleep(CLOCK_MONOTONIC_COARSE, duration, remaining_time);
    }
    BlockResult sleep_until(clockid_t, const timespec&);
    BlockResult sleep_until(const timespec& deadline)
    {
        return sleep_until(CLOCK_MONOTONIC_COARSE, deadline);
    }
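
    // Example (sketch, not part of the original header): a 500 ms relative
    // sleep on the default coarse monotonic clock, checking for interruption:
    //
    //     timespec duration { 0, 500'000'000 };
    //     auto result = Thread::current()->sleep(duration);
    //     if (result.was_interrupted())
    //         ... // a signal or pending death woke us early
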
    // Tell this thread to unblock if needed,
    // gracefully unwind the stack and die.
    void set_should_die();
    [[nodiscard]] bool should_die() const { return m_should_die; }
    void die_if_needed();

    void exit(void* = nullptr);

    bool tick();
    void set_ticks_left(u32 t) { m_ticks_left = t; }
    u32 ticks_left() const { return m_ticks_left; }

    u32 kernel_stack_base() const { return m_kernel_stack_base; }
    u32 kernel_stack_top() const { return m_kernel_stack_top; }

    void set_state(State, u8 = 0);

    [[nodiscard]] bool is_initialized() const { return m_initialized; }
    void set_initialized(bool initialized) { m_initialized = initialized; }

    void send_urgent_signal_to_self(u8 signal);
    void send_signal(u8 signal, Process* sender);

    u32 update_signal_mask(u32 signal_mask);
    u32 signal_mask_block(sigset_t signal_set, bool block);
    u32 signal_mask() const;
    void clear_signals();

    void set_dump_backtrace_on_finalization() { m_dump_backtrace_on_finalization = true; }

    DispatchSignalResult dispatch_one_pending_signal();
    DispatchSignalResult try_dispatch_one_pending_signal(u8 signal);
    DispatchSignalResult dispatch_signal(u8 signal);
    void check_dispatch_pending_signal();
    [[nodiscard]] bool has_unmasked_pending_signals() const { return m_have_any_unmasked_pending_signals.load(AK::memory_order_consume); }
    void terminate_due_to_signal(u8 signal);
    [[nodiscard]] bool should_ignore_signal(u8 signal) const;
    [[nodiscard]] bool has_signal_handler(u8 signal) const;
    [[nodiscard]] bool has_pending_signal(u8 signal) const;
    u32 pending_signals() const;
    u32 pending_signals_for_state() const;

    FPUState& fpu_state() { return *m_fpu_state; }

    void set_default_signal_dispositions();
    bool push_value_on_stack(FlatPtr);

    KResult make_thread_specific_region(Badge<Process>);

    unsigned syscall_count() const { return m_syscall_count; }
    void did_syscall() { ++m_syscall_count; }
    unsigned inode_faults() const { return m_inode_faults; }
    void did_inode_fault() { ++m_inode_faults; }
    unsigned zero_faults() const { return m_zero_faults; }
    void did_zero_fault() { ++m_zero_faults; }
    unsigned cow_faults() const { return m_cow_faults; }
    void did_cow_fault() { ++m_cow_faults; }

    unsigned file_read_bytes() const { return m_file_read_bytes; }
    unsigned file_write_bytes() const { return m_file_write_bytes; }

    void did_file_read(unsigned bytes)
    {
        m_file_read_bytes += bytes;
    }

    void did_file_write(unsigned bytes)
    {
        m_file_write_bytes += bytes;
    }

    unsigned unix_socket_read_bytes() const { return m_unix_socket_read_bytes; }
    unsigned unix_socket_write_bytes() const { return m_unix_socket_write_bytes; }

    void did_unix_socket_read(unsigned bytes)
    {
        m_unix_socket_read_bytes += bytes;
    }

    void did_unix_socket_write(unsigned bytes)
    {
        m_unix_socket_write_bytes += bytes;
    }

    unsigned ipv4_socket_read_bytes() const { return m_ipv4_socket_read_bytes; }
    unsigned ipv4_socket_write_bytes() const { return m_ipv4_socket_write_bytes; }

    void did_ipv4_socket_read(unsigned bytes)
    {
        m_ipv4_socket_read_bytes += bytes;
    }

    void did_ipv4_socket_write(unsigned bytes)
    {
        m_ipv4_socket_write_bytes += bytes;
    }

    void set_active(bool active) { m_is_active = active; }

    u32 saved_critical() const { return m_saved_critical; }
    void save_critical(u32 critical) { m_saved_critical = critical; }

    [[nodiscard]] bool is_active() const { return m_is_active; }

    [[nodiscard]] bool is_finalizable() const
    {
        // We can't finalize as long as this thread is still running.
        // Note that checking for Running state here isn't sufficient
        // as the thread may not be in Running state but switching out.
        // m_is_active is set to false once the context switch is
        // complete and the thread is not executing on any processor.
        if (m_is_active.load(AK::memory_order_acquire))
            return false;

        // We can't finalize until the thread is either detached or
        // a join has started. We can't make m_is_joinable atomic
        // because that would introduce a race in try_join.
        ScopedSpinLock lock(m_lock);
        return !m_is_joinable;
    }

    RefPtr<Thread> clone(Process&);

    template<typename Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<typename Callback>
    static IterationDecision for_each(Callback);

    [[nodiscard]] static bool is_runnable_state(Thread::State state)
    {
        return state == Thread::State::Running || state == Thread::State::Runnable;
    }

    static constexpr u32 default_kernel_stack_size = 65536;
    static constexpr u32 default_userspace_stack_size = 4 * MiB;

    u32 ticks_in_user() const { return m_ticks_in_user; }
    u32 ticks_in_kernel() const { return m_ticks_in_kernel; }

    enum class PreviousMode : u8 {
        KernelMode = 0,
        UserMode
    };
    PreviousMode previous_mode() const { return m_previous_mode; }
    void set_previous_mode(PreviousMode mode) { m_previous_mode = mode; }
    TrapFrame*& current_trap() { return m_current_trap; }

    RecursiveSpinLock& get_lock() const { return m_lock; }

#if LOCK_DEBUG
    void holding_lock(Lock& lock, int refs_delta, const char* file = nullptr, int line = 0)
    {
        ASSERT(refs_delta != 0);
        m_holding_locks.fetch_add(refs_delta, AK::MemoryOrder::memory_order_relaxed);
        ScopedSpinLock list_lock(m_holding_locks_lock);
        if (refs_delta > 0) {
            bool have_existing = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    have_existing = true;
                    info.count += refs_delta;
                    break;
                }
            }
            if (!have_existing)
                m_holding_locks_list.append({ &lock, file ? file : "unknown", line, 1 });
        } else {
            ASSERT(refs_delta < 0);
            bool found = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    ASSERT(info.count >= (unsigned)-refs_delta);
                    info.count -= (unsigned)-refs_delta;
                    if (info.count == 0)
                        m_holding_locks_list.remove(i);
                    found = true;
                    break;
                }
            }
            ASSERT(found);
        }
    }
    u32 lock_count() const
    {
        return m_holding_locks.load(AK::MemoryOrder::memory_order_relaxed);
    }
#endif

    bool was_created() const
    {
        return m_kernel_stack_region;
    }

    bool is_handling_page_fault() const { return m_handling_page_fault; }
    void set_handling_page_fault(bool b) { m_handling_page_fault = b; }

private:
    IntrusiveListNode m_process_thread_list_node;
    int m_runnable_priority { -1 };

    friend class WaitQueue;

    class JoinBlockCondition : public BlockCondition {
    public:
        void thread_did_exit(void* exit_value)
        {
            ScopedSpinLock lock(m_lock);
            ASSERT(!m_thread_did_exit);
            m_thread_did_exit = true;
            m_exit_value.store(exit_value, AK::MemoryOrder::memory_order_release);
            do_unblock_joiner();
        }
        void thread_finalizing()
        {
            ScopedSpinLock lock(m_lock);
            do_unblock_joiner();
        }
        void* exit_value() const
        {
            ASSERT(m_thread_did_exit);
            return m_exit_value.load(AK::MemoryOrder::memory_order_acquire);
        }

        void try_unblock(JoinBlocker& blocker)
        {
            ScopedSpinLock lock(m_lock);
            if (m_thread_did_exit)
                blocker.unblock(exit_value(), false);
        }

    protected:
        virtual bool should_add_blocker(Blocker& b, void*) override
        {
            ASSERT(b.blocker_type() == Blocker::Type::Join);
            auto& blocker = static_cast<JoinBlocker&>(b);

            // NOTE: m_lock is held already!
            if (m_thread_did_exit) {
                blocker.unblock(exit_value(), true);
                return false;
            }
            return true;
        }

    private:
        void do_unblock_joiner()
        {
            do_unblock([&](Blocker& b, void*, bool&) {
                ASSERT(b.blocker_type() == Blocker::Type::Join);
                auto& blocker = static_cast<JoinBlocker&>(b);
                return blocker.unblock(exit_value(), false);
            });
        }

        Atomic<void*> m_exit_value { nullptr };
        bool m_thread_did_exit { false };
    };

    LockMode unlock_process_if_locked(u32&);
    void relock_process(LockMode, u32);
    String backtrace_impl();
    void reset_fpu_state();

    mutable RecursiveSpinLock m_lock;
    mutable RecursiveSpinLock m_block_lock;
    NonnullRefPtr<Process> m_process;
    ThreadID m_tid { -1 };
    TSS32 m_tss {};
    TrapFrame* m_current_trap { nullptr };
    u32 m_saved_critical { 1 };
    IntrusiveListNode m_ready_queue_node;
    Atomic<u32> m_cpu { 0 };
    u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
    u32 m_ticks_left { 0 };
    u32 m_times_scheduled { 0 };
    u32 m_ticks_in_user { 0 };
    u32 m_ticks_in_kernel { 0 };
    u32 m_pending_signals { 0 };
    u32 m_signal_mask { 0 };
    u32 m_kernel_stack_base { 0 };
    u32 m_kernel_stack_top { 0 };
    OwnPtr<Region> m_kernel_stack_region;
    VirtualAddress m_thread_specific_data;
    SignalActionData m_signal_action_data[32];
    Blocker* m_blocker { nullptr };

#if LOCK_DEBUG
    struct HoldingLockInfo {
        Lock* lock;
        const char* file;
        int line;
        unsigned count;
    };
    Atomic<u32> m_holding_locks { 0 };
    SpinLock<u8> m_holding_locks_lock;
    Vector<HoldingLockInfo> m_holding_locks_list;
#endif

    JoinBlockCondition m_join_condition;
    Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_active { false };
    bool m_is_joinable { true };
    bool m_handling_page_fault { false };
    PreviousMode m_previous_mode { PreviousMode::UserMode };

    unsigned m_syscall_count { 0 };
    unsigned m_inode_faults { 0 };
    unsigned m_zero_faults { 0 };
    unsigned m_cow_faults { 0 };

    unsigned m_file_read_bytes { 0 };
    unsigned m_file_write_bytes { 0 };

    unsigned m_unix_socket_read_bytes { 0 };
    unsigned m_unix_socket_write_bytes { 0 };

    unsigned m_ipv4_socket_read_bytes { 0 };
    unsigned m_ipv4_socket_write_bytes { 0 };

    FPUState* m_fpu_state { nullptr };
    State m_state { Invalid };
    String m_name;
    u32 m_priority { THREAD_PRIORITY_NORMAL };

    State m_stop_state { Invalid };

    bool m_dump_backtrace_on_finalization { false };
    bool m_should_die { false };
    bool m_initialized { false };
    bool m_in_block { false };
    Atomic<bool> m_have_any_unmasked_pending_signals { false };

    void yield_without_holding_big_lock();
    void donate_without_holding_big_lock(RefPtr<Thread>&, const char*);
    void yield_while_not_holding_big_lock();
    void drop_thread_count(bool);
};
template<typename Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    ScopedSpinLock lock(g_tid_map_lock);
    for (auto& it : *g_tid_map) {
        IterationDecision decision = callback(*it.value);
        if (decision != IterationDecision::Continue)
            return decision;
    }
    return IterationDecision::Continue;
}

template<typename Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    ScopedSpinLock lock(g_tid_map_lock);
    for (auto& it : *g_tid_map) {
        auto& thread = *it.value;
        if (thread.state() != state)
            continue;
        IterationDecision decision = callback(thread);
        if (decision != IterationDecision::Continue)
            return decision;
    }
    return IterationDecision::Continue;
}
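
// Usage sketch (illustrative only): counting runnable threads with the
// iteration helpers above. The callback returns an IterationDecision to
// continue or stop early.
//
//     size_t count = 0;
//     Thread::for_each_in_state(Thread::Runnable, [&](Thread&) {
//         ++count;
//         return IterationDecision::Continue;
//     });
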
const LogStream& operator<<(const LogStream&, const Thread&);

}

template<>
struct AK::Formatter<Kernel::Thread> : AK::Formatter<FormatString> {
    void format(FormatBuilder&, const Kernel::Thread&);
};