Thread.h

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include <AK/Function.h>
#include <AK/IntrusiveList.h>
#include <AK/Optional.h>
#include <AK/OwnPtr.h>
#include <AK/String.h>
#include <AK/Time.h>
#include <AK/Vector.h>
#include <AK/WeakPtr.h>
#include <AK/Weakable.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Arch/i386/SafeMem.h>
#include <Kernel/Debug.h>
#include <Kernel/Forward.h>
#include <Kernel/KResult.h>
#include <Kernel/LockMode.h>
#include <Kernel/Scheduler.h>
#include <Kernel/ThreadTracer.h>
#include <Kernel/TimerQueue.h>
#include <Kernel/UnixTypes.h>
#include <LibC/fd_set.h>
#include <LibELF/AuxiliaryVector.h>

namespace Kernel {

extern RecursiveSpinLock s_mm_lock;

enum class DispatchSignalResult {
    Deferred = 0,
    Yield,
    Terminate,
    Continue
};

struct SignalActionData {
    VirtualAddress handler_or_sigaction;
    u32 mask { 0 };
    int flags { 0 };
};

struct ThreadSpecificData {
    ThreadSpecificData* self;
};

#define THREAD_PRIORITY_MIN 1
#define THREAD_PRIORITY_LOW 10
#define THREAD_PRIORITY_NORMAL 30
#define THREAD_PRIORITY_HIGH 50
#define THREAD_PRIORITY_MAX 99

#define THREAD_AFFINITY_DEFAULT 0xffffffff

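// A Thread is the kernel's unit of scheduling and execution. Every Process owns
// one or more Threads; the Scheduler picks Runnable threads and runs them until
// they block, yield, or are preempted. The nested Blocker/BlockCondition classes
// below implement the blocking machinery used by sleep, join, wait, select and futexes.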
class Thread
    : public RefCounted<Thread>
    , public Weakable<Thread> {
    AK_MAKE_NONCOPYABLE(Thread);
    AK_MAKE_NONMOVABLE(Thread);

    friend class Process;
    friend class Scheduler;
    friend class ThreadReadyQueue;

public:
    inline static Thread* current()
    {
        return Processor::current_thread();
    }

    explicit Thread(NonnullRefPtr<Process>);
    ~Thread();

    static RefPtr<Thread> from_tid(ThreadID);
    static void finalize_dying_threads();

    ThreadID tid() const { return m_tid; }
    ProcessID pid() const;

    void set_priority(u32 p) { m_priority = p; }
    u32 priority() const { return m_priority; }

    u32 effective_priority() const { return m_priority; }

    void detach()
    {
        ScopedSpinLock lock(m_lock);
        m_is_joinable = false;
    }

    [[nodiscard]] bool is_joinable() const
    {
        ScopedSpinLock lock(m_lock);
        return m_is_joinable;
    }

    Process& process() { return m_process; }
    const Process& process() const { return m_process; }

    String backtrace();
    Vector<FlatPtr> raw_backtrace(FlatPtr ebp, FlatPtr eip) const;

    String name() const
    {
        // Because the name can be changed, we can't return a const
        // reference here. We must make a copy.
        ScopedSpinLock lock(m_lock);
        return m_name;
    }
    void set_name(const StringView& s)
    {
        ScopedSpinLock lock(m_lock);
        m_name = s;
    }
    void set_name(String&& name)
    {
        ScopedSpinLock lock(m_lock);
        m_name = move(name);
    }

    void finalize();

    enum State : u8 {
        Invalid = 0,
        Runnable,
        Running,
        Dying,
        Dead,
        Stopped,
        Blocked
    };

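    // BlockResult describes how a call to Thread::block() ended: the thread woke
    // normally, never needed to block, or was interrupted by a signal, by death,
    // or by a timeout.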
    class BlockResult {
    public:
        enum Type {
            WokeNormally,
            NotBlocked,
            InterruptedBySignal,
            InterruptedByDeath,
            InterruptedByTimeout,
        };

        BlockResult() = delete;
        BlockResult(Type type)
            : m_type(type)
        {
        }

        bool operator==(Type type) const
        {
            return m_type == type;
        }
        bool operator!=(Type type) const
        {
            return m_type != type;
        }

        [[nodiscard]] bool was_interrupted() const
        {
            switch (m_type) {
            case InterruptedBySignal:
            case InterruptedByDeath:
                return true;
            default:
                return false;
            }
        }

        [[nodiscard]] bool timed_out() const
        {
            return m_type == InterruptedByTimeout;
        }

    private:
        Type m_type;
    };

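    // BlockTimeout is an optional deadline for a blocking operation. A
    // default-constructed timeout is infinite; otherwise the deadline is stored
    // as an absolute time on the given clock (relative times are added to start_time).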
    class BlockTimeout {
    public:
        BlockTimeout()
            : m_infinite(true)
        {
        }
        explicit BlockTimeout(bool is_absolute, const timeval* time, const timespec* start_time = nullptr, clockid_t clock_id = CLOCK_MONOTONIC_COARSE)
            : m_clock_id(clock_id)
            , m_infinite(!time)
        {
            if (!m_infinite) {
                if (time->tv_sec > 0 || time->tv_usec > 0) {
                    timeval_to_timespec(*time, m_time);
                    m_should_block = true;
                }
                m_start_time = start_time ? *start_time : TimeManagement::the().current_time(clock_id).value();
                if (!is_absolute)
                    timespec_add(m_time, m_start_time, m_time);
            }
        }
        explicit BlockTimeout(bool is_absolute, const timespec* time, const timespec* start_time = nullptr, clockid_t clock_id = CLOCK_MONOTONIC_COARSE)
            : m_clock_id(clock_id)
            , m_infinite(!time)
        {
            if (!m_infinite) {
                if (time->tv_sec > 0 || time->tv_nsec > 0) {
                    m_time = *time;
                    m_should_block = true;
                }
                m_start_time = start_time ? *start_time : TimeManagement::the().current_time(clock_id).value();
                if (!is_absolute)
                    timespec_add(m_time, m_start_time, m_time);
            }
        }

        const timespec& absolute_time() const { return m_time; }
        const timespec* start_time() const { return !m_infinite ? &m_start_time : nullptr; }
        clockid_t clock_id() const { return m_clock_id; }
        bool is_infinite() const { return m_infinite; }
        bool should_block() const { return m_infinite || m_should_block; }

    private:
        timespec m_time { 0, 0 };
        timespec m_start_time { 0, 0 };
        clockid_t m_clock_id { CLOCK_MONOTONIC_COARSE };
        bool m_infinite { false };
        bool m_should_block { false };
    };

    class BlockCondition;

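    // A Blocker encapsulates the reason a thread is blocked and knows how to
    // unblock it. Subclasses (JoinBlocker, QueueBlocker, FutexBlocker, the file
    // blockers, SleepBlocker, WaitBlocker) implement the per-reason logic, while a
    // BlockCondition keeps track of the blockers currently waiting on it.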
    class Blocker {
    public:
        enum class Type {
            Unknown = 0,
            File,
            Futex,
            Plan9FS,
            Join,
            Queue,
            Routing,
            Sleep,
            Wait
        };
        virtual ~Blocker();
        virtual const char* state_string() const = 0;
        virtual bool should_block() { return true; }
        virtual Type blocker_type() const = 0;
        virtual const BlockTimeout& override_timeout(const BlockTimeout& timeout) { return timeout; }
        virtual bool can_be_interrupted() const { return true; }
        virtual void not_blocking(bool) = 0;
        virtual void was_unblocked(bool did_timeout)
        {
            if (did_timeout) {
                ScopedSpinLock lock(m_lock);
                m_did_timeout = true;
            }
        }
        void set_interrupted_by_death()
        {
            ScopedSpinLock lock(m_lock);
            do_set_interrupted_by_death();
        }
        void set_interrupted_by_signal(u8 signal)
        {
            ScopedSpinLock lock(m_lock);
            do_set_interrupted_by_signal(signal);
        }
        u8 was_interrupted_by_signal() const
        {
            ScopedSpinLock lock(m_lock);
            return do_get_interrupted_by_signal();
        }
        virtual Thread::BlockResult block_result()
        {
            ScopedSpinLock lock(m_lock);
            if (m_was_interrupted_by_death)
                return Thread::BlockResult::InterruptedByDeath;
            if (m_was_interrupted_by_signal != 0)
                return Thread::BlockResult::InterruptedBySignal;
            if (m_did_timeout)
                return Thread::BlockResult::InterruptedByTimeout;
            return Thread::BlockResult::WokeNormally;
        }

        void begin_blocking(Badge<Thread>);
        BlockResult end_blocking(Badge<Thread>, bool);

    protected:
        void do_set_interrupted_by_death()
        {
            m_was_interrupted_by_death = true;
        }
        void do_set_interrupted_by_signal(u8 signal)
        {
            ASSERT(signal != 0);
            m_was_interrupted_by_signal = signal;
        }
        void do_clear_interrupted_by_signal()
        {
            m_was_interrupted_by_signal = 0;
        }
        u8 do_get_interrupted_by_signal() const
        {
            return m_was_interrupted_by_signal;
        }
        [[nodiscard]] bool was_interrupted() const
        {
            return m_was_interrupted_by_death || m_was_interrupted_by_signal != 0;
        }
        void unblock_from_blocker()
        {
            RefPtr<Thread> thread;

            {
                ScopedSpinLock lock(m_lock);
                if (m_is_blocking) {
                    m_is_blocking = false;
                    ASSERT(m_blocked_thread);
                    thread = m_blocked_thread;
                }
            }

            if (thread)
                thread->unblock_from_blocker(*this);
        }

        bool set_block_condition(BlockCondition&, void* = nullptr);
        void set_block_condition_raw_locked(BlockCondition* block_condition)
        {
            m_block_condition = block_condition;
        }

        mutable RecursiveSpinLock m_lock;

    private:
        BlockCondition* m_block_condition { nullptr };
        void* m_block_data { nullptr };
        Thread* m_blocked_thread { nullptr };
        u8 m_was_interrupted_by_signal { 0 };
        bool m_is_blocking { false };
        bool m_was_interrupted_by_death { false };
        bool m_did_timeout { false };
    };

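    // A BlockCondition is the thing blockers wait on (e.g. a WaitQueue or a
    // process's wait state). It owns the list of registered blockers and wakes
    // them through do_unblock(); all access to that list happens under m_lock.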
    class BlockCondition {
        AK_MAKE_NONCOPYABLE(BlockCondition);
        AK_MAKE_NONMOVABLE(BlockCondition);

    public:
        BlockCondition() = default;

        virtual ~BlockCondition()
        {
            ScopedSpinLock lock(m_lock);
            ASSERT(m_blockers.is_empty());
        }

        bool add_blocker(Blocker& blocker, void* data)
        {
            ScopedSpinLock lock(m_lock);
            if (!should_add_blocker(blocker, data))
                return false;
            m_blockers.append({ &blocker, data });
            return true;
        }

        void remove_blocker(Blocker& blocker, void* data)
        {
            ScopedSpinLock lock(m_lock);
            // NOTE: it's possible that the blocker is no longer present
            m_blockers.remove_first_matching([&](auto& info) {
                return info.blocker == &blocker && info.data == data;
            });
        }

        bool is_empty() const
        {
            ScopedSpinLock lock(m_lock);
            return is_empty_locked();
        }

    protected:
        template<typename UnblockOne>
        bool unblock(UnblockOne unblock_one)
        {
            ScopedSpinLock lock(m_lock);
            return do_unblock(unblock_one);
        }

        template<typename UnblockOne>
        bool do_unblock(UnblockOne unblock_one)
        {
            ASSERT(m_lock.is_locked());
            bool stop_iterating = false;
            bool did_unblock = false;
            for (size_t i = 0; i < m_blockers.size() && !stop_iterating;) {
                auto& info = m_blockers[i];
                if (unblock_one(*info.blocker, info.data, stop_iterating)) {
                    m_blockers.remove(i);
                    did_unblock = true;
                    continue;
                }
                i++;
            }
            return did_unblock;
        }

        bool is_empty_locked() const
        {
            ASSERT(m_lock.is_locked());
            return m_blockers.is_empty();
        }

        virtual bool should_add_blocker(Blocker&, void*) { return true; }

        struct BlockerInfo {
            Blocker* blocker;
            void* data;
        };

        Vector<BlockerInfo, 4> do_take_blockers(size_t count)
        {
            if (m_blockers.size() <= count)
                return move(m_blockers);

            size_t move_count = (count <= m_blockers.size()) ? count : m_blockers.size();
            ASSERT(move_count > 0);

            Vector<BlockerInfo, 4> taken_blockers;
            taken_blockers.ensure_capacity(move_count);
            for (size_t i = 0; i < move_count; i++)
                taken_blockers.append(m_blockers.take(i));
            m_blockers.remove(0, move_count);
            return taken_blockers;
        }

        void do_append_blockers(Vector<BlockerInfo, 4>&& blockers_to_append)
        {
            if (blockers_to_append.is_empty())
                return;
            if (m_blockers.is_empty()) {
                m_blockers = move(blockers_to_append);
                return;
            }
            m_blockers.ensure_capacity(m_blockers.size() + blockers_to_append.size());
            for (size_t i = 0; i < blockers_to_append.size(); i++)
                m_blockers.append(blockers_to_append.take(i));
            blockers_to_append.clear();
        }

        mutable SpinLock<u8> m_lock;

    private:
        Vector<BlockerInfo, 4> m_blockers;
    };

    friend class JoinBlocker;

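    // JoinBlocker blocks a joining thread until the joinee exits and publishes
    // its exit value.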
    class JoinBlocker final : public Blocker {
    public:
        explicit JoinBlocker(Thread& joinee, KResult& try_join_result, void*& joinee_exit_value);
        virtual Type blocker_type() const override { return Type::Join; }
        virtual const char* state_string() const override { return "Joining"; }
        virtual bool can_be_interrupted() const override { return false; }
        virtual bool should_block() override { return !m_join_error && m_should_block; }
        virtual void not_blocking(bool) override;

        bool unblock(void*, bool);

    private:
        NonnullRefPtr<Thread> m_joinee;
        void*& m_joinee_exit_value;
        bool m_join_error { false };
        bool m_did_unblock { false };
        bool m_should_block { true };
    };

    class QueueBlocker : public Blocker {
    public:
        explicit QueueBlocker(WaitQueue&, const char* block_reason = nullptr);
        virtual ~QueueBlocker();

        virtual Type blocker_type() const override { return Type::Queue; }
        virtual const char* state_string() const override { return m_block_reason ? m_block_reason : "Queue"; }
        virtual void not_blocking(bool) override { }

        virtual bool should_block() override
        {
            return m_should_block;
        }

        bool unblock();

    protected:
        const char* const m_block_reason;
        bool m_should_block { true };
        bool m_did_unblock { false };
    };

    class FutexBlocker : public Blocker {
    public:
        explicit FutexBlocker(FutexQueue&, u32);
        virtual ~FutexBlocker();

        virtual Type blocker_type() const override { return Type::Futex; }
        virtual const char* state_string() const override { return "Futex"; }
        virtual void not_blocking(bool) override { }

        virtual bool should_block() override
        {
            return m_should_block;
        }

        u32 bitset() const { return m_bitset; }

        void begin_requeue()
        {
            // We need to hold the lock until we've moved it over
            m_relock_flags = m_lock.lock();
        }

        void finish_requeue(FutexQueue&);

        bool unblock_bitset(u32 bitset);
        bool unblock(bool force = false);

    protected:
        u32 m_bitset;
        u32 m_relock_flags { 0 };
        bool m_should_block { true };
        bool m_did_unblock { false };
    };

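    // FileBlocker and its FileDescriptionBlocker subclasses block a thread on I/O
    // readiness of a FileDescription. BlockFlags records which conditions (read,
    // write, accept, connect, errors/hang-ups) are being waited for, and which ones
    // eventually unblocked the thread.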
    class FileBlocker : public Blocker {
    public:
        enum class BlockFlags : u32 {
            None = 0,

            Read = 1 << 0,
            Write = 1 << 1,
            ReadPriority = 1 << 2,

            Accept = 1 << 3,
            Connect = 1 << 4,
            SocketFlags = Accept | Connect,

            WriteNotOpen = 1 << 5,
            WriteError = 1 << 6,
            WriteHangUp = 1 << 7,
            ReadHangUp = 1 << 8,
            Exception = WriteNotOpen | WriteError | WriteHangUp | ReadHangUp,
        };

        virtual Type blocker_type() const override { return Type::File; }

        virtual bool should_block() override
        {
            return m_should_block;
        }

        virtual bool unblock(bool, void*) = 0;

    protected:
        bool m_should_block { true };
    };

    class FileDescriptionBlocker : public FileBlocker {
    public:
        const FileDescription& blocked_description() const;

        virtual bool unblock(bool, void*) override;
        virtual void not_blocking(bool) override;

    protected:
        explicit FileDescriptionBlocker(FileDescription&, BlockFlags, BlockFlags&);

    private:
        NonnullRefPtr<FileDescription> m_blocked_description;
        const BlockFlags m_flags;
        BlockFlags& m_unblocked_flags;
        bool m_did_unblock { false };
        bool m_should_block { true };
    };

    class AcceptBlocker final : public FileDescriptionBlocker {
    public:
        explicit AcceptBlocker(FileDescription&, BlockFlags&);
        virtual const char* state_string() const override { return "Accepting"; }
    };

    class ConnectBlocker final : public FileDescriptionBlocker {
    public:
        explicit ConnectBlocker(FileDescription&, BlockFlags&);
        virtual const char* state_string() const override { return "Connecting"; }
    };

    class WriteBlocker final : public FileDescriptionBlocker {
    public:
        explicit WriteBlocker(FileDescription&, BlockFlags&);
        virtual const char* state_string() const override { return "Writing"; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;

    private:
        BlockTimeout m_timeout;
    };

    class ReadBlocker final : public FileDescriptionBlocker {
    public:
        explicit ReadBlocker(FileDescription&, BlockFlags&);
        virtual const char* state_string() const override { return "Reading"; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;

    private:
        BlockTimeout m_timeout;
    };

    class SleepBlocker final : public Blocker {
    public:
        explicit SleepBlocker(const BlockTimeout&, timespec* = nullptr);
        virtual const char* state_string() const override { return "Sleeping"; }
        virtual Type blocker_type() const override { return Type::Sleep; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;
        virtual void not_blocking(bool) override;
        virtual void was_unblocked(bool) override;
        virtual Thread::BlockResult block_result() override;

    private:
        void calculate_remaining();

        BlockTimeout m_deadline;
        timespec* m_remaining;
    };

    class SelectBlocker final : public FileBlocker {
    public:
        struct FDInfo {
            NonnullRefPtr<FileDescription> description;
            BlockFlags block_flags;
            BlockFlags unblocked_flags { BlockFlags::None };
        };

        typedef Vector<FDInfo, FD_SETSIZE> FDVector;
        SelectBlocker(FDVector& fds);
        virtual ~SelectBlocker();

        virtual bool unblock(bool, void*) override;
        virtual void not_blocking(bool) override;
        virtual void was_unblocked(bool) override;
        virtual const char* state_string() const override { return "Selecting"; }

    private:
        size_t collect_unblocked_flags();

        FDVector& m_fds;
        bool m_did_unblock { false };
    };

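    // WaitBlocker implements the wait family of syscalls: it blocks until a
    // matching child process terminates, stops, or continues (or the wait is
    // disowned), then stores the resulting siginfo_t.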
    class WaitBlocker final : public Blocker {
    public:
        enum class UnblockFlags {
            Terminated,
            Stopped,
            Continued,
            Disowned
        };

        WaitBlocker(int wait_options, idtype_t id_type, pid_t id, KResultOr<siginfo_t>& result);
        virtual const char* state_string() const override { return "Waiting"; }
        virtual Type blocker_type() const override { return Type::Wait; }
        virtual bool should_block() override { return m_should_block; }
        virtual void not_blocking(bool) override;
        virtual void was_unblocked(bool) override;

        bool unblock(Process& process, UnblockFlags flags, u8 signal, bool from_add_blocker);
        bool is_wait() const { return !(m_wait_options & WNOWAIT); }

    private:
        void do_was_disowned();
        void do_set_result(const siginfo_t&);

        const int m_wait_options;
        const idtype_t m_id_type;
        const pid_t m_waitee_id;
        KResultOr<siginfo_t>& m_result;
        RefPtr<Process> m_waitee;
        RefPtr<ProcessGroup> m_waitee_group;
        bool m_did_unblock { false };
        bool m_error { false };
        bool m_got_sigchild { false };
        bool m_should_block;
    };

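    // WaitBlockCondition collects child-process state changes for a Process so
    // that current and future WaitBlockers can be satisfied.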
    class WaitBlockCondition final : public BlockCondition {
        friend class WaitBlocker;

    public:
        WaitBlockCondition(Process& process)
            : m_process(process)
        {
        }

        void disowned_by_waiter(Process&);
        bool unblock(Process&, WaitBlocker::UnblockFlags, u8);
        void try_unblock(WaitBlocker&);
        void finalize();

    protected:
        virtual bool should_add_blocker(Blocker&, void*) override;

    private:
        struct ProcessBlockInfo {
            NonnullRefPtr<Process> process;
            WaitBlocker::UnblockFlags flags;
            u8 signal;
            bool was_waited { false };

            explicit ProcessBlockInfo(NonnullRefPtr<Process>&&, WaitBlocker::UnblockFlags, u8);
            ~ProcessBlockInfo();
        };

        Process& m_process;
        Vector<ProcessBlockInfo, 2> m_processes;
        bool m_finalized { false };
    };

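    // try_join() is called by a would-be joiner: it registers the caller's blocker
    // through the supplied handler and marks this thread as no longer joinable, so
    // only one thread can ever join it.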
    template<typename AddBlockerHandler>
    KResult try_join(AddBlockerHandler add_blocker)
    {
        if (Thread::current() == this)
            return EDEADLK;

        ScopedSpinLock lock(m_lock);
        if (!m_is_joinable || state() == Dead)
            return EINVAL;

        add_blocker();

        // From this point on the thread is no longer joinable by anyone
        // else. It also means that if the join is timed, it becomes
        // detached when a timeout happens.
        m_is_joinable = false;
        return KSuccess;
    }

    void did_schedule() { ++m_times_scheduled; }
    u32 times_scheduled() const { return m_times_scheduled; }

    void resume_from_stopped();

    [[nodiscard]] bool should_be_stopped() const;
    [[nodiscard]] bool is_stopped() const { return m_state == Stopped; }
    [[nodiscard]] bool is_blocked() const { return m_state == Blocked; }
    [[nodiscard]] bool is_in_block() const
    {
        ScopedSpinLock lock(m_block_lock);
        return m_in_block;
    }

    u32 cpu() const { return m_cpu.load(AK::MemoryOrder::memory_order_consume); }
    void set_cpu(u32 cpu) { m_cpu.store(cpu, AK::MemoryOrder::memory_order_release); }
    u32 affinity() const { return m_cpu_affinity; }
    void set_affinity(u32 affinity) { m_cpu_affinity = affinity; }

    u32 stack_ptr() const { return m_tss.esp; }

    RegisterState& get_register_dump_from_stack();

    TSS32& tss() { return m_tss; }
    const TSS32& tss() const { return m_tss; }
    State state() const { return m_state; }
    const char* state_string() const;

    VirtualAddress thread_specific_data() const { return m_thread_specific_data; }
    size_t thread_specific_region_size() const;
    size_t thread_specific_region_alignment() const;

    ALWAYS_INLINE void yield_if_stopped()
    {
        // If some thread stopped us, we need to yield to someone else.
        // We check this when entering/exiting a system call. A thread
        // may continue to execute in user land until the next timer
        // tick or entering the next system call, or if it's in kernel
        // mode then we will intercept prior to returning back to user
        // mode.
        ScopedSpinLock lock(m_lock);
        while (state() == Thread::Stopped) {
            lock.unlock();
            // We shouldn't be holding the big lock here
            yield_while_not_holding_big_lock();
            lock.lock();
        }
    }

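    // block<T>() is the heart of the blocking machinery: it constructs a blocker of
    // type T, registers it, transitions the thread to the Blocked state, yields to
    // the scheduler, and on wakeup reports why the thread was unblocked (normal
    // wakeup, signal, death, or timeout). A non-infinite BlockTimeout arms a timer
    // that unblocks the thread when the deadline passes.
    //
    // Illustrative call (a sketch only, not a call site from this file): blocking on
    // a WaitQueue with a relative one-second timeout via the wait_on() helper below:
    //
    //     timeval timeout { 1, 0 };
    //     auto result = Thread::current()->wait_on(queue, Thread::BlockTimeout(false, &timeout));
    //     if (result.was_interrupted())
    //         return EINTR;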
    template<typename T, class... Args>
    [[nodiscard]] BlockResult block(const BlockTimeout& timeout, Args&&... args)
    {
        ASSERT(!Processor::current().in_irq());
        ASSERT(this == Thread::current());
        ScopedCritical critical;
        ASSERT(!s_mm_lock.own_lock());

        ScopedSpinLock scheduler_lock(g_scheduler_lock);
        ScopedSpinLock block_lock(m_block_lock);
        // We need to hold m_block_lock so that nobody can unblock a blocker as soon
        // as it is constructed and registered elsewhere
        m_in_block = true;
        T t(forward<Args>(args)...);

        // Relaxed semantics are fine for timeout_unblocked because we
        // synchronize on the spin locks already.
        Atomic<bool, AK::MemoryOrder::memory_order_relaxed> timeout_unblocked(false);
        RefPtr<Timer> timer;
        {
            switch (state()) {
            case Thread::Stopped:
                // It's possible that we were requested to be stopped!
                break;
            case Thread::Running:
                ASSERT(m_blocker == nullptr);
                break;
            default:
                ASSERT_NOT_REACHED();
            }

            m_blocker = &t;
            if (!t.should_block()) {
                // Don't block if the wake condition is already met
                t.not_blocking(false);
                m_blocker = nullptr;
                m_in_block = false;
                return BlockResult::NotBlocked;
            }

            auto& block_timeout = t.override_timeout(timeout);
            if (!block_timeout.is_infinite()) {
                // Process::kill_all_threads may be called at any time, which will mark all
                // threads to die. In that case
                timer = TimerQueue::the().add_timer_without_id(block_timeout.clock_id(), block_timeout.absolute_time(), [&]() {
                    ASSERT(!Processor::current().in_irq());
                    ASSERT(!g_scheduler_lock.own_lock());
                    ASSERT(!m_block_lock.own_lock());
                    // NOTE: this may execute on the same or any other processor!
                    ScopedSpinLock scheduler_lock(g_scheduler_lock);
                    ScopedSpinLock block_lock(m_block_lock);
                    if (m_blocker && timeout_unblocked.exchange(true) == false)
                        unblock();
                });
                if (!timer) {
                    // Timeout is already in the past
                    t.not_blocking(true);
                    m_blocker = nullptr;
                    m_in_block = false;
                    return BlockResult::InterruptedByTimeout;
                }
            }

            t.begin_blocking({});

            set_state(Thread::Blocked);
        }

        block_lock.unlock();

        bool did_timeout = false;
        auto previous_locked = LockMode::Unlocked;
        u32 lock_count_to_restore = 0;
        for (;;) {
            scheduler_lock.unlock();

            // Yield to the scheduler, and wait for us to resume unblocked.
            if (previous_locked == LockMode::Unlocked)
                previous_locked = unlock_process_if_locked(lock_count_to_restore);

            ASSERT(!g_scheduler_lock.own_lock());
            ASSERT(Processor::current().in_critical());
            yield_while_not_holding_big_lock();

            scheduler_lock.lock();
            ScopedSpinLock block_lock2(m_block_lock);
            if (should_be_stopped() || state() == Stopped) {
                dbgln("Thread should be stopped, current state: {}", state_string());
                set_state(Thread::Blocked);
                continue;
            }
            if (m_blocker && !m_blocker->can_be_interrupted() && !m_should_die) {
                block_lock2.unlock();
                dbgln("Thread should not be unblocking, current state: {}", state_string());
                set_state(Thread::Blocked);
                continue;
            }
            // Prevent the timeout from unblocking this thread if it happens to
            // be in the process of firing already
            did_timeout |= timeout_unblocked.exchange(true);
            if (m_blocker) {
                // Remove ourselves...
                ASSERT(m_blocker == &t);
                m_blocker = nullptr;
            }
            m_in_block = false;
            break;
        }

        if (t.was_interrupted_by_signal()) {
            ScopedSpinLock lock(m_lock);
            dispatch_one_pending_signal();
        }

        // Notify the blocker that we are no longer blocking. It may need
        // to clean up now while we're still holding m_lock
        auto result = t.end_blocking({}, did_timeout); // calls was_unblocked internally

        scheduler_lock.unlock();
        if (timer && !did_timeout) {
            // Cancel the timer while not holding any locks. This allows
            // the timer function to complete before we remove it
            // (e.g. if it's on another processor)
            TimerQueue::the().cancel_timer(timer.release_nonnull());
        }
        if (previous_locked != LockMode::Unlocked) {
            // NOTE: this may trigger another call to Thread::block(), so
            // we need to do this after we're all done and restored m_in_block!
            relock_process(previous_locked, lock_count_to_restore);
        }
        return result;
    }

    void unblock_from_blocker(Blocker&);
    void unblock(u8 signal = 0);

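    // wait_on() is a convenience wrapper that blocks the current thread on a
    // WaitQueue via a QueueBlocker until the queue is woken or the timeout expires.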
    template<class... Args>
    Thread::BlockResult wait_on(WaitQueue& wait_queue, const Thread::BlockTimeout& timeout, Args&&... args)
    {
        ASSERT(this == Thread::current());
        return block<Thread::QueueBlocker>(timeout, wait_queue, forward<Args>(args)...);
    }

    BlockResult sleep(clockid_t, const timespec&, timespec* = nullptr);
    BlockResult sleep(const timespec& duration, timespec* remaining_time = nullptr)
    {
        return sleep(CLOCK_MONOTONIC_COARSE, duration, remaining_time);
    }
    BlockResult sleep_until(clockid_t, const timespec&);
    BlockResult sleep_until(const timespec& duration)
    {
        return sleep_until(CLOCK_MONOTONIC_COARSE, duration);
    }

    // Tell this thread to unblock if needed,
    // gracefully unwind the stack and die.
    void set_should_die();
    [[nodiscard]] bool should_die() const { return m_should_die; }
    void die_if_needed();

    void exit(void* = nullptr);

    bool tick();
    void set_ticks_left(u32 t) { m_ticks_left = t; }
    u32 ticks_left() const { return m_ticks_left; }

    u32 kernel_stack_base() const { return m_kernel_stack_base; }
    u32 kernel_stack_top() const { return m_kernel_stack_top; }

    void set_state(State, u8 = 0);

    [[nodiscard]] bool is_initialized() const { return m_initialized; }
    void set_initialized(bool initialized) { m_initialized = initialized; }

    void send_urgent_signal_to_self(u8 signal);
    void send_signal(u8 signal, Process* sender);

    u32 update_signal_mask(u32 signal_mask);
    u32 signal_mask_block(sigset_t signal_set, bool block);
    u32 signal_mask() const;
    void clear_signals();

    void set_dump_backtrace_on_finalization() { m_dump_backtrace_on_finalization = true; }

    DispatchSignalResult dispatch_one_pending_signal();
    DispatchSignalResult try_dispatch_one_pending_signal(u8 signal);
    DispatchSignalResult dispatch_signal(u8 signal);
    void check_dispatch_pending_signal();
    [[nodiscard]] bool has_unmasked_pending_signals() const { return m_have_any_unmasked_pending_signals.load(AK::memory_order_consume); }
    void terminate_due_to_signal(u8 signal);
    [[nodiscard]] bool should_ignore_signal(u8 signal) const;
    [[nodiscard]] bool has_signal_handler(u8 signal) const;
    [[nodiscard]] bool has_pending_signal(u8 signal) const;
    u32 pending_signals() const;
    u32 pending_signals_for_state() const;

    FPUState& fpu_state() { return *m_fpu_state; }

    void set_default_signal_dispositions();
    bool push_value_on_stack(FlatPtr);

    KResult make_thread_specific_region(Badge<Process>);

    unsigned syscall_count() const { return m_syscall_count; }
    void did_syscall() { ++m_syscall_count; }
    unsigned inode_faults() const { return m_inode_faults; }
    void did_inode_fault() { ++m_inode_faults; }
    unsigned zero_faults() const { return m_zero_faults; }
    void did_zero_fault() { ++m_zero_faults; }
    unsigned cow_faults() const { return m_cow_faults; }
    void did_cow_fault() { ++m_cow_faults; }

    unsigned file_read_bytes() const { return m_file_read_bytes; }
    unsigned file_write_bytes() const { return m_file_write_bytes; }

    void did_file_read(unsigned bytes)
    {
        m_file_read_bytes += bytes;
    }

    void did_file_write(unsigned bytes)
    {
        m_file_write_bytes += bytes;
    }

    unsigned unix_socket_read_bytes() const { return m_unix_socket_read_bytes; }
    unsigned unix_socket_write_bytes() const { return m_unix_socket_write_bytes; }

    void did_unix_socket_read(unsigned bytes)
    {
        m_unix_socket_read_bytes += bytes;
    }

    void did_unix_socket_write(unsigned bytes)
    {
        m_unix_socket_write_bytes += bytes;
    }

    unsigned ipv4_socket_read_bytes() const { return m_ipv4_socket_read_bytes; }
    unsigned ipv4_socket_write_bytes() const { return m_ipv4_socket_write_bytes; }

    void did_ipv4_socket_read(unsigned bytes)
    {
        m_ipv4_socket_read_bytes += bytes;
    }

    void did_ipv4_socket_write(unsigned bytes)
    {
        m_ipv4_socket_write_bytes += bytes;
    }

    void set_active(bool active) { m_is_active = active; }

    u32 saved_critical() const { return m_saved_critical; }
    void save_critical(u32 critical) { m_saved_critical = critical; }

    [[nodiscard]] bool is_active() const { return m_is_active; }

    [[nodiscard]] bool is_finalizable() const
    {
        // We can't finalize as long as this thread is still running
        // Note that checking for Running state here isn't sufficient
        // as the thread may not be in Running state but switching out.
        // m_is_active is set to false once the context switch is
        // complete and the thread is not executing on any processor.
        if (m_is_active.load(AK::memory_order_acquire))
            return false;

        // We can't finalize until the thread is either detached or
        // a join has started. We can't make m_is_joinable atomic
        // because that would introduce a race in try_join.
        ScopedSpinLock lock(m_lock);
        return !m_is_joinable;
    }

    RefPtr<Thread> clone(Process&);

    template<typename Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<typename Callback>
    static IterationDecision for_each_living(Callback);
    template<typename Callback>
    static IterationDecision for_each(Callback);

    [[nodiscard]] static bool is_runnable_state(Thread::State state)
    {
        return state == Thread::State::Running || state == Thread::State::Runnable;
    }

    static constexpr u32 default_kernel_stack_size = 65536;
    static constexpr u32 default_userspace_stack_size = 4 * MiB;

    u32 ticks_in_user() const { return m_ticks_in_user; }
    u32 ticks_in_kernel() const { return m_ticks_in_kernel; }

    enum class PreviousMode : u8 {
        KernelMode = 0,
        UserMode
    };
    PreviousMode previous_mode() const { return m_previous_mode; }
    void set_previous_mode(PreviousMode mode) { m_previous_mode = mode; }

    TrapFrame*& current_trap() { return m_current_trap; }

    RecursiveSpinLock& get_lock() const { return m_lock; }

#if LOCK_DEBUG
    void holding_lock(Lock& lock, int refs_delta, const char* file = nullptr, int line = 0)
    {
        ASSERT(refs_delta != 0);
        m_holding_locks.fetch_add(refs_delta, AK::MemoryOrder::memory_order_relaxed);
        ScopedSpinLock list_lock(m_holding_locks_lock);
        if (refs_delta > 0) {
            bool have_existing = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    have_existing = true;
                    info.count += refs_delta;
                    break;
                }
            }
            if (!have_existing)
                m_holding_locks_list.append({ &lock, file ? file : "unknown", line, 1 });
        } else {
            ASSERT(refs_delta < 0);
            bool found = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    ASSERT(info.count >= (unsigned)-refs_delta);
                    info.count -= (unsigned)-refs_delta;
                    if (info.count == 0)
                        m_holding_locks_list.remove(i);
                    found = true;
                    break;
                }
            }
            ASSERT(found);
        }
    }
    u32 lock_count() const
    {
        return m_holding_locks.load(AK::MemoryOrder::memory_order_relaxed);
    }
#endif

    bool was_created() const
    {
        return m_kernel_stack_region;
    }

    bool is_handling_page_fault() const { return m_handling_page_fault; }
    void set_handling_page_fault(bool b) { m_handling_page_fault = b; }

private:
    IntrusiveListNode m_process_thread_list_node;
    IntrusiveListNode m_runnable_list_node;
    int m_runnable_priority { -1 };

    friend struct SchedulerData;
    friend class WaitQueue;

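    // JoinBlockCondition publishes this thread's exit value and wakes any
    // JoinBlocker waiting on it once the thread has exited or is being finalized.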
    class JoinBlockCondition : public BlockCondition {
    public:
        void thread_did_exit(void* exit_value)
        {
            ScopedSpinLock lock(m_lock);
            ASSERT(!m_thread_did_exit);
            m_thread_did_exit = true;
            m_exit_value.store(exit_value, AK::MemoryOrder::memory_order_release);
            do_unblock_joiner();
        }
        void thread_finalizing()
        {
            ScopedSpinLock lock(m_lock);
            do_unblock_joiner();
        }
        void* exit_value() const
        {
            ASSERT(m_thread_did_exit);
            return m_exit_value.load(AK::MemoryOrder::memory_order_acquire);
        }

        void try_unblock(JoinBlocker& blocker)
        {
            ScopedSpinLock lock(m_lock);
            if (m_thread_did_exit)
                blocker.unblock(exit_value(), false);
        }

    protected:
        virtual bool should_add_blocker(Blocker& b, void*) override
        {
            ASSERT(b.blocker_type() == Blocker::Type::Join);
            auto& blocker = static_cast<JoinBlocker&>(b);

            // NOTE: m_lock is held already!
            if (m_thread_did_exit) {
                blocker.unblock(exit_value(), true);
                return false;
            }
            return true;
        }

    private:
        void do_unblock_joiner()
        {
            do_unblock([&](Blocker& b, void*, bool&) {
                ASSERT(b.blocker_type() == Blocker::Type::Join);
                auto& blocker = static_cast<JoinBlocker&>(b);
                return blocker.unblock(exit_value(), false);
            });
        }

        Atomic<void*> m_exit_value { nullptr };
        bool m_thread_did_exit { false };
    };

    LockMode unlock_process_if_locked(u32&);
    void relock_process(LockMode, u32);
    String backtrace_impl();
    void reset_fpu_state();

    mutable RecursiveSpinLock m_lock;
    mutable RecursiveSpinLock m_block_lock;
    NonnullRefPtr<Process> m_process;
    ThreadID m_tid { -1 };
    TSS32 m_tss;
    TrapFrame* m_current_trap { nullptr };
    u32 m_saved_critical { 1 };
    IntrusiveListNode m_ready_queue_node;
    Atomic<u32> m_cpu { 0 };
    u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
    u32 m_ticks_left { 0 };
    u32 m_times_scheduled { 0 };
    u32 m_ticks_in_user { 0 };
    u32 m_ticks_in_kernel { 0 };
    u32 m_pending_signals { 0 };
    u32 m_signal_mask { 0 };
    u32 m_kernel_stack_base { 0 };
    u32 m_kernel_stack_top { 0 };
    OwnPtr<Region> m_kernel_stack_region;
    VirtualAddress m_thread_specific_data;
    SignalActionData m_signal_action_data[32];
    Blocker* m_blocker { nullptr };

#if LOCK_DEBUG
    struct HoldingLockInfo {
        Lock* lock;
        const char* file;
        int line;
        unsigned count;
    };
    Atomic<u32> m_holding_locks { 0 };
    SpinLock<u8> m_holding_locks_lock;
    Vector<HoldingLockInfo> m_holding_locks_list;
#endif

    JoinBlockCondition m_join_condition;
    Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_active { false };
    bool m_is_joinable { true };
    bool m_handling_page_fault { false };
    PreviousMode m_previous_mode { PreviousMode::UserMode };

    unsigned m_syscall_count { 0 };
    unsigned m_inode_faults { 0 };
    unsigned m_zero_faults { 0 };
    unsigned m_cow_faults { 0 };

    unsigned m_file_read_bytes { 0 };
    unsigned m_file_write_bytes { 0 };

    unsigned m_unix_socket_read_bytes { 0 };
    unsigned m_unix_socket_write_bytes { 0 };

    unsigned m_ipv4_socket_read_bytes { 0 };
    unsigned m_ipv4_socket_write_bytes { 0 };

    FPUState* m_fpu_state { nullptr };
    State m_state { Invalid };
    String m_name;
    u32 m_priority { THREAD_PRIORITY_NORMAL };

    State m_stop_state { Invalid };

    bool m_dump_backtrace_on_finalization { false };
    bool m_should_die { false };
    bool m_initialized { false };
    bool m_in_block { false };
    Atomic<bool> m_have_any_unmasked_pending_signals { false };

    void yield_without_holding_big_lock();
    void donate_without_holding_big_lock(RefPtr<Thread>&, const char*);
    void yield_while_not_holding_big_lock();

    void update_state_for_thread(Thread::State previous_state);
    void drop_thread_count(bool);
};

template<typename Callback>
inline IterationDecision Thread::for_each_living(Callback callback)
{
    ASSERT_INTERRUPTS_DISABLED();
    return Thread::for_each([callback](Thread& thread) -> IterationDecision {
        if (thread.state() != Thread::State::Dead && thread.state() != Thread::State::Dying)
            return callback(thread);
        return IterationDecision::Continue;
    });
}

template<typename Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    ASSERT_INTERRUPTS_DISABLED();
    ScopedSpinLock lock(g_scheduler_lock);
    auto ret = Scheduler::for_each_runnable(callback);
    if (ret == IterationDecision::Break)
        return ret;
    return Scheduler::for_each_nonrunnable(callback);
}

template<typename Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    ASSERT_INTERRUPTS_DISABLED();
    ScopedSpinLock lock(g_scheduler_lock);
    auto new_callback = [=](Thread& thread) -> IterationDecision {
        if (thread.state() == state)
            return callback(thread);
        return IterationDecision::Continue;
    };
    if (is_runnable_state(state))
        return Scheduler::for_each_runnable(new_callback);
    return Scheduler::for_each_nonrunnable(new_callback);
}

const LogStream& operator<<(const LogStream&, const Thread&);

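// SchedulerData holds the intrusive lists of runnable and non-runnable threads
// that the Scheduler::for_each_* helpers below iterate over.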
struct SchedulerData {
    typedef IntrusiveList<Thread, &Thread::m_runnable_list_node> ThreadList;

    ThreadList m_runnable_threads;
    ThreadList m_nonrunnable_threads;

    bool has_thread(Thread& thread) const
    {
        return m_runnable_threads.contains(thread) || m_nonrunnable_threads.contains(thread);
    }

    ThreadList& thread_list_for_state(Thread::State state)
    {
        if (Thread::is_runnable_state(state))
            return m_runnable_threads;
        return m_nonrunnable_threads;
    }
};

template<typename Callback>
inline IterationDecision Scheduler::for_each_runnable(Callback callback)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(g_scheduler_lock.own_lock());
    auto& tl = g_scheduler_data->m_runnable_threads;
    for (auto it = tl.begin(); it != tl.end();) {
        auto& thread = *it;
        it = ++it;
        if (callback(thread) == IterationDecision::Break)
            return IterationDecision::Break;
    }

    return IterationDecision::Continue;
}

template<typename Callback>
inline IterationDecision Scheduler::for_each_nonrunnable(Callback callback)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(g_scheduler_lock.own_lock());
    auto& tl = g_scheduler_data->m_nonrunnable_threads;
    for (auto it = tl.begin(); it != tl.end();) {
        auto& thread = *it;
        it = ++it;
        if (callback(thread) == IterationDecision::Break)
            return IterationDecision::Break;
    }

    return IterationDecision::Continue;
}

}

template<>
struct AK::Formatter<Kernel::Thread> : AK::Formatter<FormatString> {
    void format(FormatBuilder&, const Kernel::Thread&);
};