/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include <AK/Function.h>
#include <AK/IntrusiveList.h>
#include <AK/Optional.h>
#include <AK/OwnPtr.h>
#include <AK/String.h>
#include <AK/Time.h>
#include <AK/Vector.h>
#include <AK/WeakPtr.h>
#include <AK/Weakable.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Forward.h>
#include <Kernel/KResult.h>
#include <Kernel/LockMode.h>
#include <Kernel/Scheduler.h>
#include <Kernel/ThreadTracer.h>
#include <Kernel/TimerQueue.h>
#include <Kernel/UnixTypes.h>
#include <LibC/fd_set.h>
#include <LibELF/AuxiliaryVector.h>

//#define LOCK_DEBUG

namespace Kernel {

enum class DispatchSignalResult {
    Deferred = 0,
    Yield,
    Terminate,
    Continue
};

struct SignalActionData {
    VirtualAddress handler_or_sigaction;
    u32 mask { 0 };
    int flags { 0 };
};

struct ThreadSpecificData {
    ThreadSpecificData* self;
};

#define THREAD_PRIORITY_MIN 1
#define THREAD_PRIORITY_LOW 10
#define THREAD_PRIORITY_NORMAL 30
#define THREAD_PRIORITY_HIGH 50
#define THREAD_PRIORITY_MAX 99

#define THREAD_AFFINITY_DEFAULT 0xffffffff

class Thread
    : public RefCounted<Thread>
    , public Weakable<Thread> {
    AK_MAKE_NONCOPYABLE(Thread);
    AK_MAKE_NONMOVABLE(Thread);

    friend class Process;
    friend class Scheduler;

public:
    inline static Thread* current()
    {
        return Processor::current().current_thread();
    }

    explicit Thread(NonnullRefPtr<Process>);
    ~Thread();

    static RefPtr<Thread> from_tid(ThreadID);
    static void finalize_dying_threads();

    ThreadID tid() const { return m_tid; }
    ProcessID pid() const;

    void set_priority(u32 p) { m_priority = p; }
    u32 priority() const { return m_priority; }

    void set_priority_boost(u32 boost) { m_priority_boost = boost; }
    u32 priority_boost() const { return m_priority_boost; }

    u32 effective_priority() const;

    void detach()
    {
        ScopedSpinLock lock(m_lock);
        m_is_joinable = false;
    }

    bool is_joinable() const
    {
        ScopedSpinLock lock(m_lock);
        return m_is_joinable;
    }

    Process& process() { return m_process; }
    const Process& process() const { return m_process; }

    String backtrace();
    Vector<FlatPtr> raw_backtrace(FlatPtr ebp, FlatPtr eip) const;

    String name() const
    {
        // Because the name can be changed, we can't return a const
        // reference here. We must make a copy.
        ScopedSpinLock lock(m_lock);
        return m_name;
    }
    void set_name(const StringView& s)
    {
        ScopedSpinLock lock(m_lock);
        m_name = s;
    }
    void set_name(String&& name)
    {
        ScopedSpinLock lock(m_lock);
        m_name = move(name);
    }

    void finalize();

    enum State : u8 {
        Invalid = 0,
        Runnable,
        Running,
        Dying,
        Dead,
        Stopped,
        Blocked
    };

    class BlockResult {
    public:
        enum Type {
            WokeNormally,
            NotBlocked,
            InterruptedBySignal,
            InterruptedByDeath,
            InterruptedByTimeout,
        };

        BlockResult() = delete;

        BlockResult(Type type)
            : m_type(type)
        {
        }

        bool operator==(Type type) const
        {
            return m_type == type;
        }
        bool operator!=(Type type) const
        {
            return m_type != type;
        }

        bool was_interrupted() const
        {
            switch (m_type) {
            case InterruptedBySignal:
            case InterruptedByDeath:
                return true;
            default:
                return false;
            }
        }

        bool timed_out() const
        {
            return m_type == InterruptedByTimeout;
        }

    private:
        Type m_type;
    };

    class BlockTimeout {
    public:
        BlockTimeout()
            : m_infinite(true)
        {
        }
        BlockTimeout(std::nullptr_t)
            : m_infinite(true)
        {
        }
        explicit BlockTimeout(bool is_absolute, const timeval* time, const timespec* start_time = nullptr, clockid_t clock_id = CLOCK_MONOTONIC)
            : m_clock_id(clock_id)
            , m_infinite(!time)
        {
            if (!m_infinite) {
                if (time->tv_sec > 0 || time->tv_usec > 0) {
                    timeval_to_timespec(*time, m_time);
                    m_should_block = true;
                }
                m_start_time = start_time ? *start_time : TimeManagement::the().current_time(clock_id).value();
                if (!is_absolute)
                    timespec_add(m_time, m_start_time, m_time);
            }
        }
        explicit BlockTimeout(bool is_absolute, const timespec* time, const timespec* start_time = nullptr, clockid_t clock_id = CLOCK_MONOTONIC)
            : m_clock_id(clock_id)
            , m_infinite(!time)
        {
            if (!m_infinite) {
                if (time->tv_sec > 0 || time->tv_nsec > 0) {
                    m_time = *time;
                    m_should_block = true;
                }
                m_start_time = start_time ? *start_time : TimeManagement::the().current_time(clock_id).value();
                if (!is_absolute)
                    timespec_add(m_time, m_start_time, m_time);
            }
        }

        const timespec& absolute_time() const { return m_time; }
        const timespec* start_time() const { return !m_infinite ? &m_start_time : nullptr; }
        clockid_t clock_id() const { return m_clock_id; }
        bool is_infinite() const { return m_infinite; }
        bool should_block() const { return m_infinite || m_should_block; }

    private:
        timespec m_time { 0, 0 };
        timespec m_start_time { 0, 0 };
        clockid_t m_clock_id { CLOCK_MONOTONIC };
        bool m_infinite { false };
        bool m_should_block { false };
    };
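
    // Example (sketch): a relative two-second timeout against the default
    // CLOCK_MONOTONIC clock could be built like this:
    //
    //     timeval duration { 2, 0 };
    //     Thread::BlockTimeout timeout(false, &duration);
    //
    // Unless a Blocker overrides it, absolute_time() then holds the deadline
    // that Thread::block() hands to the TimerQueue.
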
    class BlockCondition;
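
    // A Blocker represents one reason a thread is blocked. Thread::block<T>()
    // constructs the blocker on the blocking thread's stack, and a matching
    // BlockCondition (registered via set_block_condition()) keeps track of it
    // so that other threads can later wake it through unblock_from_blocker().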
    class Blocker {
    public:
        enum class Type {
            Unknown = 0,
            File,
            Plan9FS,
            Join,
            Queue,
            Routing,
            Sleep,
            Wait
        };
        virtual ~Blocker();
        virtual const char* state_string() const = 0;
        virtual bool should_block() { return true; }
        virtual Type blocker_type() const = 0;
        virtual const BlockTimeout& override_timeout(const BlockTimeout& timeout) { return timeout; }
        virtual bool can_be_interrupted() const { return true; }
        virtual void not_blocking(bool) = 0;
        virtual void was_unblocked(bool did_timeout)
        {
            if (did_timeout) {
                ScopedSpinLock lock(m_lock);
                m_did_timeout = true;
            }
        }
        void set_interrupted_by_death()
        {
            ScopedSpinLock lock(m_lock);
            do_set_interrupted_by_death();
        }
        void set_interrupted_by_signal(u8 signal)
        {
            ScopedSpinLock lock(m_lock);
            do_set_interrupted_by_signal(signal);
        }
        u8 was_interrupted_by_signal() const
        {
            ScopedSpinLock lock(m_lock);
            return do_get_interrupted_by_signal();
        }
        virtual Thread::BlockResult block_result()
        {
            ScopedSpinLock lock(m_lock);
            if (m_was_interrupted_by_death)
                return Thread::BlockResult::InterruptedByDeath;
            if (m_was_interrupted_by_signal != 0)
                return Thread::BlockResult::InterruptedBySignal;
            if (m_did_timeout)
                return Thread::BlockResult::InterruptedByTimeout;
            return Thread::BlockResult::WokeNormally;
        }

        void begin_blocking(Badge<Thread>);
        BlockResult end_blocking(Badge<Thread>, bool);

    protected:
        void do_set_interrupted_by_death()
        {
            m_was_interrupted_by_death = true;
        }
        void do_set_interrupted_by_signal(u8 signal)
        {
            ASSERT(signal != 0);
            m_was_interrupted_by_signal = signal;
        }
        void do_clear_interrupted_by_signal()
        {
            m_was_interrupted_by_signal = 0;
        }
        u8 do_get_interrupted_by_signal() const
        {
            return m_was_interrupted_by_signal;
        }
        bool was_interrupted() const
        {
            return m_was_interrupted_by_death || m_was_interrupted_by_signal != 0;
        }
        void unblock_from_blocker()
        {
            RefPtr<Thread> thread;
            {
                ScopedSpinLock lock(m_lock);
                if (m_is_blocking) {
                    m_is_blocking = false;
                    ASSERT(m_blocked_thread);
                    thread = m_blocked_thread;
                }
            }
            if (thread)
                thread->unblock_from_blocker(*this);
        }

        bool set_block_condition(BlockCondition&, void* = nullptr);

        mutable RecursiveSpinLock m_lock;

    private:
        BlockCondition* m_block_condition { nullptr };
        void* m_block_data { nullptr };
        Thread* m_blocked_thread { nullptr };
        u8 m_was_interrupted_by_signal { 0 };
        bool m_is_blocking { false };
        bool m_was_interrupted_by_death { false };
        bool m_did_timeout { false };
    };
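
    // A BlockCondition is the wakeup side of the protocol: it keeps a list of
    // registered blockers and unblocks them under m_lock. Subclasses can reject
    // or immediately satisfy a blocker by overriding should_add_blocker().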
    class BlockCondition {
        AK_MAKE_NONCOPYABLE(BlockCondition);
        AK_MAKE_NONMOVABLE(BlockCondition);

    public:
        BlockCondition() = default;

        virtual ~BlockCondition()
        {
            ScopedSpinLock lock(m_lock);
            ASSERT(m_blockers.is_empty());
        }

        bool add_blocker(Blocker& blocker, void* data)
        {
            ScopedSpinLock lock(m_lock);
            if (!should_add_blocker(blocker, data))
                return false;
            m_blockers.append({ &blocker, data });
            return true;
        }

        void remove_blocker(Blocker& blocker, void* data)
        {
            ScopedSpinLock lock(m_lock);
            // NOTE: it's possible that the blocker is no longer present
            m_blockers.remove_first_matching([&](auto& info) {
                return info.blocker == &blocker && info.data == data;
            });
        }

    protected:
        template<typename UnblockOne>
        void unblock(UnblockOne unblock_one)
        {
            ScopedSpinLock lock(m_lock);
            do_unblock(unblock_one);
        }

        template<typename UnblockOne>
        void do_unblock(UnblockOne unblock_one)
        {
            ASSERT(m_lock.is_locked());
            for (size_t i = 0; i < m_blockers.size();) {
                auto& info = m_blockers[i];
                if (unblock_one(*info.blocker, info.data)) {
                    m_blockers.remove(i);
                    continue;
                }
                i++;
            }
        }

        template<typename UnblockOne>
        bool unblock_some(UnblockOne unblock_one)
        {
            ScopedSpinLock lock(m_lock);
            return do_unblock_some(unblock_one);
        }

        template<typename UnblockOne>
        bool do_unblock_some(UnblockOne unblock_one)
        {
            ASSERT(m_lock.is_locked());
            bool stop_iterating = false;
            for (size_t i = 0; i < m_blockers.size() && !stop_iterating;) {
                auto& info = m_blockers[i];
                if (unblock_one(*info.blocker, info.data, stop_iterating)) {
                    m_blockers.remove(i);
                    continue;
                }
                i++;
            }
            return !stop_iterating;
        }

        template<typename UnblockOne>
        bool unblock_all(UnblockOne unblock_one)
        {
            ScopedSpinLock lock(m_lock);
            return do_unblock_all(unblock_one);
        }

        template<typename UnblockOne>
        bool do_unblock_all(UnblockOne unblock_one)
        {
            ASSERT(m_lock.is_locked());
            bool unblocked_any = false;
            for (auto& info : m_blockers) {
                bool did_unblock = unblock_one(*info.blocker, info.data);
                unblocked_any |= did_unblock;
                ASSERT(did_unblock);
            }
            m_blockers.clear();
            return unblocked_any;
        }

        virtual bool should_add_blocker(Blocker&, void*) { return true; }

        SpinLock<u8> m_lock;

    private:
        struct BlockerInfo {
            Blocker* blocker;
            void* data;
        };
        Vector<BlockerInfo, 4> m_blockers;
    };

    friend class JoinBlocker;
    class JoinBlocker final : public Blocker {
    public:
        explicit JoinBlocker(Thread& joinee, KResult& try_join_result, void*& joinee_exit_value);
        virtual Type blocker_type() const override { return Type::Join; }
        virtual const char* state_string() const override { return "Joining"; }
        virtual bool can_be_interrupted() const override { return false; }
        virtual bool should_block() override { return !m_join_error && m_should_block; }
        virtual void not_blocking(bool) override;

        bool unblock(void*, bool);

    private:
        NonnullRefPtr<Thread> m_joinee;
        void*& m_joinee_exit_value;
        bool m_join_error { false };
        bool m_did_unblock { false };
        bool m_should_block { true };
    };

    class QueueBlocker : public Blocker {
    public:
        explicit QueueBlocker(WaitQueue&, const char* block_reason = nullptr);
        virtual ~QueueBlocker();

        virtual Type blocker_type() const override { return Type::Queue; }
        virtual const char* state_string() const override { return m_block_reason ? m_block_reason : "Queue"; }
        virtual void not_blocking(bool) override { }

        virtual bool should_block() override
        {
            return m_should_block;
        }

        bool unblock();

    protected:
        const char* const m_block_reason;
        bool m_should_block { true };
        bool m_did_unblock { false };
    };

    class FileBlocker : public Blocker {
    public:
        enum class BlockFlags : u32 {
            None = 0,
            Read = 1 << 0,
            Write = 1 << 1,
            ReadPriority = 1 << 2,
            Accept = 1 << 3,
            Connect = 1 << 4,
            SocketFlags = Accept | Connect,
            WriteNotOpen = 1 << 5,
            WriteError = 1 << 6,
            WriteHangUp = 1 << 7,
            ReadHangUp = 1 << 8,
            Exception = WriteNotOpen | WriteError | WriteHangUp | ReadHangUp,
        };

        virtual Type blocker_type() const override { return Type::File; }

        virtual bool should_block() override
        {
            return m_should_block;
        }

        virtual bool unblock(bool, void*) = 0;

    protected:
        bool m_should_block { true };
    };

    class FileDescriptionBlocker : public FileBlocker {
    public:
        const FileDescription& blocked_description() const;

        virtual bool unblock(bool, void*) override;
        virtual void not_blocking(bool) override;

    protected:
        explicit FileDescriptionBlocker(FileDescription&, BlockFlags, BlockFlags&);

    private:
        NonnullRefPtr<FileDescription> m_blocked_description;
        const BlockFlags m_flags;
        BlockFlags& m_unblocked_flags;
        bool m_did_unblock { false };
        bool m_should_block { true };
    };

    class AcceptBlocker final : public FileDescriptionBlocker {
    public:
        explicit AcceptBlocker(FileDescription&, BlockFlags&);
        virtual const char* state_string() const override { return "Accepting"; }
    };

    class ConnectBlocker final : public FileDescriptionBlocker {
    public:
        explicit ConnectBlocker(FileDescription&, BlockFlags&);
        virtual const char* state_string() const override { return "Connecting"; }
    };

    class WriteBlocker final : public FileDescriptionBlocker {
    public:
        explicit WriteBlocker(FileDescription&, BlockFlags&);
        virtual const char* state_string() const override { return "Writing"; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;

    private:
        BlockTimeout m_timeout;
    };

    class ReadBlocker final : public FileDescriptionBlocker {
    public:
        explicit ReadBlocker(FileDescription&, BlockFlags&);
        virtual const char* state_string() const override { return "Reading"; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;

    private:
        BlockTimeout m_timeout;
    };

    class SleepBlocker final : public Blocker {
    public:
        explicit SleepBlocker(const BlockTimeout&, timespec* = nullptr);
        virtual const char* state_string() const override { return "Sleeping"; }
        virtual Type blocker_type() const override { return Type::Sleep; }
        virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;
        virtual void not_blocking(bool) override;
        virtual void was_unblocked(bool) override;
        virtual Thread::BlockResult block_result() override;

    private:
        void calculate_remaining();

        BlockTimeout m_deadline;
        timespec* m_remaining;
    };

    class SelectBlocker final : public FileBlocker {
    public:
        struct FDInfo {
            NonnullRefPtr<FileDescription> description;
            BlockFlags block_flags;
            BlockFlags unblocked_flags { BlockFlags::None };
        };

        typedef Vector<FDInfo, FD_SETSIZE> FDVector;
        SelectBlocker(FDVector& fds);
        virtual ~SelectBlocker();

        virtual bool unblock(bool, void*) override;
        virtual void not_blocking(bool) override;
        virtual void was_unblocked(bool) override;
        virtual const char* state_string() const override { return "Selecting"; }

    private:
        size_t collect_unblocked_flags();

        FDVector& m_fds;
        bool m_did_unblock { false };
    };
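
    // WaitBlocker backs wait()-style system calls: it blocks until a watched
    // child process terminates, stops, continues, or is disowned, and reports
    // the outcome through the caller-provided KResultOr<siginfo_t>.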
    class WaitBlocker final : public Blocker {
    public:
        enum class UnblockFlags {
            Terminated,
            Stopped,
            Continued,
            Disowned
        };

        WaitBlocker(int wait_options, idtype_t id_type, pid_t id, KResultOr<siginfo_t>& result);
        virtual const char* state_string() const override { return "Waiting"; }
        virtual Type blocker_type() const override { return Type::Wait; }
        virtual bool should_block() override { return m_should_block; }
        virtual void not_blocking(bool) override;
        virtual void was_unblocked(bool) override;

        bool unblock(Process& process, UnblockFlags flags, u8 signal, bool from_add_blocker);
        bool is_wait() const { return !(m_wait_options & WNOWAIT); }

    private:
        void do_was_disowned();
        void do_set_result(const siginfo_t&);

        const int m_wait_options;
        const idtype_t m_id_type;
        const pid_t m_waitee_id;
        KResultOr<siginfo_t>& m_result;
        RefPtr<Process> m_waitee;
        RefPtr<ProcessGroup> m_waitee_group;
        bool m_did_unblock { false };
        bool m_error { false };
        bool m_got_sigchild { false };
        bool m_should_block;
    };

    class WaitBlockCondition final : public BlockCondition {
        friend class WaitBlocker;

    public:
        WaitBlockCondition(Process& process)
            : m_process(process)
        {
        }

        void disowned_by_waiter(Process&);
        bool unblock(Process&, WaitBlocker::UnblockFlags, u8);
        void try_unblock(WaitBlocker&);
        void finalize();

    protected:
        virtual bool should_add_blocker(Blocker&, void*) override;

    private:
        struct ProcessBlockInfo {
            NonnullRefPtr<Process> process;
            WaitBlocker::UnblockFlags flags;
            u8 signal;
            bool was_waited { false };

            explicit ProcessBlockInfo(NonnullRefPtr<Process>&&, WaitBlocker::UnblockFlags, u8);
            ~ProcessBlockInfo();
        };

        Process& m_process;
        Vector<ProcessBlockInfo, 2> m_processes;
        bool m_finalized { false };
    };
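
    // try_join() performs the check-and-register sequence under m_lock: the
    // AddBlockerHandler callback runs while the lock is held, so it can register
    // a JoinBlocker (or equivalent) knowing the thread is still joinable, and no
    // concurrent join or exit can slip in between the check and the registration.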
    template<typename AddBlockerHandler>
    KResult try_join(AddBlockerHandler add_blocker)
    {
        if (Thread::current() == this)
            return KResult(-EDEADLK);

        ScopedSpinLock lock(m_lock);
        if (!m_is_joinable || state() == Dead)
            return KResult(-EINVAL);

        add_blocker();

        // From this point on the thread is no longer joinable by anyone
        // else. It also means that if the join is timed, it becomes
        // detached when a timeout happens.
        m_is_joinable = false;
        return KSuccess;
    }

    void did_schedule() { ++m_times_scheduled; }
    u32 times_scheduled() const { return m_times_scheduled; }

    void resume_from_stopped();

    bool should_be_stopped() const;
    bool is_stopped() const { return m_state == Stopped; }
    bool is_blocked() const { return m_state == Blocked; }
    bool is_in_block() const
    {
        ScopedSpinLock lock(m_block_lock);
        return m_in_block;
    }

    u32 cpu() const { return m_cpu.load(AK::MemoryOrder::memory_order_consume); }
    void set_cpu(u32 cpu) { m_cpu.store(cpu, AK::MemoryOrder::memory_order_release); }
    u32 affinity() const { return m_cpu_affinity; }
    void set_affinity(u32 affinity) { m_cpu_affinity = affinity; }

    u32 stack_ptr() const { return m_tss.esp; }

    RegisterState& get_register_dump_from_stack();

    TSS32& tss() { return m_tss; }
    const TSS32& tss() const { return m_tss; }
    State state() const { return m_state; }
    const char* state_string() const;
    u32 ticks() const { return m_ticks; }

    VirtualAddress thread_specific_data() const { return m_thread_specific_data; }
    size_t thread_specific_region_size() const { return m_thread_specific_region_size; }

    ALWAYS_INLINE void yield_if_stopped()
    {
        // If some thread stopped us, we need to yield to someone else.
        // We check this when entering/exiting a system call. A thread
        // may continue to execute in user land until the next timer
        // tick or entering the next system call, or if it's in kernel
        // mode then we will intercept prior to returning back to user
        // mode.
        ScopedSpinLock lock(m_lock);
        while (state() == Thread::Stopped) {
            lock.unlock();
            // We shouldn't be holding the big lock here
            yield_while_not_holding_big_lock();
            lock.lock();
        }
    }
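
    // block<T>() is the central path for going to sleep on a Blocker: it
    // constructs T from the trailing arguments, registers it as m_blocker,
    // arms a TimerQueue timer if the (possibly overridden) timeout is finite,
    // and yields until the blocker, a signal, death, or the timeout wakes us.
    // A typical call looks like wait_on() further below:
    //
    //     auto result = Thread::current()->block<Thread::QueueBlocker>(timeout, wait_queue);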
    template<typename T, class... Args>
    [[nodiscard]] BlockResult block(const BlockTimeout& timeout, Args&&... args)
    {
        ASSERT(!Processor::current().in_irq());
        ASSERT(this == Thread::current());
        ScopedCritical critical;
        ScopedSpinLock scheduler_lock(g_scheduler_lock);
        ScopedSpinLock block_lock(m_block_lock);
        // We need to hold m_block_lock so that nobody can unblock a blocker as soon
        // as it is constructed and registered elsewhere
        m_in_block = true;
        T t(forward<Args>(args)...);

        Atomic<bool> timeout_unblocked(false);
        Atomic<bool> did_unblock(false);
        RefPtr<Timer> timer;
        {
            switch (state()) {
            case Thread::Stopped:
                // It's possible that we were requested to be stopped!
                break;
            case Thread::Running:
                ASSERT(m_blocker == nullptr);
                break;
            default:
                ASSERT_NOT_REACHED();
            }

            m_blocker = &t;
            if (!t.should_block()) {
                // Don't block if the wake condition is already met
                t.not_blocking(false);
                m_blocker = nullptr;
                m_in_block = false;
                return BlockResult::NotBlocked;
            }

            auto& block_timeout = t.override_timeout(timeout);
            if (!block_timeout.is_infinite()) {
                // Process::kill_all_threads may be called at any time, which will mark all
                // threads to die. In that case
                timer = TimerQueue::the().add_timer_without_id(block_timeout.clock_id(), block_timeout.absolute_time(), [&]() {
                    ASSERT(!Processor::current().in_irq());
                    ASSERT(!g_scheduler_lock.own_lock());
                    ASSERT(!m_block_lock.own_lock());
                    // NOTE: this may execute on the same or any other processor!
                    ScopedSpinLock scheduler_lock(g_scheduler_lock);
                    ScopedSpinLock block_lock(m_block_lock);
                    if (m_blocker && timeout_unblocked.exchange(true, AK::MemoryOrder::memory_order_relaxed) == false)
                        unblock();
                });
                if (!timer) {
                    // Timeout is already in the past
                    t.not_blocking(true);
                    m_blocker = nullptr;
                    m_in_block = false;
                    return BlockResult::InterruptedByTimeout;
                }
            }

            t.begin_blocking({});

            set_state(Thread::Blocked);
        }

        block_lock.unlock();

        bool did_timeout = false;
        auto previous_locked = LockMode::Unlocked;
        u32 lock_count_to_restore = 0;
        for (;;) {
            scheduler_lock.unlock();

            // Yield to the scheduler, and wait for us to resume unblocked.
            if (previous_locked == LockMode::Unlocked)
                previous_locked = unlock_process_if_locked(lock_count_to_restore);

            ASSERT(!g_scheduler_lock.own_lock());
            ASSERT(Processor::current().in_critical());
            yield_while_not_holding_big_lock();

            scheduler_lock.lock();
            ScopedSpinLock block_lock2(m_block_lock);
            if (should_be_stopped() || state() == Stopped) {
                dbg() << "Thread should be stopped, current state: " << state_string();
                set_state(Thread::Blocked);
                continue;
            }
            if (m_blocker && !m_blocker->can_be_interrupted() && !m_should_die) {
                block_lock2.unlock();
                dbg() << "Thread should not be unblocking, current state: " << state_string();
                set_state(Thread::Blocked);
                continue;
            }
            // Prevent the timeout from unblocking this thread if it happens to
            // be in the process of firing already
            did_timeout |= timeout_unblocked.exchange(true, AK::MemoryOrder::memory_order_relaxed);
            if (m_blocker) {
                // Remove ourselves...
                ASSERT(m_blocker == &t);
                m_blocker = nullptr;
            }
            m_in_block = false;
            break;
        }

        if (t.was_interrupted_by_signal()) {
            ScopedSpinLock lock(m_lock);
            dispatch_one_pending_signal();
        }

        // Notify the blocker that we are no longer blocking. It may need
        // to clean up now while we're still holding m_lock
        auto result = t.end_blocking({}, did_timeout); // calls was_unblocked internally

        scheduler_lock.unlock();
        if (timer && !did_timeout) {
            // Cancel the timer while not holding any locks. This allows
            // the timer function to complete before we remove it
            // (e.g. if it's on another processor)
            TimerQueue::the().cancel_timer(timer.release_nonnull());
        }
        if (previous_locked != LockMode::Unlocked) {
            // NOTE: this may trigger another call to Thread::block(), so
            // we need to do this after we're all done and restored m_in_block!
            relock_process(previous_locked, lock_count_to_restore);
        }
        return result;
    }

    void unblock_from_blocker(Blocker&);
    void unblock(u8 signal = 0);

    template<class... Args>
    Thread::BlockResult wait_on(WaitQueue& wait_queue, const Thread::BlockTimeout& timeout, Args&&... args)
    {
        ASSERT(this == Thread::current());
        return block<Thread::QueueBlocker>(timeout, wait_queue, forward<Args>(args)...);
    }

    BlockResult sleep(clockid_t, const timespec&, timespec* = nullptr);
    BlockResult sleep(const timespec& duration, timespec* remaining_time = nullptr)
    {
        return sleep(CLOCK_MONOTONIC, duration, remaining_time);
    }
    BlockResult sleep_until(clockid_t, const timespec&);
    BlockResult sleep_until(const timespec& duration)
    {
        return sleep_until(CLOCK_MONOTONIC, duration);
    }
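
    // Example (sketch): a one-second sleep on the monotonic clock, with the
    // unslept remainder reported back if the sleep is interrupted:
    //
    //     timespec remaining {};
    //     auto result = Thread::current()->sleep({ 1, 0 }, &remaining);
    //     if (result.was_interrupted()) { /* remaining holds the time left */ }
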
    // Tell this thread to unblock if needed,
    // gracefully unwind the stack and die.
    void set_should_die();
    bool should_die() const { return m_should_die; }
    void die_if_needed();

    void exit(void* = nullptr);

    bool tick();
    void set_ticks_left(u32 t) { m_ticks_left = t; }
    u32 ticks_left() const { return m_ticks_left; }

    u32 kernel_stack_base() const { return m_kernel_stack_base; }
    u32 kernel_stack_top() const { return m_kernel_stack_top; }

    void set_state(State, u8 = 0);

    bool is_initialized() const { return m_initialized; }
    void set_initialized(bool initialized) { m_initialized = initialized; }

    void send_urgent_signal_to_self(u8 signal);
    void send_signal(u8 signal, Process* sender);

    u32 update_signal_mask(u32 signal_mask);
    u32 signal_mask_block(sigset_t signal_set, bool block);
    u32 signal_mask() const;
    void clear_signals();

    void set_dump_backtrace_on_finalization() { m_dump_backtrace_on_finalization = true; }

    DispatchSignalResult dispatch_one_pending_signal();
    DispatchSignalResult try_dispatch_one_pending_signal(u8 signal);
    DispatchSignalResult dispatch_signal(u8 signal);
    void check_dispatch_pending_signal();
    bool has_unmasked_pending_signals() const { return m_have_any_unmasked_pending_signals.load(AK::memory_order_consume); }
    void terminate_due_to_signal(u8 signal);
    bool should_ignore_signal(u8 signal) const;
    bool has_signal_handler(u8 signal) const;
    bool has_pending_signal(u8 signal) const;
    u32 pending_signals() const;
    u32 pending_signals_for_state() const;

    FPUState& fpu_state() { return *m_fpu_state; }

    void set_default_signal_dispositions();
    bool push_value_on_stack(FlatPtr);

    KResultOr<u32> make_userspace_stack_for_main_thread(Vector<String> arguments, Vector<String> environment, Vector<AuxiliaryValue>);

    KResult make_thread_specific_region(Badge<Process>);

    unsigned syscall_count() const { return m_syscall_count; }
    void did_syscall() { ++m_syscall_count; }
    unsigned inode_faults() const { return m_inode_faults; }
    void did_inode_fault() { ++m_inode_faults; }
    unsigned zero_faults() const { return m_zero_faults; }
    void did_zero_fault() { ++m_zero_faults; }
    unsigned cow_faults() const { return m_cow_faults; }
    void did_cow_fault() { ++m_cow_faults; }

    unsigned file_read_bytes() const { return m_file_read_bytes; }
    unsigned file_write_bytes() const { return m_file_write_bytes; }

    void did_file_read(unsigned bytes)
    {
        m_file_read_bytes += bytes;
    }

    void did_file_write(unsigned bytes)
    {
        m_file_write_bytes += bytes;
    }

    unsigned unix_socket_read_bytes() const { return m_unix_socket_read_bytes; }
    unsigned unix_socket_write_bytes() const { return m_unix_socket_write_bytes; }

    void did_unix_socket_read(unsigned bytes)
    {
        m_unix_socket_read_bytes += bytes;
    }

    void did_unix_socket_write(unsigned bytes)
    {
        m_unix_socket_write_bytes += bytes;
    }

    unsigned ipv4_socket_read_bytes() const { return m_ipv4_socket_read_bytes; }
    unsigned ipv4_socket_write_bytes() const { return m_ipv4_socket_write_bytes; }

    void did_ipv4_socket_read(unsigned bytes)
    {
        m_ipv4_socket_read_bytes += bytes;
    }

    void did_ipv4_socket_write(unsigned bytes)
    {
        m_ipv4_socket_write_bytes += bytes;
    }

    void set_active(bool active)
    {
        m_is_active.store(active, AK::memory_order_release);
    }

    bool is_active() const
    {
        return m_is_active.load(AK::MemoryOrder::memory_order_acquire);
    }

    bool is_finalizable() const
    {
        // We can't finalize as long as this thread is still running.
        // Note that checking for Running state here isn't sufficient
        // as the thread may not be in Running state but switching out.
        // m_is_active is set to false once the context switch is
        // complete and the thread is not executing on any processor.
        if (m_is_active.load(AK::memory_order_acquire))
            return false;

        // We can't finalize until the thread is either detached or
        // a join has started. We can't make m_is_joinable atomic
        // because that would introduce a race in try_join.
        ScopedSpinLock lock(m_lock);
        return !m_is_joinable;
    }

    RefPtr<Thread> clone(Process&);

    template<typename Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<typename Callback>
    static IterationDecision for_each_living(Callback);
    template<typename Callback>
    static IterationDecision for_each(Callback);

    static bool is_runnable_state(Thread::State state)
    {
        return state == Thread::State::Running || state == Thread::State::Runnable;
    }

    static constexpr u32 default_kernel_stack_size = 65536;
    static constexpr u32 default_userspace_stack_size = 4 * MiB;

    RecursiveSpinLock& get_lock() const { return m_lock; }

#ifdef LOCK_DEBUG
    void holding_lock(Lock& lock, int refs_delta, const char* file = nullptr, int line = 0)
    {
        ASSERT(refs_delta != 0);
        m_holding_locks.fetch_add(refs_delta, AK::MemoryOrder::memory_order_relaxed);
        ScopedSpinLock list_lock(m_holding_locks_lock);
        if (refs_delta > 0) {
            bool have_existing = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    have_existing = true;
                    info.count += refs_delta;
                    break;
                }
            }
            if (!have_existing)
                m_holding_locks_list.append({ &lock, file ? file : "unknown", line, 1 });
        } else {
            ASSERT(refs_delta < 0);
            bool found = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    ASSERT(info.count >= (unsigned)-refs_delta);
                    info.count -= (unsigned)-refs_delta;
                    if (info.count == 0)
                        m_holding_locks_list.remove(i);
                    found = true;
                    break;
                }
            }
            ASSERT(found);
        }
    }
    u32 lock_count() const
    {
        return m_holding_locks.load(AK::MemoryOrder::memory_order_relaxed);
    }
#endif

private:
    IntrusiveListNode m_runnable_list_node;

private:
    friend struct SchedulerData;
    friend class WaitQueue;

    class JoinBlockCondition : public BlockCondition {
    public:
        void thread_did_exit(void* exit_value)
        {
            ScopedSpinLock lock(m_lock);
            ASSERT(!m_thread_did_exit);
            m_thread_did_exit = true;
            m_exit_value.store(exit_value, AK::MemoryOrder::memory_order_release);
            do_unblock_joiner();
        }
        void thread_finalizing()
        {
            ScopedSpinLock lock(m_lock);
            do_unblock_joiner();
        }
        void* exit_value() const
        {
            ASSERT(m_thread_did_exit);
            return m_exit_value.load(AK::MemoryOrder::memory_order_acquire);
        }

        void try_unblock(JoinBlocker& blocker)
        {
            ScopedSpinLock lock(m_lock);
            if (m_thread_did_exit)
                blocker.unblock(exit_value(), false);
        }

    protected:
        virtual bool should_add_blocker(Blocker& b, void*) override
        {
            ASSERT(b.blocker_type() == Blocker::Type::Join);
            auto& blocker = static_cast<JoinBlocker&>(b);

            // NOTE: m_lock is held already!
            if (m_thread_did_exit) {
                blocker.unblock(exit_value(), true);
                return false;
            }
            return true;
        }

    private:
        void do_unblock_joiner()
        {
            do_unblock_all([&](Blocker& b, void*) {
                ASSERT(b.blocker_type() == Blocker::Type::Join);
                auto& blocker = static_cast<JoinBlocker&>(b);
                return blocker.unblock(exit_value(), false);
            });
        }

        Atomic<void*> m_exit_value { nullptr };
        bool m_thread_did_exit { false };
    };

    LockMode unlock_process_if_locked(u32&);
    void relock_process(LockMode, u32);
    String backtrace_impl();
    void reset_fpu_state();

    mutable RecursiveSpinLock m_lock;
    mutable RecursiveSpinLock m_block_lock;
    NonnullRefPtr<Process> m_process;
    ThreadID m_tid { -1 };
    TSS32 m_tss;
    Atomic<u32> m_cpu { 0 };
    u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
    u32 m_ticks { 0 };
    u32 m_ticks_left { 0 };
    u32 m_times_scheduled { 0 };
    u32 m_pending_signals { 0 };
    u32 m_signal_mask { 0 };
    u32 m_kernel_stack_base { 0 };
    u32 m_kernel_stack_top { 0 };
    OwnPtr<Region> m_kernel_stack_region;
    VirtualAddress m_thread_specific_data;
    size_t m_thread_specific_region_size { 0 };
    SignalActionData m_signal_action_data[32];
    Blocker* m_blocker { nullptr };

#ifdef LOCK_DEBUG
    struct HoldingLockInfo {
        Lock* lock;
        const char* file;
        int line;
        unsigned count;
    };
    Atomic<u32> m_holding_locks { 0 };
    SpinLock<u8> m_holding_locks_lock;
    Vector<HoldingLockInfo> m_holding_locks_list;
#endif

    JoinBlockCondition m_join_condition;
    Atomic<bool> m_is_active { false };
    bool m_is_joinable { true };

    unsigned m_syscall_count { 0 };
    unsigned m_inode_faults { 0 };
    unsigned m_zero_faults { 0 };
    unsigned m_cow_faults { 0 };

    unsigned m_file_read_bytes { 0 };
    unsigned m_file_write_bytes { 0 };

    unsigned m_unix_socket_read_bytes { 0 };
    unsigned m_unix_socket_write_bytes { 0 };

    unsigned m_ipv4_socket_read_bytes { 0 };
    unsigned m_ipv4_socket_write_bytes { 0 };

    FPUState* m_fpu_state { nullptr };
    State m_state { Invalid };
    String m_name;
    u32 m_priority { THREAD_PRIORITY_NORMAL };
    u32 m_extra_priority { 0 };
    u32 m_priority_boost { 0 };

    State m_stop_state { Invalid };

    bool m_dump_backtrace_on_finalization { false };
    bool m_should_die { false };
    bool m_initialized { false };
    bool m_in_block { false };
    Atomic<bool> m_have_any_unmasked_pending_signals { false };

    void yield_without_holding_big_lock();
    void donate_without_holding_big_lock(RefPtr<Thread>&, const char*);
    void yield_while_not_holding_big_lock();

    void update_state_for_thread(Thread::State previous_state);
};

template<typename Callback>
inline IterationDecision Thread::for_each_living(Callback callback)
{
    ASSERT_INTERRUPTS_DISABLED();
    return Thread::for_each([callback](Thread& thread) -> IterationDecision {
        if (thread.state() != Thread::State::Dead && thread.state() != Thread::State::Dying)
            return callback(thread);
        return IterationDecision::Continue;
    });
}

template<typename Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    ASSERT_INTERRUPTS_DISABLED();
    ScopedSpinLock lock(g_scheduler_lock);
    auto ret = Scheduler::for_each_runnable(callback);
    if (ret == IterationDecision::Break)
        return ret;
    return Scheduler::for_each_nonrunnable(callback);
}

template<typename Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    ASSERT_INTERRUPTS_DISABLED();
    ScopedSpinLock lock(g_scheduler_lock);
    auto new_callback = [=](Thread& thread) -> IterationDecision {
        if (thread.state() == state)
            return callback(thread);
        return IterationDecision::Continue;
    };
    if (is_runnable_state(state))
        return Scheduler::for_each_runnable(new_callback);
    return Scheduler::for_each_nonrunnable(new_callback);
}
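
// Example (sketch): with interrupts disabled, enumerate all runnable threads:
//
//     Thread::for_each_in_state(Thread::State::Runnable, [](Thread& thread) {
//         dbg() << "runnable: " << thread;
//         return IterationDecision::Continue;
//     });
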
const LogStream& operator<<(const LogStream&, const Thread&);

struct SchedulerData {
    typedef IntrusiveList<Thread, &Thread::m_runnable_list_node> ThreadList;

    ThreadList m_runnable_threads;
    ThreadList m_nonrunnable_threads;

    bool has_thread(Thread& thread) const
    {
        return m_runnable_threads.contains(thread) || m_nonrunnable_threads.contains(thread);
    }

    ThreadList& thread_list_for_state(Thread::State state)
    {
        if (Thread::is_runnable_state(state))
            return m_runnable_threads;
        return m_nonrunnable_threads;
    }
};

template<typename Callback>
inline IterationDecision Scheduler::for_each_runnable(Callback callback)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(g_scheduler_lock.own_lock());
    auto& tl = g_scheduler_data->m_runnable_threads;
    for (auto it = tl.begin(); it != tl.end();) {
        auto& thread = *it;
        // Advance before invoking the callback, in case the callback moves the thread.
        ++it;
        if (callback(thread) == IterationDecision::Break)
            return IterationDecision::Break;
    }
    return IterationDecision::Continue;
}

template<typename Callback>
inline IterationDecision Scheduler::for_each_nonrunnable(Callback callback)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(g_scheduler_lock.own_lock());
    auto& tl = g_scheduler_data->m_nonrunnable_threads;
    for (auto it = tl.begin(); it != tl.end();) {
        auto& thread = *it;
        // Advance before invoking the callback, in case the callback moves the thread.
        ++it;
        if (callback(thread) == IterationDecision::Break)
            return IterationDecision::Break;
    }
    return IterationDecision::Continue;
}

}