/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include <AK/Function.h>
#include <AK/IntrusiveList.h>
#include <AK/Optional.h>
#include <AK/OwnPtr.h>
#include <AK/String.h>
#include <AK/Vector.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Forward.h>
#include <Kernel/KResult.h>
#include <Kernel/Scheduler.h>
#include <Kernel/ThreadTracer.h>
#include <Kernel/UnixTypes.h>
#include <LibC/fd_set.h>
#include <LibELF/AuxiliaryVector.h>

namespace Kernel {

enum class ShouldUnblockThread {
    No = 0,
    Yes
};

struct SignalActionData {
    VirtualAddress handler_or_sigaction;
    u32 mask { 0 };
    int flags { 0 };
};

struct ThreadSpecificData {
    ThreadSpecificData* self;
};

#define THREAD_PRIORITY_MIN 1
#define THREAD_PRIORITY_LOW 10
#define THREAD_PRIORITY_NORMAL 30
#define THREAD_PRIORITY_HIGH 50
#define THREAD_PRIORITY_MAX 99

#define THREAD_AFFINITY_DEFAULT 0xffffffff

class Thread {
    AK_MAKE_NONCOPYABLE(Thread);
    AK_MAKE_NONMOVABLE(Thread);

    friend class Process;
    friend class Scheduler;

public:
    inline static Thread* current()
    {
        return Processor::current().current_thread();
    }
    explicit Thread(NonnullRefPtr<Process>);
    ~Thread();

    static Thread* from_tid(ThreadID);
    static void finalize_dying_threads();

    ThreadID tid() const { return m_tid; }
    ProcessID pid() const;

    void set_priority(u32 p) { m_priority = p; }
    u32 priority() const { return m_priority; }

    void set_priority_boost(u32 boost) { m_priority_boost = boost; }
    u32 priority_boost() const { return m_priority_boost; }

    u32 effective_priority() const;

    void set_joinable(bool j) { m_is_joinable = j; }
    bool is_joinable() const { return m_is_joinable; }

    Process& process() { return m_process; }
    const Process& process() const { return m_process; }

    String backtrace();
    Vector<FlatPtr> raw_backtrace(FlatPtr ebp, FlatPtr eip) const;

    const String& name() const { return m_name; }
    void set_name(const StringView& s) { m_name = s; }

    void finalize();

    enum State : u8 {
        Invalid = 0,
        Runnable,
        Running,
        Skip1SchedulerPass,
        Skip0SchedulerPasses,
        Dying,
        Dead,
        Stopped,
        Blocked,
        Queued,
    };
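
    // Rough semantics, as suggested by usage elsewhere in this header:
    // Runnable/Running are the schedulable states; Blocked means a Blocker is
    // installed; Queued means the thread is parked on a WaitQueue; Stopped is
    // used for job control; Dying/Dead drive finalization. The
    // Skip*SchedulerPass(es) states ask the scheduler to pass the thread over
    // for the given number of passes.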

    class Blocker {
    public:
        virtual ~Blocker() { }
        virtual bool should_unblock(Thread&) = 0;
        virtual const char* state_string() const = 0;
        virtual bool is_reason_signal() const { return false; }
        virtual timespec* override_timeout(timespec* timeout) { return timeout; }
        void set_interrupted_by_death() { m_was_interrupted_by_death = true; }
        bool was_interrupted_by_death() const { return m_was_interrupted_by_death; }
        void set_interrupted_by_signal() { m_was_interrupted_while_blocked = true; }
        bool was_interrupted_by_signal() const { return m_was_interrupted_while_blocked; }

    private:
        bool m_was_interrupted_while_blocked { false };
        bool m_was_interrupted_by_death { false };

        friend class Thread;
    };
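
    // A minimal sketch of a custom Blocker, assuming a hypothetical
    // timer_has_fired() predicate (not part of this header). A subclass only
    // supplies the wake-up predicate and a human-readable state string;
    // Thread::block<T>() below owns construction, installation and teardown:
    //
    //     class TimerBlocker final : public Blocker {
    //     public:
    //         explicit TimerBlocker(int timer_id)
    //             : m_timer_id(timer_id)
    //         {
    //         }
    //         virtual bool should_unblock(Thread&) override { return timer_has_fired(m_timer_id); }
    //         virtual const char* state_string() const override { return "Timer"; }
    //
    //     private:
    //         int m_timer_id { -1 };
    //     };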

    class JoinBlocker final : public Blocker {
    public:
        explicit JoinBlocker(Thread& joinee, void*& joinee_exit_value);
        virtual bool should_unblock(Thread&) override;
        virtual const char* state_string() const override { return "Joining"; }
        void set_joinee_exit_value(void* value) { m_joinee_exit_value = value; }

    private:
        Thread& m_joinee;
        void*& m_joinee_exit_value;
    };

    class FileDescriptionBlocker : public Blocker {
    public:
        const FileDescription& blocked_description() const;

    protected:
        explicit FileDescriptionBlocker(const FileDescription&);

    private:
        NonnullRefPtr<FileDescription> m_blocked_description;
    };

    class AcceptBlocker final : public FileDescriptionBlocker {
    public:
        explicit AcceptBlocker(const FileDescription&);
        virtual bool should_unblock(Thread&) override;
        virtual const char* state_string() const override { return "Accepting"; }
    };

    class ConnectBlocker final : public FileDescriptionBlocker {
    public:
        explicit ConnectBlocker(const FileDescription&);
        virtual bool should_unblock(Thread&) override;
        virtual const char* state_string() const override { return "Connecting"; }
    };

    class WriteBlocker final : public FileDescriptionBlocker {
    public:
        explicit WriteBlocker(const FileDescription&);
        virtual bool should_unblock(Thread&) override;
        virtual const char* state_string() const override { return "Writing"; }
        virtual timespec* override_timeout(timespec*) override;

    private:
        timespec m_deadline;
    };

    class ReadBlocker final : public FileDescriptionBlocker {
    public:
        explicit ReadBlocker(const FileDescription&);
        virtual bool should_unblock(Thread&) override;
        virtual const char* state_string() const override { return "Reading"; }
        virtual timespec* override_timeout(timespec*) override;

    private:
        timespec m_deadline;
    };

    class ConditionBlocker final : public Blocker {
    public:
        ConditionBlocker(const char* state_string, Function<bool()>&& condition);
        virtual bool should_unblock(Thread&) override;
        virtual const char* state_string() const override { return m_state_string; }

    private:
        Function<bool()> m_block_until_condition;
        const char* m_state_string { nullptr };
    };

    class SleepBlocker final : public Blocker {
    public:
        explicit SleepBlocker(u64 wakeup_time);
        virtual bool should_unblock(Thread&) override;
        virtual const char* state_string() const override { return "Sleeping"; }

    private:
        u64 m_wakeup_time { 0 };
    };

    class SelectBlocker final : public Blocker {
    public:
        typedef Vector<int, FD_SETSIZE> FDVector;
        SelectBlocker(const FDVector& read_fds, const FDVector& write_fds, const FDVector& except_fds);
        virtual bool should_unblock(Thread&) override;
        virtual const char* state_string() const override { return "Selecting"; }

    private:
        const FDVector& m_select_read_fds;
        const FDVector& m_select_write_fds;
        const FDVector& m_select_exceptional_fds;
    };

    class WaitBlocker final : public Blocker {
    public:
        WaitBlocker(int wait_options, ProcessID& waitee_pid);
        virtual bool should_unblock(Thread&) override;
        virtual const char* state_string() const override { return "Waiting"; }

    private:
        int m_wait_options { 0 };
        ProcessID& m_waitee_pid;
    };

    class SemiPermanentBlocker final : public Blocker {
    public:
        enum class Reason {
            Signal,
        };

        SemiPermanentBlocker(Reason reason);
        virtual bool should_unblock(Thread&) override;
        virtual const char* state_string() const override
        {
            switch (m_reason) {
            case Reason::Signal:
                return "Signal";
            }
            ASSERT_NOT_REACHED();
        }
        virtual bool is_reason_signal() const override { return m_reason == Reason::Signal; }

    private:
        Reason m_reason;
    };

    void did_schedule() { ++m_times_scheduled; }
    u32 times_scheduled() const { return m_times_scheduled; }

    void resume_from_stopped();

    bool is_stopped() const { return m_state == Stopped; }
    bool is_blocked() const { return m_state == Blocked; }
    bool has_blocker() const
    {
        ASSERT(m_lock.own_lock());
        return m_blocker != nullptr;
    }
    const Blocker& blocker() const;

    u32 cpu() const { return m_cpu.load(AK::MemoryOrder::memory_order_consume); }
    void set_cpu(u32 cpu) { m_cpu.store(cpu, AK::MemoryOrder::memory_order_release); }
    u32 affinity() const { return m_cpu_affinity; }
    void set_affinity(u32 affinity) { m_cpu_affinity = affinity; }

    u32 stack_ptr() const { return m_tss.esp; }

    RegisterState& get_register_dump_from_stack();

    TSS32& tss() { return m_tss; }
    const TSS32& tss() const { return m_tss; }
    State state() const { return m_state; }
    const char* state_string() const;
    u32 ticks() const { return m_ticks; }

    VirtualAddress thread_specific_data() const { return m_thread_specific_data; }
    size_t thread_specific_region_size() const { return m_thread_specific_region_size; }

    u64 sleep(u64 ticks);
    u64 sleep_until(u64 wakeup_time);

    class BlockResult {
    public:
        enum Type {
            WokeNormally,
            NotBlocked,
            InterruptedBySignal,
            InterruptedByDeath,
            InterruptedByTimeout,
        };

        BlockResult() = delete;

        BlockResult(Type type)
            : m_type(type)
        {
        }

        bool operator==(Type type) const
        {
            return m_type == type;
        }

        bool was_interrupted() const
        {
            switch (m_type) {
            case InterruptedBySignal:
            case InterruptedByDeath:
            case InterruptedByTimeout:
                return true;
            default:
                return false;
            }
        }

    private:
        Type m_type;
    };

    template<typename T, class... Args>
    [[nodiscard]] BlockResult block(timespec* timeout, Args&&... args)
    {
        T t(forward<Args>(args)...);

        {
            ScopedSpinLock lock(m_lock);
            // We should never be blocking a blocked (or otherwise non-active) thread.
            ASSERT(state() == Thread::Running);
            ASSERT(m_blocker == nullptr);

            if (t.should_unblock(*this)) {
                // Don't block if the wake condition is already met
                return BlockResult::NotBlocked;
            }

            m_blocker = &t;
            m_blocker_timeout = t.override_timeout(timeout);
            set_state(Thread::Blocked);
        }

        // Yield to the scheduler, and wait for us to resume unblocked.
        yield_without_holding_big_lock();

        ScopedSpinLock lock(m_lock);

        // We should no longer be blocked once we woke up
        ASSERT(state() != Thread::Blocked);

        // Remove ourselves...
        m_blocker = nullptr;
        m_blocker_timeout = nullptr;

        if (t.was_interrupted_by_signal())
            return BlockResult::InterruptedBySignal;

        if (t.was_interrupted_by_death())
            return BlockResult::InterruptedByDeath;

        return BlockResult::WokeNormally;
    }

    [[nodiscard]] BlockResult block_until(const char* state_string, Function<bool()>&& condition)
    {
        return block<ConditionBlocker>(nullptr, state_string, move(condition));
    }
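
    // A usage sketch, assuming a hypothetical buffer_ready flag (not part of
    // this header). The ConditionBlocker lives on the stack inside block<T>(),
    // and the returned BlockResult tells the caller how the wait ended:
    //
    //     auto result = Thread::current()->block_until("WaitingForBuffer", [&] {
    //         return buffer_ready;
    //     });
    //     if (result.was_interrupted())
    //         return -EINTR; // e.g. fail the syscall with EINTR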

    BlockResult wait_on(WaitQueue& queue, const char* reason, timeval* timeout = nullptr, Atomic<bool>* lock = nullptr, Thread* beneficiary = nullptr);
    void wake_from_queue();

    void unblock();
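
    // Sketch of the WaitQueue pairing (m_queue is a hypothetical WaitQueue
    // member; wake_one() is assumed from WaitQueue's interface). A consumer
    // parks itself with wait_on(), and a producer wakes it later:
    //
    //     // Consumer side:
    //     Thread::current()->wait_on(m_queue, "HypotheticalQueue");
    //
    //     // Producer side, elsewhere:
    //     m_queue.wake_one();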

    // Tell this thread to unblock if needed,
    // gracefully unwind the stack and die.
    void set_should_die();
    bool should_die() const { return m_should_die; }
    void die_if_needed();

    bool tick();
    void set_ticks_left(u32 t) { m_ticks_left = t; }
    u32 ticks_left() const { return m_ticks_left; }

    u32 kernel_stack_base() const { return m_kernel_stack_base; }
    u32 kernel_stack_top() const { return m_kernel_stack_top; }

    void set_state(State);

    bool is_initialized() const { return m_initialized; }
    void set_initialized(bool initialized) { m_initialized = initialized; }

    void send_urgent_signal_to_self(u8 signal);
    void send_signal(u8 signal, Process* sender);
    void consider_unblock(time_t now_sec, long now_usec);

    void set_dump_backtrace_on_finalization() { m_dump_backtrace_on_finalization = true; }

    ShouldUnblockThread dispatch_one_pending_signal();
    ShouldUnblockThread dispatch_signal(u8 signal);
    bool has_unmasked_pending_signals() const { return m_pending_signals & ~m_signal_mask; }
    void terminate_due_to_signal(u8 signal);
    bool should_ignore_signal(u8 signal) const;
    bool has_signal_handler(u8 signal) const;
    bool has_pending_signal(u8 signal) const { return m_pending_signals & (1 << (signal - 1)); }
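
    // Signal numbers 1..32 map to bits 0..31 of m_pending_signals and
    // m_signal_mask, i.e. signal N occupies bit (N - 1). For example, a
    // pending SIGINT (signal 2) is tested as:
    //
    //     bool pending = has_pending_signal(SIGINT); // m_pending_signals & (1 << 1)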

    FPUState& fpu_state() { return *m_fpu_state; }

    void set_default_signal_dispositions();
    void push_value_on_stack(FlatPtr);

    u32 make_userspace_stack_for_main_thread(Vector<String> arguments, Vector<String> environment, Vector<AuxiliaryValue>);

    void make_thread_specific_region(Badge<Process>);

    unsigned syscall_count() const { return m_syscall_count; }
    void did_syscall() { ++m_syscall_count; }
    unsigned inode_faults() const { return m_inode_faults; }
    void did_inode_fault() { ++m_inode_faults; }
    unsigned zero_faults() const { return m_zero_faults; }
    void did_zero_fault() { ++m_zero_faults; }
    unsigned cow_faults() const { return m_cow_faults; }
    void did_cow_fault() { ++m_cow_faults; }

    unsigned file_read_bytes() const { return m_file_read_bytes; }
    unsigned file_write_bytes() const { return m_file_write_bytes; }

    void did_file_read(unsigned bytes)
    {
        m_file_read_bytes += bytes;
    }

    void did_file_write(unsigned bytes)
    {
        m_file_write_bytes += bytes;
    }

    unsigned unix_socket_read_bytes() const { return m_unix_socket_read_bytes; }
    unsigned unix_socket_write_bytes() const { return m_unix_socket_write_bytes; }

    void did_unix_socket_read(unsigned bytes)
    {
        m_unix_socket_read_bytes += bytes;
    }

    void did_unix_socket_write(unsigned bytes)
    {
        m_unix_socket_write_bytes += bytes;
    }

    unsigned ipv4_socket_read_bytes() const { return m_ipv4_socket_read_bytes; }
    unsigned ipv4_socket_write_bytes() const { return m_ipv4_socket_write_bytes; }

    void did_ipv4_socket_read(unsigned bytes)
    {
        m_ipv4_socket_read_bytes += bytes;
    }

    void did_ipv4_socket_write(unsigned bytes)
    {
        m_ipv4_socket_write_bytes += bytes;
    }

    const char* wait_reason() const
    {
        return m_wait_reason;
    }

    void set_active(bool active)
    {
        ASSERT(g_scheduler_lock.own_lock());
        m_is_active = active;
    }

    bool is_finalizable() const
    {
        ASSERT(g_scheduler_lock.own_lock());
        return !m_is_active;
    }

    Thread* clone(Process&);

    template<typename Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<typename Callback>
    static IterationDecision for_each_living(Callback);
    template<typename Callback>
    static IterationDecision for_each(Callback);

    static bool is_runnable_state(Thread::State state)
    {
        return state == Thread::State::Running || state == Thread::State::Runnable;
    }

    static constexpr u32 default_kernel_stack_size = 65536;
    static constexpr u32 default_userspace_stack_size = 4 * MB;

    ThreadTracer* tracer() { return m_tracer.ptr(); }
    void start_tracing_from(ProcessID tracer);
    void stop_tracing();
    void tracer_trap(const RegisterState&);

    RecursiveSpinLock& get_lock() const { return m_lock; }

private:
    IntrusiveListNode m_runnable_list_node;
    IntrusiveListNode m_wait_queue_node;

private:
    friend class SchedulerData;
    friend class WaitQueue;
    bool unlock_process_if_locked();
    void relock_process(bool did_unlock);
    String backtrace_impl();
    void reset_fpu_state();

    mutable RecursiveSpinLock m_lock;
    NonnullRefPtr<Process> m_process;
    ThreadID m_tid { -1 };
    TSS32 m_tss;
    Atomic<u32> m_cpu { 0 };
    u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
    u32 m_ticks { 0 };
    u32 m_ticks_left { 0 };
    u32 m_times_scheduled { 0 };
    u32 m_pending_signals { 0 };
    u32 m_signal_mask { 0 };
    u32 m_kernel_stack_base { 0 };
    u32 m_kernel_stack_top { 0 };
    OwnPtr<Region> m_kernel_stack_region;
    VirtualAddress m_thread_specific_data;
    size_t m_thread_specific_region_size { 0 };
    SignalActionData m_signal_action_data[32];
    Blocker* m_blocker { nullptr };
    timespec* m_blocker_timeout { nullptr };
    const char* m_wait_reason { nullptr };

    bool m_is_active { false };
    bool m_is_joinable { true };
    Thread* m_joiner { nullptr };
    Thread* m_joinee { nullptr };
    void* m_exit_value { nullptr };

    unsigned m_syscall_count { 0 };
    unsigned m_inode_faults { 0 };
    unsigned m_zero_faults { 0 };
    unsigned m_cow_faults { 0 };

    unsigned m_file_read_bytes { 0 };
    unsigned m_file_write_bytes { 0 };

    unsigned m_unix_socket_read_bytes { 0 };
    unsigned m_unix_socket_write_bytes { 0 };

    unsigned m_ipv4_socket_read_bytes { 0 };
    unsigned m_ipv4_socket_write_bytes { 0 };

    FPUState* m_fpu_state { nullptr };
    State m_state { Invalid };
    String m_name;
    u32 m_priority { THREAD_PRIORITY_NORMAL };
    u32 m_extra_priority { 0 };
    u32 m_priority_boost { 0 };

    u8 m_stop_signal { 0 };
    State m_stop_state { Invalid };

    bool m_dump_backtrace_on_finalization { false };
    bool m_should_die { false };
    bool m_initialized { false };

    OwnPtr<ThreadTracer> m_tracer;

    void yield_without_holding_big_lock();
    void update_state_for_thread(Thread::State previous_state);
};

HashTable<Thread*>& thread_table();

template<typename Callback>
inline IterationDecision Thread::for_each_living(Callback callback)
{
    ASSERT_INTERRUPTS_DISABLED();
    return Thread::for_each([callback](Thread& thread) -> IterationDecision {
        if (thread.state() != Thread::State::Dead && thread.state() != Thread::State::Dying)
            return callback(thread);
        return IterationDecision::Continue;
    });
}

template<typename Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    ASSERT_INTERRUPTS_DISABLED();
    ScopedSpinLock lock(g_scheduler_lock);
    auto ret = Scheduler::for_each_runnable(callback);
    if (ret == IterationDecision::Break)
        return ret;
    return Scheduler::for_each_nonrunnable(callback);
}

template<typename Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    ASSERT_INTERRUPTS_DISABLED();
    ScopedSpinLock lock(g_scheduler_lock);
    auto new_callback = [=](Thread& thread) -> IterationDecision {
        if (thread.state() == state)
            return callback(thread);
        return IterationDecision::Continue;
    };
    if (is_runnable_state(state))
        return Scheduler::for_each_runnable(new_callback);
    return Scheduler::for_each_nonrunnable(new_callback);
}
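
// A usage sketch for the iteration helpers above: counting runnable threads.
// Interrupts must already be disabled at the call site, since the helpers
// assert ASSERT_INTERRUPTS_DISABLED():
//
//     size_t runnable_count = 0;
//     Thread::for_each_in_state(Thread::State::Runnable, [&](Thread&) {
//         ++runnable_count;
//         return IterationDecision::Continue;
//     });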

const LogStream& operator<<(const LogStream&, const Thread&);

struct SchedulerData {
    typedef IntrusiveList<Thread, &Thread::m_runnable_list_node> ThreadList;

    ThreadList m_runnable_threads;
    ThreadList m_nonrunnable_threads;

    bool has_thread(Thread& thread) const
    {
        return m_runnable_threads.contains(thread) || m_nonrunnable_threads.contains(thread);
    }

    ThreadList& thread_list_for_state(Thread::State state)
    {
        if (Thread::is_runnable_state(state))
            return m_runnable_threads;
        return m_nonrunnable_threads;
    }
};

template<typename Callback>
inline IterationDecision Scheduler::for_each_runnable(Callback callback)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(g_scheduler_lock.own_lock());
    auto& tl = g_scheduler_data->m_runnable_threads;
    for (auto it = tl.begin(); it != tl.end();) {
        auto& thread = *it;
        // Advance the iterator before invoking the callback, which may
        // unlink the current thread from this list.
        ++it;
        if (callback(thread) == IterationDecision::Break)
            return IterationDecision::Break;
    }
    return IterationDecision::Continue;
}

template<typename Callback>
inline IterationDecision Scheduler::for_each_nonrunnable(Callback callback)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(g_scheduler_lock.own_lock());
    auto& tl = g_scheduler_data->m_nonrunnable_threads;
    for (auto it = tl.begin(); it != tl.end();) {
        auto& thread = *it;
        // Advance before invoking the callback, for the same reason as above.
        ++it;
        if (callback(thread) == IterationDecision::Break)
            return IterationDecision::Break;
    }
    return IterationDecision::Continue;
}

}