Thread.h

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#pragma once

#include <AK/Function.h>
#include <AK/IntrusiveList.h>
#include <AK/Optional.h>
#include <AK/OwnPtr.h>
#include <AK/String.h>
#include <AK/Vector.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Forward.h>
#include <Kernel/KResult.h>
#include <Kernel/Scheduler.h>
#include <Kernel/ThreadTracer.h>
#include <Kernel/UnixTypes.h>
#include <LibC/fd_set.h>
#include <LibELF/AuxiliaryVector.h>

namespace Kernel {

enum class ShouldUnblockThread {
    No = 0,
    Yes
};

struct SignalActionData {
    VirtualAddress handler_or_sigaction;
    u32 mask { 0 };
    int flags { 0 };
};

struct ThreadSpecificData {
    ThreadSpecificData* self;
};

#define THREAD_PRIORITY_MIN 1
#define THREAD_PRIORITY_LOW 10
#define THREAD_PRIORITY_NORMAL 30
#define THREAD_PRIORITY_HIGH 50
#define THREAD_PRIORITY_MAX 99

#define THREAD_AFFINITY_DEFAULT 0xffffffff

class Thread {
    AK_MAKE_NONCOPYABLE(Thread);
    AK_MAKE_NONMOVABLE(Thread);

    friend class Process;
    friend class Scheduler;

public:
    inline static Thread* current()
    {
        return Processor::current().current_thread();
    }

    explicit Thread(NonnullRefPtr<Process>);
    ~Thread();

    static Thread* from_tid(int);
    static void finalize_dying_threads();

    int tid() const { return m_tid; }
    int pid() const;

    void set_priority(u32 p) { m_priority = p; }
    u32 priority() const { return m_priority; }

    void set_priority_boost(u32 boost) { m_priority_boost = boost; }
    u32 priority_boost() const { return m_priority_boost; }

    u32 effective_priority() const;

    void set_joinable(bool j) { m_is_joinable = j; }
    bool is_joinable() const { return m_is_joinable; }

    Process& process() { return m_process; }
    const Process& process() const { return m_process; }

    String backtrace();
    Vector<FlatPtr> raw_backtrace(FlatPtr ebp, FlatPtr eip) const;

    const String& name() const { return m_name; }
    void set_name(const StringView& s) { m_name = s; }

    void finalize();

    enum State : u8 {
        Invalid = 0,
        Runnable,
        Running,
        Skip1SchedulerPass,
        Skip0SchedulerPasses,
        Dying,
        Dead,
        Stopped,
        Blocked,
        Queued,
    };

    class Blocker {
    public:
        virtual ~Blocker() { }
        virtual bool should_unblock(Thread& thread, time_t, long)
        {
            return should_unblock(thread);
        }
        virtual bool should_unblock(Thread&) = 0;
        virtual const char* state_string() const = 0;
        virtual bool is_reason_signal() const { return false; }
        void set_interrupted_by_death() { m_was_interrupted_by_death = true; }
        bool was_interrupted_by_death() const { return m_was_interrupted_by_death; }
        void set_interrupted_by_signal() { m_was_interrupted_while_blocked = true; }
        bool was_interrupted_by_signal() const { return m_was_interrupted_while_blocked; }

    private:
        bool m_was_interrupted_while_blocked { false };
        bool m_was_interrupted_by_death { false };
        friend class Thread;
    };
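
    // Sketch of a minimal Blocker subclass (illustration only, not part of this header):
    // implement should_unblock() and state_string(), then hand the type to block<T>() below.
    //
    //     class ExampleBlocker final : public Blocker {
    //     public:
    //         virtual bool should_unblock(Thread&) override { return m_ready; }
    //         virtual const char* state_string() const override { return "Example"; }
    //     private:
    //         bool m_ready { false };
    //     };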

    class JoinBlocker final : public Blocker {
    public:
        explicit JoinBlocker(Thread& joinee, void*& joinee_exit_value);
        virtual bool should_unblock(Thread&) override;
        virtual const char* state_string() const override { return "Joining"; }
        void set_joinee_exit_value(void* value) { m_joinee_exit_value = value; }

    private:
        Thread& m_joinee;
        void*& m_joinee_exit_value;
    };

    class FileDescriptionBlocker : public Blocker {
    public:
        const FileDescription& blocked_description() const;

    protected:
        explicit FileDescriptionBlocker(const FileDescription&);

    private:
        NonnullRefPtr<FileDescription> m_blocked_description;
    };

    class AcceptBlocker final : public FileDescriptionBlocker {
    public:
        explicit AcceptBlocker(const FileDescription&);
        virtual bool should_unblock(Thread&) override;
        virtual const char* state_string() const override { return "Accepting"; }
    };

    class ConnectBlocker final : public FileDescriptionBlocker {
    public:
        explicit ConnectBlocker(const FileDescription&);
        virtual bool should_unblock(Thread&) override;
        virtual const char* state_string() const override { return "Connecting"; }
    };

    class WriteBlocker final : public FileDescriptionBlocker {
    public:
        explicit WriteBlocker(const FileDescription&);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual bool should_unblock(Thread&) override;
        virtual const char* state_string() const override { return "Writing"; }

    private:
        Optional<timeval> m_deadline;
    };

    class ReadBlocker final : public FileDescriptionBlocker {
    public:
        explicit ReadBlocker(const FileDescription&);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual bool should_unblock(Thread&) override;
        virtual const char* state_string() const override { return "Reading"; }

    private:
        Optional<timeval> m_deadline;
    };

    class ConditionBlocker final : public Blocker {
    public:
        ConditionBlocker(const char* state_string, Function<bool()>&& condition);
        virtual bool should_unblock(Thread&) override;
        virtual const char* state_string() const override { return m_state_string; }

    private:
        Function<bool()> m_block_until_condition;
        const char* m_state_string { nullptr };
    };

    class SleepBlocker final : public Blocker {
    public:
        explicit SleepBlocker(u64 wakeup_time);
        virtual bool should_unblock(Thread&) override;
        virtual const char* state_string() const override { return "Sleeping"; }

    private:
        u64 m_wakeup_time { 0 };
    };

    class SelectBlocker final : public Blocker {
    public:
        typedef Vector<int, FD_SETSIZE> FDVector;
        SelectBlocker(const timespec& ts, bool select_has_timeout, const FDVector& read_fds, const FDVector& write_fds, const FDVector& except_fds);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual bool should_unblock(Thread&) override;
        virtual const char* state_string() const override { return "Selecting"; }

    private:
        timespec m_select_timeout;
        bool m_select_has_timeout { false };
        const FDVector& m_select_read_fds;
        const FDVector& m_select_write_fds;
        const FDVector& m_select_exceptional_fds;
    };

    class WaitBlocker final : public Blocker {
    public:
        WaitBlocker(int wait_options, pid_t& waitee_pid);
        virtual bool should_unblock(Thread&) override;
        virtual const char* state_string() const override { return "Waiting"; }

    private:
        int m_wait_options { 0 };
        pid_t& m_waitee_pid;
    };

    class SemiPermanentBlocker final : public Blocker {
    public:
        enum class Reason {
            Signal,
        };

        SemiPermanentBlocker(Reason reason);
        virtual bool should_unblock(Thread&) override;
        virtual const char* state_string() const override
        {
            switch (m_reason) {
            case Reason::Signal:
                return "Signal";
            }
            ASSERT_NOT_REACHED();
        }
        virtual bool is_reason_signal() const override { return m_reason == Reason::Signal; }

    private:
        Reason m_reason;
    };

    void did_schedule() { ++m_times_scheduled; }
    u32 times_scheduled() const { return m_times_scheduled; }

    bool is_stopped() const { return m_state == Stopped; }
    bool is_blocked() const { return m_state == Blocked; }
    bool has_blocker() const
    {
        ASSERT(m_lock.own_lock());
        return m_blocker != nullptr;
    }
    const Blocker& blocker() const;

    u32 cpu() const { return m_cpu.load(AK::MemoryOrder::memory_order_consume); }
    void set_cpu(u32 cpu) { m_cpu.store(cpu, AK::MemoryOrder::memory_order_release); }
    u32 affinity() const { return m_cpu_affinity; }
    void set_affinity(u32 affinity) { m_cpu_affinity = affinity; }

    u32 stack_ptr() const { return m_tss.esp; }

    RegisterState& get_register_dump_from_stack();

    TSS32& tss() { return m_tss; }
    const TSS32& tss() const { return m_tss; }
    State state() const { return m_state; }
    const char* state_string() const;
    u32 ticks() const { return m_ticks; }

    VirtualAddress thread_specific_data() const { return m_thread_specific_data; }
    size_t thread_specific_region_size() const { return m_thread_specific_region_size; }

    u64 sleep(u64 ticks);
    u64 sleep_until(u64 wakeup_time);

    class BlockResult {
    public:
        enum Type {
            WokeNormally,
            NotBlocked,
            InterruptedBySignal,
            InterruptedByDeath,
            InterruptedByTimeout,
        };

        BlockResult() = delete;

        BlockResult(Type type)
            : m_type(type)
        {
        }

        bool operator==(Type type) const
        {
            return m_type == type;
        }

        bool was_interrupted() const
        {
            switch (m_type) {
            case InterruptedBySignal:
            case InterruptedByDeath:
            case InterruptedByTimeout:
                return true;
            default:
                return false;
            }
        }

    private:
        Type m_type;
    };

    template<typename T, class... Args>
    [[nodiscard]] BlockResult block(Args&&... args)
    {
        T t(forward<Args>(args)...);

        {
            ScopedSpinLock lock(m_lock);
            // We should never be blocking a blocked (or otherwise non-active) thread.
            ASSERT(state() == Thread::Running);
            ASSERT(m_blocker == nullptr);

            if (t.should_unblock(*this)) {
                // Don't block if the wake condition is already met
                return BlockResult::NotBlocked;
            }

            m_blocker = &t;
            set_state(Thread::Blocked);
        }

        // Yield to the scheduler, and wait for us to resume unblocked.
        yield_without_holding_big_lock();

        ScopedSpinLock lock(m_lock);

        // We should no longer be blocked once we woke up
        ASSERT(state() != Thread::Blocked);

        // Remove ourselves...
        m_blocker = nullptr;

        if (t.was_interrupted_by_signal())
            return BlockResult::InterruptedBySignal;
        if (t.was_interrupted_by_death())
            return BlockResult::InterruptedByDeath;
        return BlockResult::WokeNormally;
    }
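
    // Usage sketch (illustration only, not part of the original header): callers block on a
    // concrete Blocker type and inspect the returned BlockResult, e.g.
    //
    //     auto result = Thread::current()->block<Thread::SleepBlocker>(wakeup_time);
    //     if (result.was_interrupted())
    //         /* bail out early, e.g. return -EINTR from the syscall */;
    //
    // block_until() below is the same machinery driven by an ad-hoc ConditionBlocker.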

    [[nodiscard]] BlockResult block_until(const char* state_string, Function<bool()>&& condition)
    {
        return block<ConditionBlocker>(state_string, move(condition));
    }

    BlockResult wait_on(WaitQueue& queue, const char* reason, timeval* timeout = nullptr, Atomic<bool>* lock = nullptr, Thread* beneficiary = nullptr);
    void wake_from_queue();
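
    // Usage sketch (an assumption about typical call sites, not spelled out here): a thread
    // parks itself on a WaitQueue and returns from wait_on() with a BlockResult once another
    // thread wakes that queue (see Kernel/WaitQueue.h for the wake-side API):
    //
    //     Thread::current()->wait_on(some_queue, "WaitingForThing");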
    void unblock();

    // Tell this thread to unblock if needed,
    // gracefully unwind the stack and die.
    void set_should_die();
    void die_if_needed();

    bool tick();
    void set_ticks_left(u32 t) { m_ticks_left = t; }
    u32 ticks_left() const { return m_ticks_left; }

    u32 kernel_stack_base() const { return m_kernel_stack_base; }
    u32 kernel_stack_top() const { return m_kernel_stack_top; }

    void set_state(State);

    bool is_initialized() const { return m_initialized; }
    void set_initialized(bool initialized) { m_initialized = initialized; }

    void send_urgent_signal_to_self(u8 signal);
    void send_signal(u8 signal, Process* sender);
    void consider_unblock(time_t now_sec, long now_usec);

    void set_dump_backtrace_on_finalization() { m_dump_backtrace_on_finalization = true; }

    ShouldUnblockThread dispatch_one_pending_signal();
    ShouldUnblockThread dispatch_signal(u8 signal);
    bool has_unmasked_pending_signals() const { return m_pending_signals & ~m_signal_mask; }
    void terminate_due_to_signal(u8 signal);
    bool should_ignore_signal(u8 signal) const;
    bool has_signal_handler(u8 signal) const;
    bool has_pending_signal(u8 signal) const { return m_pending_signals & (1 << (signal - 1)); }

    FPUState& fpu_state() { return *m_fpu_state; }

    void set_default_signal_dispositions();

    void push_value_on_stack(FlatPtr);

    u32 make_userspace_stack_for_main_thread(Vector<String> arguments, Vector<String> environment, Vector<AuxiliaryValue>);

    void make_thread_specific_region(Badge<Process>);

    unsigned syscall_count() const { return m_syscall_count; }
    void did_syscall() { ++m_syscall_count; }
    unsigned inode_faults() const { return m_inode_faults; }
    void did_inode_fault() { ++m_inode_faults; }
    unsigned zero_faults() const { return m_zero_faults; }
    void did_zero_fault() { ++m_zero_faults; }
    unsigned cow_faults() const { return m_cow_faults; }
    void did_cow_fault() { ++m_cow_faults; }

    unsigned file_read_bytes() const { return m_file_read_bytes; }
    unsigned file_write_bytes() const { return m_file_write_bytes; }

    void did_file_read(unsigned bytes)
    {
        m_file_read_bytes += bytes;
    }

    void did_file_write(unsigned bytes)
    {
        m_file_write_bytes += bytes;
    }

    unsigned unix_socket_read_bytes() const { return m_unix_socket_read_bytes; }
    unsigned unix_socket_write_bytes() const { return m_unix_socket_write_bytes; }

    void did_unix_socket_read(unsigned bytes)
    {
        m_unix_socket_read_bytes += bytes;
    }

    void did_unix_socket_write(unsigned bytes)
    {
        m_unix_socket_write_bytes += bytes;
    }

    unsigned ipv4_socket_read_bytes() const { return m_ipv4_socket_read_bytes; }
    unsigned ipv4_socket_write_bytes() const { return m_ipv4_socket_write_bytes; }

    void did_ipv4_socket_read(unsigned bytes)
    {
        m_ipv4_socket_read_bytes += bytes;
    }

    void did_ipv4_socket_write(unsigned bytes)
    {
        m_ipv4_socket_write_bytes += bytes;
    }

    const char* wait_reason() const
    {
        return m_wait_reason;
    }

    void set_active(bool active)
    {
        ASSERT(g_scheduler_lock.own_lock());
        m_is_active = active;
    }

    bool is_finalizable() const
    {
        ASSERT(g_scheduler_lock.own_lock());
        return !m_is_active;
    }

    Thread* clone(Process&);

    template<typename Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<typename Callback>
    static IterationDecision for_each_living(Callback);
    template<typename Callback>
    static IterationDecision for_each(Callback);

    static bool is_runnable_state(Thread::State state)
    {
        return state == Thread::State::Running || state == Thread::State::Runnable;
    }

    static constexpr u32 default_kernel_stack_size = 65536;
    static constexpr u32 default_userspace_stack_size = 4 * MB;

    ThreadTracer* tracer() { return m_tracer.ptr(); }
    void start_tracing_from(pid_t tracer);
    void stop_tracing();
    void tracer_trap(const RegisterState&);

    RecursiveSpinLock& get_lock() const { return m_lock; }

private:
    IntrusiveListNode m_runnable_list_node;
    IntrusiveListNode m_wait_queue_node;

private:
    friend class SchedulerData;
    friend class WaitQueue;
    bool unlock_process_if_locked();
    void relock_process(bool did_unlock);
    String backtrace_impl();
    void reset_fpu_state();

    mutable RecursiveSpinLock m_lock;
    NonnullRefPtr<Process> m_process;
    int m_tid { -1 };
    TSS32 m_tss;
    Atomic<u32> m_cpu { 0 };
    u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
    u32 m_ticks { 0 };
    u32 m_ticks_left { 0 };
    u32 m_times_scheduled { 0 };
    u32 m_pending_signals { 0 };
    u32 m_signal_mask { 0 };
    u32 m_kernel_stack_base { 0 };
    u32 m_kernel_stack_top { 0 };
    OwnPtr<Region> m_kernel_stack_region;
    VirtualAddress m_thread_specific_data;
    size_t m_thread_specific_region_size { 0 };
    SignalActionData m_signal_action_data[32];
    Blocker* m_blocker { nullptr };
    const char* m_wait_reason { nullptr };

    bool m_is_active { false };
    bool m_is_joinable { true };
    Thread* m_joiner { nullptr };
    Thread* m_joinee { nullptr };
    void* m_exit_value { nullptr };

    unsigned m_syscall_count { 0 };
    unsigned m_inode_faults { 0 };
    unsigned m_zero_faults { 0 };
    unsigned m_cow_faults { 0 };

    unsigned m_file_read_bytes { 0 };
    unsigned m_file_write_bytes { 0 };

    unsigned m_unix_socket_read_bytes { 0 };
    unsigned m_unix_socket_write_bytes { 0 };

    unsigned m_ipv4_socket_read_bytes { 0 };
    unsigned m_ipv4_socket_write_bytes { 0 };

    FPUState* m_fpu_state { nullptr };
    State m_state { Invalid };
    String m_name;
    u32 m_priority { THREAD_PRIORITY_NORMAL };
    u32 m_extra_priority { 0 };
    u32 m_priority_boost { 0 };

    u8 m_stop_signal { 0 };
    State m_stop_state { Invalid };

    bool m_dump_backtrace_on_finalization { false };
    bool m_should_die { false };
    bool m_initialized { false };

    OwnPtr<ThreadTracer> m_tracer;

    void yield_without_holding_big_lock();
    void update_state_for_thread(Thread::State previous_state);
};

HashTable<Thread*>& thread_table();

template<typename Callback>
inline IterationDecision Thread::for_each_living(Callback callback)
{
    ASSERT_INTERRUPTS_DISABLED();
    return Thread::for_each([callback](Thread& thread) -> IterationDecision {
        if (thread.state() != Thread::State::Dead && thread.state() != Thread::State::Dying)
            return callback(thread);
        return IterationDecision::Continue;
    });
}

template<typename Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    ASSERT_INTERRUPTS_DISABLED();
    ScopedSpinLock lock(g_scheduler_lock);
    auto ret = Scheduler::for_each_runnable(callback);
    if (ret == IterationDecision::Break)
        return ret;
    return Scheduler::for_each_nonrunnable(callback);
}

template<typename Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    ASSERT_INTERRUPTS_DISABLED();
    ScopedSpinLock lock(g_scheduler_lock);
    auto new_callback = [=](Thread& thread) -> IterationDecision {
        if (thread.state() == state)
            return callback(thread);
        return IterationDecision::Continue;
    };
    if (is_runnable_state(state))
        return Scheduler::for_each_runnable(new_callback);
    return Scheduler::for_each_nonrunnable(new_callback);
}
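
// Example (sketch, not from the original file): counting threads in a given state with the
// iteration helpers above. The callback decides whether to keep iterating; callers must have
// interrupts disabled, per the assertions in the helpers.
//
//     size_t runnable_count = 0;
//     Thread::for_each_in_state(Thread::State::Runnable, [&](Thread&) {
//         ++runnable_count;
//         return IterationDecision::Continue;
//     });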

const LogStream& operator<<(const LogStream&, const Thread&);

struct SchedulerData {
    typedef IntrusiveList<Thread, &Thread::m_runnable_list_node> ThreadList;

    ThreadList m_runnable_threads;
    ThreadList m_nonrunnable_threads;

    bool has_thread(Thread& thread) const
    {
        return m_runnable_threads.contains(thread) || m_nonrunnable_threads.contains(thread);
    }

    ThreadList& thread_list_for_state(Thread::State state)
    {
        if (Thread::is_runnable_state(state))
            return m_runnable_threads;
        return m_nonrunnable_threads;
    }
};

template<typename Callback>
inline IterationDecision Scheduler::for_each_runnable(Callback callback)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(g_scheduler_lock.own_lock());
    auto& tl = g_scheduler_data->m_runnable_threads;
    for (auto it = tl.begin(); it != tl.end();) {
        auto& thread = *it;
        it = ++it;
        if (callback(thread) == IterationDecision::Break)
            return IterationDecision::Break;
    }

    return IterationDecision::Continue;
}

template<typename Callback>
inline IterationDecision Scheduler::for_each_nonrunnable(Callback callback)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(g_scheduler_lock.own_lock());
    auto& tl = g_scheduler_data->m_nonrunnable_threads;
    for (auto it = tl.begin(); it != tl.end();) {
        auto& thread = *it;
        it = ++it;
        if (callback(thread) == IterationDecision::Break)
            return IterationDecision::Break;
    }

    return IterationDecision::Continue;
}

}