Thread.h

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include <AK/Function.h>
#include <AK/IntrusiveList.h>
#include <AK/Optional.h>
#include <AK/OwnPtr.h>
#include <AK/String.h>
#include <AK/Vector.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Forward.h>
#include <Kernel/KResult.h>
#include <Kernel/Scheduler.h>
#include <Kernel/ThreadTracer.h>
#include <Kernel/UnixTypes.h>
#include <LibC/fd_set.h>
#include <LibELF/AuxiliaryVector.h>

namespace Kernel {

enum class ShouldUnblockThread {
    No = 0,
    Yes
};

struct SignalActionData {
    VirtualAddress handler_or_sigaction;
    u32 mask { 0 };
    int flags { 0 };
};

struct ThreadSpecificData {
    ThreadSpecificData* self;
};

#define THREAD_PRIORITY_MIN 1
#define THREAD_PRIORITY_LOW 10
#define THREAD_PRIORITY_NORMAL 30
#define THREAD_PRIORITY_HIGH 50
#define THREAD_PRIORITY_MAX 99

#define THREAD_AFFINITY_DEFAULT 0xffffffff
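
// A Thread is a single schedulable execution context. Every Thread belongs
// to exactly one Process (a Process may own several), and the Scheduler
// drives each Thread through the State machine declared below.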
class Thread {
    AK_MAKE_NONCOPYABLE(Thread);
    AK_MAKE_NONMOVABLE(Thread);

    friend class Process;
    friend class Scheduler;

public:
    inline static Thread* current()
    {
        return Processor::current().current_thread();
    }

    explicit Thread(Process&);
    ~Thread();

    static Thread* from_tid(int);
    static void finalize_dying_threads();
    static Vector<Thread*> all_threads();
    static bool is_thread(void*);

    int tid() const { return m_tid; }
    int pid() const;

    void set_priority(u32 p) { m_priority = p; }
    u32 priority() const { return m_priority; }
    void set_priority_boost(u32 boost) { m_priority_boost = boost; }
    u32 priority_boost() const { return m_priority_boost; }
    u32 effective_priority() const;

    void set_joinable(bool j) { m_is_joinable = j; }
    bool is_joinable() const { return m_is_joinable; }

    Process& process() { return m_process; }
    const Process& process() const { return m_process; }

    String backtrace(ProcessInspectionHandle&);
    Vector<FlatPtr> raw_backtrace(FlatPtr ebp, FlatPtr eip) const;

    const String& name() const { return m_name; }
    void set_name(const StringView& s) { m_name = s; }

    void finalize();

    enum State : u8 {
        Invalid = 0,
        Runnable,
        Running,
        Skip1SchedulerPass,
        Skip0SchedulerPasses,
        Dying,
        Dead,
        Stopped,
        Blocked,
        Queued,
    };
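
    // A Blocker decides when a Blocked thread may resume: while a thread is
    // Blocked, the scheduler polls its blocker via should_unblock(). A block
    // can also be cut short by an incoming signal or by the thread dying,
    // which the blocker records in the flags below and which block() (further
    // down) surfaces through BlockResult.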
    class Blocker {
    public:
        virtual ~Blocker() { }
        virtual bool should_unblock(Thread&, time_t now_s, long us) = 0;
        virtual const char* state_string() const = 0;
        virtual bool is_reason_signal() const { return false; }
        void set_interrupted_by_death() { m_was_interrupted_by_death = true; }
        bool was_interrupted_by_death() const { return m_was_interrupted_by_death; }
        void set_interrupted_by_signal() { m_was_interrupted_while_blocked = true; }
        bool was_interrupted_by_signal() const { return m_was_interrupted_while_blocked; }

    private:
        bool m_was_interrupted_while_blocked { false };
        bool m_was_interrupted_by_death { false };

        friend class Thread;
    };

    class JoinBlocker final : public Blocker {
    public:
        explicit JoinBlocker(Thread& joinee, void*& joinee_exit_value);
        virtual bool should_unblock(Thread&, time_t now_s, long us) override;
        virtual const char* state_string() const override { return "Joining"; }
        void set_joinee_exit_value(void* value) { m_joinee_exit_value = value; }

    private:
        Thread& m_joinee;
        void*& m_joinee_exit_value;
    };

    class FileDescriptionBlocker : public Blocker {
    public:
        const FileDescription& blocked_description() const;

    protected:
        explicit FileDescriptionBlocker(const FileDescription&);

    private:
        NonnullRefPtr<FileDescription> m_blocked_description;
    };

    class AcceptBlocker final : public FileDescriptionBlocker {
    public:
        explicit AcceptBlocker(const FileDescription&);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Accepting"; }
    };

    class ConnectBlocker final : public FileDescriptionBlocker {
    public:
        explicit ConnectBlocker(const FileDescription&);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Connecting"; }
    };

    class WriteBlocker final : public FileDescriptionBlocker {
    public:
        explicit WriteBlocker(const FileDescription&);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Writing"; }

    private:
        Optional<timeval> m_deadline;
    };

    class ReadBlocker final : public FileDescriptionBlocker {
    public:
        explicit ReadBlocker(const FileDescription&);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Reading"; }

    private:
        Optional<timeval> m_deadline;
    };

    class ConditionBlocker final : public Blocker {
    public:
        ConditionBlocker(const char* state_string, Function<bool()>&& condition);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return m_state_string; }

    private:
        Function<bool()> m_block_until_condition;
        const char* m_state_string { nullptr };
    };

    class SleepBlocker final : public Blocker {
    public:
        explicit SleepBlocker(u64 wakeup_time);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Sleeping"; }

    private:
        u64 m_wakeup_time { 0 };
    };

    class SelectBlocker final : public Blocker {
    public:
        typedef Vector<int, FD_SETSIZE> FDVector;
        SelectBlocker(const timespec& ts, bool select_has_timeout, const FDVector& read_fds, const FDVector& write_fds, const FDVector& except_fds);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Selecting"; }

    private:
        timespec m_select_timeout;
        bool m_select_has_timeout { false };
        const FDVector& m_select_read_fds;
        const FDVector& m_select_write_fds;
        const FDVector& m_select_exceptional_fds;
    };

    class WaitBlocker final : public Blocker {
    public:
        WaitBlocker(int wait_options, pid_t& waitee_pid);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Waiting"; }

    private:
        int m_wait_options { 0 };
        pid_t& m_waitee_pid;
    };

    class SemiPermanentBlocker final : public Blocker {
    public:
        enum class Reason {
            Signal,
        };

        SemiPermanentBlocker(Reason reason);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override
        {
            switch (m_reason) {
            case Reason::Signal:
                return "Signal";
            }
            ASSERT_NOT_REACHED();
        }
        virtual bool is_reason_signal() const override { return m_reason == Reason::Signal; }

    private:
        Reason m_reason;
    };

    void did_schedule() { ++m_times_scheduled; }
    u32 times_scheduled() const { return m_times_scheduled; }

    bool is_stopped() const { return m_state == Stopped; }
    bool is_blocked() const { return m_state == Blocked; }
    bool has_blocker() const { return m_blocker != nullptr; }
    const Blocker& blocker() const;

    bool in_kernel() const { return (m_tss.cs & 0x03) == 0; }

    u32 cpu() const { return m_cpu.load(AK::MemoryOrder::memory_order_consume); }
    void set_cpu(u32 cpu) { m_cpu.store(cpu, AK::MemoryOrder::memory_order_release); }
    u32 affinity() const { return m_cpu_affinity; }
    void set_affinity(u32 affinity) { m_cpu_affinity = affinity; }

    u32 stack_ptr() const { return m_tss.esp; }

    RegisterState& get_register_dump_from_stack();

    TSS32& tss() { return m_tss; }
    const TSS32& tss() const { return m_tss; }
    State state() const { return m_state; }
    const char* state_string() const;
    u32 ticks() const { return m_ticks; }

    VirtualAddress thread_specific_data() const { return m_thread_specific_data; }
    size_t thread_specific_region_size() const { return m_thread_specific_region_size; }

    u64 sleep(u32 ticks);
    u64 sleep_until(u64 wakeup_time);

    enum class BlockResult {
        WokeNormally,
        NotBlocked,
        InterruptedBySignal,
        InterruptedByDeath,
        InterruptedByTimeout,
    };
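
    // Blocks the current thread on a stack-allocated blocker of type T,
    // forwarding any extra arguments to T's constructor. We yield to the
    // scheduler and, once resumed, report how the block ended via BlockResult.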
    template<typename T, class... Args>
    [[nodiscard]] BlockResult block(Args&&... args)
    {
        // We should never be blocking a blocked (or otherwise non-active) thread.
        ASSERT(state() == Thread::Running);
        ASSERT(m_blocker == nullptr);

        T t(forward<Args>(args)...);
        m_blocker = &t;
        set_state(Thread::Blocked);

        // Yield to the scheduler, and wait for us to resume unblocked.
        yield_without_holding_big_lock();

        // We should no longer be blocked once we woke up
        ASSERT(state() != Thread::Blocked);

        // Remove ourselves...
        m_blocker = nullptr;

        if (t.was_interrupted_by_signal())
            return BlockResult::InterruptedBySignal;

        if (t.was_interrupted_by_death())
            return BlockResult::InterruptedByDeath;

        return BlockResult::WokeNormally;
    }
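
    // Usage sketch (hypothetical call site elsewhere in the kernel):
    //
    //     auto result = Thread::current()->block<Thread::SleepBlocker>(wakeup_time);
    //     if (result == Thread::BlockResult::InterruptedBySignal)
    //         return -EINTR;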

    [[nodiscard]] BlockResult block_until(const char* state_string, Function<bool()>&& condition)
    {
        return block<ConditionBlocker>(state_string, move(condition));
    }
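
    // Example (sketch, with a hypothetical m_event_pending flag):
    //
    //     auto result = block_until("WaitingForEvent", [&] { return m_event_pending; });

    // wait_on() blocks on a WaitQueue until another thread calls
    // wake_from_queue() on us. Presumably: the optional timeout bounds the
    // wait, a non-null "lock" is released before blocking, and "beneficiary"
    // receives a scheduling donation for the duration.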
    BlockResult wait_on(WaitQueue& queue, const char* reason, timeval* timeout = nullptr, Atomic<bool>* lock = nullptr, Thread* beneficiary = nullptr);
    void wake_from_queue();
    void unblock();

    // Tell this thread to unblock if needed, then gracefully unwind the
    // stack and die.
    void set_should_die();
    void die_if_needed();

    bool tick();
    void set_ticks_left(u32 t) { m_ticks_left = t; }
    u32 ticks_left() const { return m_ticks_left; }

    u32 kernel_stack_base() const { return m_kernel_stack_base; }
    u32 kernel_stack_top() const { return m_kernel_stack_top; }

    void set_state(State);

    bool is_initialized() const { return m_initialized; }
    void set_initialized(bool initialized) { m_initialized = initialized; }

    void send_urgent_signal_to_self(u8 signal);
    void send_signal(u8 signal, Process* sender);
    void consider_unblock(time_t now_sec, long now_usec);

    void set_dump_backtrace_on_finalization() { m_dump_backtrace_on_finalization = true; }

    ShouldUnblockThread dispatch_one_pending_signal();
    ShouldUnblockThread dispatch_signal(u8 signal);
    bool has_unmasked_pending_signals() const { return m_pending_signals & ~m_signal_mask; }
    void terminate_due_to_signal(u8 signal);
    bool should_ignore_signal(u8 signal) const;
    bool has_signal_handler(u8 signal) const;
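
    // Pending signals are kept as a bitmask: signal numbers start at 1, so
    // signal N occupies bit (N - 1).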
    bool has_pending_signal(u8 signal) const { return m_pending_signals & (1 << (signal - 1)); }

    FPUState& fpu_state() { return *m_fpu_state; }

    void set_default_signal_dispositions();
    void push_value_on_stack(FlatPtr);

    u32 make_userspace_stack_for_main_thread(Vector<String> arguments, Vector<String> environment, Vector<AuxiliaryValue>);

    void make_thread_specific_region(Badge<Process>);

    unsigned syscall_count() const { return m_syscall_count; }
    void did_syscall() { ++m_syscall_count; }
    unsigned inode_faults() const { return m_inode_faults; }
    void did_inode_fault() { ++m_inode_faults; }
    unsigned zero_faults() const { return m_zero_faults; }
    void did_zero_fault() { ++m_zero_faults; }
    unsigned cow_faults() const { return m_cow_faults; }
    void did_cow_fault() { ++m_cow_faults; }

    unsigned file_read_bytes() const { return m_file_read_bytes; }
    unsigned file_write_bytes() const { return m_file_write_bytes; }

    void did_file_read(unsigned bytes)
    {
        m_file_read_bytes += bytes;
    }

    void did_file_write(unsigned bytes)
    {
        m_file_write_bytes += bytes;
    }

    unsigned unix_socket_read_bytes() const { return m_unix_socket_read_bytes; }
    unsigned unix_socket_write_bytes() const { return m_unix_socket_write_bytes; }

    void did_unix_socket_read(unsigned bytes)
    {
        m_unix_socket_read_bytes += bytes;
    }

    void did_unix_socket_write(unsigned bytes)
    {
        m_unix_socket_write_bytes += bytes;
    }

    unsigned ipv4_socket_read_bytes() const { return m_ipv4_socket_read_bytes; }
    unsigned ipv4_socket_write_bytes() const { return m_ipv4_socket_write_bytes; }

    void did_ipv4_socket_read(unsigned bytes)
    {
        m_ipv4_socket_read_bytes += bytes;
    }

    void did_ipv4_socket_write(unsigned bytes)
    {
        m_ipv4_socket_write_bytes += bytes;
    }

    const char* wait_reason() const
    {
        return m_wait_reason;
    }

    void set_active(bool active)
    {
        ASSERT(g_scheduler_lock.is_locked());
        m_is_active = active;
    }

    bool is_finalizable() const
    {
        ASSERT(g_scheduler_lock.is_locked());
        return !m_is_active;
    }

    Thread* clone(Process&);

    template<typename Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<typename Callback>
    static IterationDecision for_each_living(Callback);
    template<typename Callback>
    static IterationDecision for_each(Callback);

    static bool is_runnable_state(Thread::State state)
    {
        return state == Thread::State::Running || state == Thread::State::Runnable;
    }

    static constexpr u32 default_kernel_stack_size = 65536;
    static constexpr u32 default_userspace_stack_size = 4 * MB;

    ThreadTracer* tracer() { return m_tracer.ptr(); }
    void start_tracing_from(pid_t tracer);
    void stop_tracing();
    void tracer_trap(const RegisterState&);

private:
    IntrusiveListNode m_runnable_list_node;
    IntrusiveListNode m_wait_queue_node;

private:
    friend class SchedulerData;
    friend class WaitQueue;
    bool unlock_process_if_locked();
    void relock_process(bool did_unlock);
    String backtrace_impl();
    void reset_fpu_state();

    Process& m_process;
    int m_tid { -1 };
    TSS32 m_tss;
    Atomic<u32> m_cpu { 0 };
    u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
    u32 m_ticks { 0 };
    u32 m_ticks_left { 0 };
    u32 m_times_scheduled { 0 };
    u32 m_pending_signals { 0 };
    u32 m_signal_mask { 0 };
    u32 m_kernel_stack_base { 0 };
    u32 m_kernel_stack_top { 0 };
    OwnPtr<Region> m_kernel_stack_region;
    VirtualAddress m_thread_specific_data;
    size_t m_thread_specific_region_size { 0 };
    SignalActionData m_signal_action_data[32];
    Blocker* m_blocker { nullptr };
    const char* m_wait_reason { nullptr };

    bool m_is_active { false };
    bool m_is_joinable { true };
    Thread* m_joiner { nullptr };
    Thread* m_joinee { nullptr };
    void* m_exit_value { nullptr };

    unsigned m_syscall_count { 0 };
    unsigned m_inode_faults { 0 };
    unsigned m_zero_faults { 0 };
    unsigned m_cow_faults { 0 };
    unsigned m_file_read_bytes { 0 };
    unsigned m_file_write_bytes { 0 };
    unsigned m_unix_socket_read_bytes { 0 };
    unsigned m_unix_socket_write_bytes { 0 };
    unsigned m_ipv4_socket_read_bytes { 0 };
    unsigned m_ipv4_socket_write_bytes { 0 };

    FPUState* m_fpu_state { nullptr };
    State m_state { Invalid };
    String m_name;
    u32 m_priority { THREAD_PRIORITY_NORMAL };
    u32 m_extra_priority { 0 };
    u32 m_priority_boost { 0 };

    u8 m_stop_signal { 0 };
    State m_stop_state { Invalid };

    bool m_dump_backtrace_on_finalization { false };
    bool m_should_die { false };
    bool m_initialized { false };

    OwnPtr<ThreadTracer> m_tracer;

    void yield_without_holding_big_lock();
};

HashTable<Thread*>& thread_table();

template<typename Callback>
inline IterationDecision Thread::for_each_living(Callback callback)
{
    ASSERT_INTERRUPTS_DISABLED();
    return Thread::for_each([callback](Thread& thread) -> IterationDecision {
        if (thread.state() != Thread::State::Dead && thread.state() != Thread::State::Dying)
            return callback(thread);
        return IterationDecision::Continue;
    });
}

template<typename Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
    ASSERT_INTERRUPTS_DISABLED();
    ScopedSpinLock lock(g_scheduler_lock);
    auto ret = Scheduler::for_each_runnable(callback);
    if (ret == IterationDecision::Break)
        return ret;
    return Scheduler::for_each_nonrunnable(callback);
}

template<typename Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
    ASSERT_INTERRUPTS_DISABLED();
    ScopedSpinLock lock(g_scheduler_lock);
    auto new_callback = [=](Thread& thread) -> IterationDecision {
        if (thread.state() == state)
            return callback(thread);
        return IterationDecision::Continue;
    };
    if (is_runnable_state(state))
        return Scheduler::for_each_runnable(new_callback);
    return Scheduler::for_each_nonrunnable(new_callback);
}
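
// Example (sketch): counting the currently runnable threads (interrupts must
// be disabled, per the assertions above). The callback returns an
// IterationDecision, so it can break out of the walk early:
//
//     size_t count = 0;
//     Thread::for_each_in_state(Thread::State::Runnable, [&](Thread&) {
//         ++count;
//         return IterationDecision::Continue;
//     });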

const LogStream& operator<<(const LogStream&, const Thread&);
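
// Scheduler bookkeeping: every thread lives on one of two intrusive lists,
// keyed by whether its current state counts as runnable. The list node is
// embedded in the Thread itself (m_runnable_list_node), so moving a thread
// between lists never allocates.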
struct SchedulerData {
    typedef IntrusiveList<Thread, &Thread::m_runnable_list_node> ThreadList;
    ThreadList m_runnable_threads;
    ThreadList m_nonrunnable_threads;

    ThreadList& thread_list_for_state(Thread::State state)
    {
        if (Thread::is_runnable_state(state))
            return m_runnable_threads;
        return m_nonrunnable_threads;
    }
};

template<typename Callback>
inline IterationDecision Scheduler::for_each_runnable(Callback callback)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(g_scheduler_lock.is_locked());
    auto& tl = g_scheduler_data->m_runnable_threads;
    for (auto it = tl.begin(); it != tl.end();) {
        auto& thread = *it;
        // Advance the iterator before invoking the callback, so the callback
        // may safely remove the current thread from the list.
        ++it;
        if (callback(thread) == IterationDecision::Break)
            return IterationDecision::Break;
    }
    return IterationDecision::Continue;
}

template<typename Callback>
inline IterationDecision Scheduler::for_each_nonrunnable(Callback callback)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(g_scheduler_lock.is_locked());
    auto& tl = g_scheduler_data->m_nonrunnable_threads;
    for (auto it = tl.begin(); it != tl.end();) {
        auto& thread = *it;
        // As above: advance before the callback so removing "thread" is safe.
        ++it;
        if (callback(thread) == IterationDecision::Break)
            return IterationDecision::Break;
    }
    return IterationDecision::Continue;
}

}