Scheduler.cpp

/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/ScopeGuard.h>
#include <AK/Time.h>
#include <Kernel/Arch/x86/InterruptDisabler.h>
#include <Kernel/Debug.h>
#include <Kernel/Panic.h>
#include <Kernel/PerformanceManager.h>
#include <Kernel/Process.h>
#include <Kernel/RTC.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Sections.h>
#include <Kernel/Time/TimeManagement.h>

// Remove this once SMP is stable and can be enabled by default
#define SCHEDULE_ON_ALL_PROCESSORS 0

namespace Kernel {

class SchedulerPerProcessorData {
    AK_MAKE_NONCOPYABLE(SchedulerPerProcessorData);
    AK_MAKE_NONMOVABLE(SchedulerPerProcessorData);

public:
    SchedulerPerProcessorData() = default;

    bool m_in_scheduler { true };
};

RecursiveSpinLock g_scheduler_lock;

static u32 time_slice_for(const Thread& thread)
{
    // One time slice unit == 4ms (assuming 250 ticks/second)
    if (thread.is_idle_thread())
        return 1;
    return 2;
}

READONLY_AFTER_INIT Thread* g_finalizer;
READONLY_AFTER_INIT WaitQueue* g_finalizer_wait_queue;
Atomic<bool> g_finalizer_has_work { false };
READONLY_AFTER_INIT static Process* s_colonel_process;

struct ThreadReadyQueue {
    IntrusiveList<Thread, RawPtr<Thread>, &Thread::m_ready_queue_node> thread_list;
};

static SpinLock<u8> g_ready_queues_lock;
static u32 g_ready_queues_mask;
static constexpr u32 g_ready_queue_buckets = sizeof(g_ready_queues_mask) * 8;
READONLY_AFTER_INIT static ThreadReadyQueue* g_ready_queues; // g_ready_queue_buckets entries
static TotalTimeScheduled g_total_time_scheduled;
static SpinLock<u8> g_total_time_scheduled_lock;

// The Scheduler::current_time function provides a current time for scheduling purposes,
// which may not necessarily relate to wall time
u64 (*Scheduler::current_time)();

static void dump_thread_list(bool = false);

static inline u32 thread_priority_to_priority_index(u32 thread_priority)
{
    // Converts the priority in the range of THREAD_PRIORITY_MIN...THREAD_PRIORITY_MAX
    // to an index into g_ready_queues where 0 is the highest priority bucket
    VERIFY(thread_priority >= THREAD_PRIORITY_MIN && thread_priority <= THREAD_PRIORITY_MAX);
    constexpr u32 thread_priority_count = THREAD_PRIORITY_MAX - THREAD_PRIORITY_MIN + 1;
    static_assert(thread_priority_count > 0);
    auto priority_bucket = ((thread_priority_count - (thread_priority - THREAD_PRIORITY_MIN)) / thread_priority_count) * (g_ready_queue_buckets - 1);
    VERIFY(priority_bucket < g_ready_queue_buckets);
    return priority_bucket;
}
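
// Scans the ready-queue buckets from highest to lowest priority (via the
// g_ready_queues_mask bitmap) and dequeues the first thread that is not
// already active on another core and whose affinity mask includes this
// processor. Falls back to the idle thread if nothing else is runnable.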
Thread& Scheduler::pull_next_runnable_thread()
{
    auto affinity_mask = 1u << Processor::current().id();
    ScopedSpinLock lock(g_ready_queues_lock);
    auto priority_mask = g_ready_queues_mask;
    while (priority_mask != 0) {
        auto priority = __builtin_ffsl(priority_mask);
        VERIFY(priority > 0);
        auto& ready_queue = g_ready_queues[--priority];
        for (auto& thread : ready_queue.thread_list) {
            VERIFY(thread.m_runnable_priority == (int)priority);
            if (thread.is_active())
                continue;
            if (!(thread.affinity() & affinity_mask))
                continue;
            thread.m_runnable_priority = -1;
            ready_queue.thread_list.remove(thread);
            if (ready_queue.thread_list.is_empty())
                g_ready_queues_mask &= ~(1u << priority);
            // Mark it as active because we are using this thread. This is similar
            // to comparing it with Processor::current_thread, but when there are
            // multiple processors there's no easy way to check whether the thread
            // is actually still needed. This prevents accidental finalization when
            // a thread is no longer in Running state, but running on another core.
            // We need to mark it active here so that this thread won't be
            // scheduled on another core if it were to be queued before actually
            // switching to it.
            // FIXME: Figure out a better way maybe?
            thread.set_active(true);
            return thread;
        }
        priority_mask &= ~(1u << priority);
    }
    return *Processor::idle_thread();
}
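
// Same scan as pull_next_runnable_thread(), but non-destructive: nothing is
// dequeued or marked active, and nullptr is returned (instead of the idle
// thread) when no other thread is ready for this processor.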
Thread* Scheduler::peek_next_runnable_thread()
{
    auto affinity_mask = 1u << Processor::current().id();
    ScopedSpinLock lock(g_ready_queues_lock);
    auto priority_mask = g_ready_queues_mask;
    while (priority_mask != 0) {
        auto priority = __builtin_ffsl(priority_mask);
        VERIFY(priority > 0);
        auto& ready_queue = g_ready_queues[--priority];
        for (auto& thread : ready_queue.thread_list) {
            VERIFY(thread.m_runnable_priority == (int)priority);
            if (thread.is_active())
                continue;
            if (!(thread.affinity() & affinity_mask))
                continue;
            return &thread;
        }
        priority_mask &= ~(1u << priority);
    }

    // Unlike in pull_next_runnable_thread() we don't want to fall back to
    // the idle thread. We just want to see if we have any other thread ready
    // to be scheduled.
    return nullptr;
}
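
// Removes the thread from its ready-queue bucket, clearing the bucket's bit
// in g_ready_queues_mask when the bucket becomes empty. Returns false if the
// thread isn't queued, or if check_affinity is set and the thread can't run
// on this processor; idle threads are treated as trivially dequeued.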
bool Scheduler::dequeue_runnable_thread(Thread& thread, bool check_affinity)
{
    if (thread.is_idle_thread())
        return true;
    ScopedSpinLock lock(g_ready_queues_lock);
    auto priority = thread.m_runnable_priority;
    if (priority < 0) {
        VERIFY(!thread.m_ready_queue_node.is_in_list());
        return false;
    }
    if (check_affinity && !(thread.affinity() & (1 << Processor::current().id())))
        return false;
    VERIFY(g_ready_queues_mask & (1u << priority));
    auto& ready_queue = g_ready_queues[priority];
    thread.m_runnable_priority = -1;
    ready_queue.thread_list.remove(thread);
    if (ready_queue.thread_list.is_empty())
        g_ready_queues_mask &= ~(1u << priority);
    return true;
}
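
// Appends the thread to the ready-queue bucket matching its priority and sets
// the corresponding bit in g_ready_queues_mask. Idle threads are never
// queued; the caller must hold g_scheduler_lock.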
void Scheduler::queue_runnable_thread(Thread& thread)
{
    VERIFY(g_scheduler_lock.own_lock());
    if (thread.is_idle_thread())
        return;
    auto priority = thread_priority_to_priority_index(thread.priority());
    ScopedSpinLock lock(g_ready_queues_lock);
    VERIFY(thread.m_runnable_priority < 0);
    thread.m_runnable_priority = (int)priority;
    VERIFY(!thread.m_ready_queue_node.is_in_list());
    auto& ready_queue = g_ready_queues[priority];
    bool was_empty = ready_queue.thread_list.is_empty();
    ready_queue.thread_list.append(thread);
    if (was_empty)
        g_ready_queues_mask |= (1u << priority);
}

UNMAP_AFTER_INIT void Scheduler::start()
{
    VERIFY_INTERRUPTS_DISABLED();

    // We need to acquire our scheduler lock, which will be released
    // by the idle thread once control is transferred there
    g_scheduler_lock.lock();

    auto& processor = Processor::current();
    processor.set_scheduler_data(*new SchedulerPerProcessorData());
    VERIFY(processor.is_initialized());
    auto& idle_thread = *Processor::idle_thread();
    VERIFY(processor.current_thread() == &idle_thread);
    idle_thread.set_ticks_left(time_slice_for(idle_thread));
    idle_thread.did_schedule();
    idle_thread.set_initialized(true);
    processor.init_context(idle_thread, false);
    idle_thread.set_state(Thread::Running);
    VERIFY(idle_thread.affinity() == (1u << processor.get_id()));
    processor.initialize_context_switching(idle_thread);
    VERIFY_NOT_REACHED();
}

bool Scheduler::pick_next()
{
    VERIFY_INTERRUPTS_DISABLED();

    // Set the m_in_scheduler flag before acquiring the spinlock. This
    // prevents a recursive call into Scheduler::invoke_async upon
    // leaving the scheduler lock.
    ScopedCritical critical;
    auto& scheduler_data = Processor::current().get_scheduler_data();
    scheduler_data.m_in_scheduler = true;
    ScopeGuard guard(
        []() {
            // We may be on a different processor after we got switched
            // back to this thread!
            auto& scheduler_data = Processor::current().get_scheduler_data();
            VERIFY(scheduler_data.m_in_scheduler);
            scheduler_data.m_in_scheduler = false;
        });

    ScopedSpinLock lock(g_scheduler_lock);

    if constexpr (SCHEDULER_RUNNABLE_DEBUG) {
        dump_thread_list();
    }

    auto& thread_to_schedule = pull_next_runnable_thread();
    if constexpr (SCHEDULER_DEBUG) {
#if ARCH(I386)
        dbgln("Scheduler[{}]: Switch to {} @ {:04x}:{:08x}",
            Processor::id(),
            thread_to_schedule,
            thread_to_schedule.regs().cs, thread_to_schedule.regs().ip());
#else
        dbgln("Scheduler[{}]: Switch to {} @ {:04x}:{:016x}",
            Processor::id(),
            thread_to_schedule,
            thread_to_schedule.regs().cs, thread_to_schedule.regs().rip);
#endif
    }

    // We need to leave our first critical section before switching context,
    // but since we're still holding the scheduler lock we're still in a critical section
    critical.leave();

    thread_to_schedule.set_ticks_left(time_slice_for(thread_to_schedule));
    return context_switch(&thread_to_schedule);
}
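
// Voluntarily gives up the CPU. If called from an IRQ handler or while in a
// critical section, a context switch is not possible right now, so the switch
// is deferred by asking the processor to invoke the scheduler asynchronously.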
bool Scheduler::yield()
{
    InterruptDisabler disabler;

    auto& proc = Processor::current();
    auto current_thread = Thread::current();
    dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: yielding thread {} in_irq={}", proc.get_id(), *current_thread, proc.in_irq());
    VERIFY(current_thread != nullptr);
    if (proc.in_irq() || proc.in_critical()) {
        // If we're handling an IRQ we can't switch context, or we're in
        // a critical section where we don't want to switch contexts, then
        // delay until exiting the trap or critical section
        proc.invoke_scheduler_async();
        return false;
    }

    if (!Scheduler::pick_next())
        return false;

    if constexpr (SCHEDULER_DEBUG)
        dbgln("Scheduler[{}]: yield returns to thread {} in_irq={}", Processor::id(), *current_thread, Processor::current().in_irq());
    return true;
}
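
// Switches from the currently running thread to `thread`: the outgoing thread
// is put back into the Runnable state if it is still marked Running, the
// incoming thread's context is initialized on its first ever switch, and the
// low-level switch is delegated to Processor::switch_context(). Returns false
// if `thread` is already the current thread.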
bool Scheduler::context_switch(Thread* thread)
{
    if (s_mm_lock.own_lock()) {
        PANIC("In context switch while holding s_mm_lock");
    }

    thread->did_schedule();

    auto from_thread = Thread::current();
    if (from_thread == thread)
        return false;

    if (from_thread) {
        // If the last process hasn't blocked (still marked as running),
        // mark it as runnable for the next round.
        if (from_thread->state() == Thread::Running)
            from_thread->set_state(Thread::Runnable);

#ifdef LOG_EVERY_CONTEXT_SWITCH
        const auto msg =
#    if ARCH(I386)
            "Scheduler[{}]: {} -> {} [prio={}] {:04x}:{:08x}";
#    else
            "Scheduler[{}]: {} -> {} [prio={}] {:04x}:{:16x}";
#    endif

        dbgln(msg,
            Processor::id(), from_thread->tid().value(),
            thread->tid().value(), thread->priority(), thread->regs().cs, thread->regs().ip());
#endif
    }

    auto& proc = Processor::current();
    if (!thread->is_initialized()) {
        proc.init_context(*thread, false);
        thread->set_initialized(true);
    }
    thread->set_state(Thread::Running);

    PerformanceManager::add_context_switch_perf_event(*from_thread, *thread);

    proc.switch_context(from_thread, thread);

    // NOTE: from_thread at this point reflects the thread we were
    // switched from, and thread reflects Thread::current()
    enter_current(*from_thread, false);
    VERIFY(thread == Thread::current());

    if (thread->process().is_user_process()) {
        auto& regs = Thread::current()->get_register_dump_from_stack();
        auto iopl = get_iopl_from_eflags(regs.flags());
        if (iopl != 0) {
            PANIC("Switched to thread {} with non-zero IOPL={}", Thread::current()->tid().value(), iopl);
        }
    }

    return true;
}
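
// Runs on the side of the newly scheduled thread right after a context switch
// (or on first entry into a thread when is_first is true): updates the
// time-scheduled accounting for both threads, hands a dying previous thread
// to the finalizer, and dispatches a pending signal to the incoming thread
// when appropriate.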
void Scheduler::enter_current(Thread& prev_thread, bool is_first)
{
    VERIFY(g_scheduler_lock.own_lock());

    // We already recorded the scheduled time when entering the trap, so this merely accounts for the kernel time since then
    auto scheduler_time = Scheduler::current_time();
    prev_thread.update_time_scheduled(scheduler_time, true, true);
    auto* current_thread = Thread::current();
    current_thread->update_time_scheduled(scheduler_time, true, false);

    prev_thread.set_active(false);
    if (prev_thread.state() == Thread::Dying) {
        // If the thread we switched from is marked as dying, then notify
        // the finalizer. Note that as soon as we leave the scheduler lock
        // the finalizer may free from_thread!
        notify_finalizer();
    } else if (!is_first) {
        // Check if we have any signals we should deliver (even if we don't
        // end up switching to another thread).
        if (!current_thread->is_in_block() && current_thread->previous_mode() != Thread::PreviousMode::KernelMode) {
            ScopedSpinLock lock(current_thread->get_lock());
            if (current_thread->state() == Thread::Running && current_thread->pending_signals_for_state()) {
                current_thread->dispatch_one_pending_signal();
            }
        }
    }
}

void Scheduler::leave_on_first_switch(u32 flags)
{
    // This is called when a thread is switched into for the first time.
    // At this point, enter_current has already been called, but because
    // Scheduler::context_switch is not in the call stack we need to
    // clean up and release locks manually here
    g_scheduler_lock.unlock(flags);

    auto& scheduler_data = Processor::current().get_scheduler_data();
    VERIFY(scheduler_data.m_in_scheduler);
    scheduler_data.m_in_scheduler = false;
}

void Scheduler::prepare_after_exec()
{
    // This is called after exec() when doing a context "switch" into
    // the new process. This is called from Processor::assume_context
    VERIFY(g_scheduler_lock.own_lock());

    auto& scheduler_data = Processor::current().get_scheduler_data();
    VERIFY(!scheduler_data.m_in_scheduler);
    scheduler_data.m_in_scheduler = true;
}

void Scheduler::prepare_for_idle_loop()
{
    // This is called when the CPU finished setting up the idle loop
    // and is about to run it. We need to acquire the scheduler lock
    VERIFY(!g_scheduler_lock.own_lock());
    g_scheduler_lock.lock();

    auto& scheduler_data = Processor::current().get_scheduler_data();
    VERIFY(!scheduler_data.m_in_scheduler);
    scheduler_data.m_in_scheduler = true;
}

Process* Scheduler::colonel()
{
    VERIFY(s_colonel_process);
    return s_colonel_process;
}

static u64 current_time_tsc()
{
    return read_tsc();
}

static u64 current_time_monotonic()
{
    // We always need a precise timestamp here, we cannot rely on a coarse timestamp
    return (u64)TimeManagement::the().monotonic_time(TimePrecision::Precise).to_nanoseconds();
}

UNMAP_AFTER_INIT void Scheduler::initialize()
{
    VERIFY(Processor::is_initialized()); // sanity check

    // Figure out a good scheduling time source
    if (Processor::current().has_feature(CPUFeature::TSC)) {
        // TODO: only use if TSC is running at a constant frequency?
        current_time = current_time_tsc;
    } else {
        // TODO: Using HPET is rather slow, can we use any other time source that may be faster?
        current_time = current_time_monotonic;
    }

    RefPtr<Thread> idle_thread;
    g_finalizer_wait_queue = new WaitQueue;
    g_ready_queues = new ThreadReadyQueue[g_ready_queue_buckets];

    g_finalizer_has_work.store(false, AK::MemoryOrder::memory_order_release);
    s_colonel_process = Process::create_kernel_process(idle_thread, "colonel", idle_loop, nullptr, 1, Process::RegisterProcess::No).leak_ref();
    VERIFY(s_colonel_process);
    VERIFY(idle_thread);
    idle_thread->set_priority(THREAD_PRIORITY_MIN);
    idle_thread->set_name(StringView("idle thread #0"));

    set_idle_thread(idle_thread);
}

UNMAP_AFTER_INIT void Scheduler::set_idle_thread(Thread* idle_thread)
{
    idle_thread->set_idle_thread();
    Processor::current().set_idle_thread(*idle_thread);
    Processor::current().set_current_thread(*idle_thread);
}

UNMAP_AFTER_INIT Thread* Scheduler::create_ap_idle_thread(u32 cpu)
{
    VERIFY(cpu != 0);
    // This function is called on the bsp, but creates an idle thread for another AP
    VERIFY(Processor::is_bootstrap_processor());

    VERIFY(s_colonel_process);
    Thread* idle_thread = s_colonel_process->create_kernel_thread(idle_loop, nullptr, THREAD_PRIORITY_MIN, String::formatted("idle thread #{}", cpu), 1 << cpu, false);
    VERIFY(idle_thread);
    return idle_thread;
}

void Scheduler::add_time_scheduled(u64 time_to_add, bool is_kernel)
{
    ScopedSpinLock lock(g_total_time_scheduled_lock);
    g_total_time_scheduled.total += time_to_add;
    if (is_kernel)
        g_total_time_scheduled.total_kernel += time_to_add;
}
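
// Invoked from the timer interrupt handler on every tick. It charges the tick
// to the current thread and, once the thread's time slice is used up (or the
// thread was asked to die), requests an asynchronous scheduler invocation so
// the actual switch happens after the IRQ handler returns. If no other thread
// is ready, the current thread simply gets another time slice instead.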
void Scheduler::timer_tick(const RegisterState& regs)
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(Processor::current().in_irq());

    auto current_thread = Processor::current_thread();
    if (!current_thread)
        return;

    // Sanity checks
    VERIFY(current_thread->current_trap());
    VERIFY(current_thread->current_trap()->regs == &regs);

#if !SCHEDULE_ON_ALL_PROCESSORS
    if (!Processor::is_bootstrap_processor())
        return; // TODO: This prevents scheduling on other CPUs!
#endif

    if (current_thread->process().is_kernel_process()) {
        // Because the previous mode when entering/exiting kernel threads never changes
        // we never update the time scheduled. So we need to update it manually on the
        // timer interrupt
        current_thread->update_time_scheduled(current_time(), true, false);
    }

    if (current_thread->previous_mode() == Thread::PreviousMode::UserMode && current_thread->should_die() && !current_thread->is_blocked()) {
        ScopedSpinLock scheduler_lock(g_scheduler_lock);
        dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: Terminating user mode thread {}", Processor::id(), *current_thread);
        current_thread->set_state(Thread::Dying);
        Processor::current().invoke_scheduler_async();
        return;
    }

    if (current_thread->tick())
        return;

    if (!current_thread->is_idle_thread() && !peek_next_runnable_thread()) {
        // If no other thread is ready to be scheduled we don't need to
        // switch to the idle thread. Just give the current thread another
        // time slice and let it run!
        current_thread->set_ticks_left(time_slice_for(*current_thread));
        current_thread->did_schedule();
        dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: No other threads ready, give {} another timeslice", Processor::id(), *current_thread);
        return;
    }

    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(Processor::current().in_irq());
    Processor::current().invoke_scheduler_async();
}

void Scheduler::invoke_async()
{
    VERIFY_INTERRUPTS_DISABLED();
    auto& proc = Processor::current();
    VERIFY(!proc.in_irq());

    // Since this function is called when leaving critical sections (such
    // as a SpinLock), we need to check if we're not already doing this
    // to prevent recursion
    if (!proc.get_scheduler_data().m_in_scheduler)
        pick_next();
}
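
// Yields while the caller still holds a critical section: the context switch
// is only flagged by yield(), then the critical section depth is temporarily
// cleared so the pending switch can actually take place, and restored once
// this thread is scheduled again (possibly on a different CPU).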
void Scheduler::yield_from_critical()
{
    auto& proc = Processor::current();
    VERIFY(proc.in_critical());
    VERIFY(!proc.in_irq());

    yield(); // Flag a context switch

    u32 prev_flags;
    u32 prev_crit = Processor::current().clear_critical(prev_flags, false);

    // Note, we may now be on a different CPU!
    Processor::current().restore_critical(prev_crit, prev_flags);
}
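
// Only wake the finalizer on the false -> true transition of
// g_finalizer_has_work, so repeated notifications don't cause redundant
// wake-ups while the finalizer already has work queued.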
void Scheduler::notify_finalizer()
{
    if (g_finalizer_has_work.exchange(true, AK::MemoryOrder::memory_order_acq_rel) == false)
        g_finalizer_wait_queue->wake_all();
}
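
// Body of each processor's idle thread: halt until the next interrupt, then
// yield so any newly runnable thread gets picked up. Unless
// SCHEDULE_ON_ALL_PROCESSORS is enabled, only the bootstrap processor yields
// here, matching the restriction in timer_tick().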
void Scheduler::idle_loop(void*)
{
    auto& proc = Processor::current();
    dbgln("Scheduler[{}]: idle loop running", proc.get_id());
    VERIFY(are_interrupts_enabled());

    for (;;) {
        proc.idle_begin();
        asm("hlt");

        proc.idle_end();
        VERIFY_INTERRUPTS_ENABLED();
#if SCHEDULE_ON_ALL_PROCESSORS
        yield();
#else
        if (Processor::current().id() == 0)
            yield();
#endif
    }
}

void Scheduler::dump_scheduler_state(bool with_stack_traces)
{
    dump_thread_list(with_stack_traces);
}

bool Scheduler::is_initialized()
{
    // The scheduler is initialized iff the idle thread exists
    return Processor::idle_thread() != nullptr;
}

TotalTimeScheduled Scheduler::get_total_time_scheduled()
{
    ScopedSpinLock lock(g_total_time_scheduled_lock);
    return g_total_time_scheduled;
}

void dump_thread_list(bool with_stack_traces)
{
    dbgln("Scheduler thread list for processor {}:", Processor::id());

    auto get_cs = [](Thread& thread) -> u16 {
        if (!thread.current_trap())
            return thread.regs().cs;
        return thread.get_register_dump_from_stack().cs;
    };

    auto get_eip = [](Thread& thread) -> u32 {
        if (!thread.current_trap())
            return thread.regs().ip();
        return thread.get_register_dump_from_stack().ip();
    };

    Thread::for_each([&](Thread& thread) {
        switch (thread.state()) {
        case Thread::Dying:
            dmesgln(" {:14} {:30} @ {:04x}:{:08x} Finalizable: {}, (nsched: {})",
                thread.state_string(),
                thread,
                get_cs(thread),
                get_eip(thread),
                thread.is_finalizable(),
                thread.times_scheduled());
            break;
        default:
            dmesgln(" {:14} Pr:{:2} {:30} @ {:04x}:{:08x} (nsched: {})",
                thread.state_string(),
                thread.priority(),
                thread,
                get_cs(thread),
                get_eip(thread),
                thread.times_scheduled());
            break;
        }
        if (with_stack_traces)
            dbgln("{}", thread.backtrace());
    });
}

}