Scheduler.cpp

/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/BuiltinWrappers.h>
#include <AK/ScopeGuard.h>
#include <AK/Singleton.h>
#include <AK/Time.h>
#include <Kernel/Arch/x86/InterruptDisabler.h>
#include <Kernel/Arch/x86/TrapFrame.h>
#include <Kernel/Debug.h>
#include <Kernel/Panic.h>
#include <Kernel/PerformanceManager.h>
#include <Kernel/Process.h>
#include <Kernel/RTC.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Sections.h>
#include <Kernel/Time/TimeManagement.h>
#include <Kernel/kstdio.h>

// Remove this once SMP is stable and can be enabled by default
#define SCHEDULE_ON_ALL_PROCESSORS 0

namespace Kernel {

RecursiveSpinlock g_scheduler_lock;

static u32 time_slice_for(const Thread& thread)
{
    // One time slice unit == 4ms (assuming 250 ticks/second)
    if (thread.is_idle_thread())
        return 1;
    return 2;
}
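
// With the 250 ticks/second rate assumed above, one tick is 1000ms / 250 = 4ms:
// the idle thread is preempted after a single tick (4ms), every other thread
// after two ticks (8ms).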

READONLY_AFTER_INIT Thread* g_finalizer;
READONLY_AFTER_INIT WaitQueue* g_finalizer_wait_queue;
Atomic<bool> g_finalizer_has_work { false };
READONLY_AFTER_INIT static Process* s_colonel_process;

struct ThreadReadyQueue {
    IntrusiveList<&Thread::m_ready_queue_node> thread_list;
};

struct ThreadReadyQueues {
    u32 mask {};
    static constexpr size_t count = sizeof(mask) * 8;
    Array<ThreadReadyQueue, count> queues;
};
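
// The ready queues form a two-level structure: `queues` holds one intrusive
// list per priority bucket (one bucket per bit of `mask`), and bit i of `mask`
// is set exactly when queues[i] is non-empty. The enqueue/dequeue functions
// below maintain this invariant, which lets pull_next_runnable_thread() locate
// the highest-priority non-empty bucket with a single bit_scan_forward(mask)
// instead of walking all 32 lists.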

static Singleton<SpinlockProtected<ThreadReadyQueues>> g_ready_queues;

static SpinlockProtected<TotalTimeScheduled> g_total_time_scheduled;

// The Scheduler::current_time function provides a current time for scheduling purposes,
// which may not necessarily relate to wall time
u64 (*Scheduler::current_time)();

static void dump_thread_list(bool = false);

static inline u32 thread_priority_to_priority_index(u32 thread_priority)
{
    // Converts the priority in the range of THREAD_PRIORITY_MIN...THREAD_PRIORITY_MAX
    // to an index into g_ready_queues where 0 is the highest priority bucket
    VERIFY(thread_priority >= THREAD_PRIORITY_MIN && thread_priority <= THREAD_PRIORITY_MAX);
    constexpr u32 thread_priority_count = THREAD_PRIORITY_MAX - THREAD_PRIORITY_MIN + 1;
    static_assert(thread_priority_count > 0);
    // Note: multiply before dividing; with integer division the other way
    // around, almost every priority would collapse into bucket 0.
    auto priority_bucket = ((thread_priority_count - (thread_priority - THREAD_PRIORITY_MIN)) * (ThreadReadyQueues::count - 1)) / thread_priority_count;
    VERIFY(priority_bucket < ThreadReadyQueues::count);
    return priority_bucket;
}
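
// A worked example, assuming the usual Thread.h constants THREAD_PRIORITY_MIN == 1
// and THREAD_PRIORITY_MAX == 99 (so thread_priority_count == 99 and
// ThreadReadyQueues::count - 1 == 31):
//   thread_priority == 99 (max) -> (99 - 98) * 31 / 99 == 0  (highest bucket)
//   thread_priority == 50       -> (99 - 49) * 31 / 99 == 15 (middle bucket)
//   thread_priority == 1 (min)  -> (99 - 0)  * 31 / 99 == 31 (lowest bucket)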

Thread& Scheduler::pull_next_runnable_thread()
{
    auto affinity_mask = 1u << Processor::current_id();

    return g_ready_queues->with([&](auto& ready_queues) -> Thread& {
        auto priority_mask = ready_queues.mask;
        while (priority_mask != 0) {
            // bit_scan_forward() returns a 1-based bit index (0 means no bit
            // set), hence the decrement below.
            auto priority = bit_scan_forward(priority_mask);
            VERIFY(priority > 0);
            auto& ready_queue = ready_queues.queues[--priority];
            for (auto& thread : ready_queue.thread_list) {
                VERIFY(thread.m_runnable_priority == (int)priority);
                if (thread.is_active())
                    continue;
                if (!(thread.affinity() & affinity_mask))
                    continue;
                thread.m_runnable_priority = -1;
                ready_queue.thread_list.remove(thread);
                if (ready_queue.thread_list.is_empty())
                    ready_queues.mask &= ~(1u << priority);
                // Mark it as active because we are using this thread. This is similar
                // to comparing it with Processor::current_thread, but when there are
                // multiple processors there's no easy way to check whether the thread
                // is actually still needed. This prevents accidental finalization when
                // a thread is no longer in Running state, but running on another core.
                // We need to mark it active here so that this thread won't be
                // scheduled on another core if it were to be queued before actually
                // switching to it.
                // FIXME: Figure out a better way maybe?
                thread.set_active(true);
                return thread;
            }
            priority_mask &= ~(1u << priority);
        }
        return *Processor::idle_thread();
    });
}

Thread* Scheduler::peek_next_runnable_thread()
{
    auto affinity_mask = 1u << Processor::current_id();

    return g_ready_queues->with([&](auto& ready_queues) -> Thread* {
        auto priority_mask = ready_queues.mask;
        while (priority_mask != 0) {
            auto priority = bit_scan_forward(priority_mask);
            VERIFY(priority > 0);
            auto& ready_queue = ready_queues.queues[--priority];
            for (auto& thread : ready_queue.thread_list) {
                VERIFY(thread.m_runnable_priority == (int)priority);
                if (thread.is_active())
                    continue;
                if (!(thread.affinity() & affinity_mask))
                    continue;
                return &thread;
            }
            priority_mask &= ~(1u << priority);
        }

        // Unlike in pull_next_runnable_thread() we don't want to fall back to
        // the idle thread. We just want to see if we have any other thread ready
        // to be scheduled.
        return nullptr;
    });
}

bool Scheduler::dequeue_runnable_thread(Thread& thread, bool check_affinity)
{
    if (thread.is_idle_thread())
        return true;

    return g_ready_queues->with([&](auto& ready_queues) {
        auto priority = thread.m_runnable_priority;
        if (priority < 0) {
            VERIFY(!thread.m_ready_queue_node.is_in_list());
            return false;
        }

        if (check_affinity && !(thread.affinity() & (1u << Processor::current_id())))
            return false;

        VERIFY(ready_queues.mask & (1u << priority));
        auto& ready_queue = ready_queues.queues[priority];
        thread.m_runnable_priority = -1;
        ready_queue.thread_list.remove(thread);
        if (ready_queue.thread_list.is_empty())
            ready_queues.mask &= ~(1u << priority);
        return true;
    });
}

void Scheduler::enqueue_runnable_thread(Thread& thread)
{
    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
    if (thread.is_idle_thread())
        return;
    auto priority = thread_priority_to_priority_index(thread.priority());

    g_ready_queues->with([&](auto& ready_queues) {
        VERIFY(thread.m_runnable_priority < 0);
        thread.m_runnable_priority = (int)priority;
        VERIFY(!thread.m_ready_queue_node.is_in_list());

        auto& ready_queue = ready_queues.queues[priority];
        bool was_empty = ready_queue.thread_list.is_empty();
        ready_queue.thread_list.append(thread);
        if (was_empty)
            ready_queues.mask |= (1u << priority);
    });
}
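
// Note that enqueueing appends to the tail of a bucket's list while pull/peek
// scan from the head, so threads sharing a priority bucket are scheduled
// round-robin with respect to each other.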

UNMAP_AFTER_INIT void Scheduler::start()
{
    VERIFY_INTERRUPTS_DISABLED();

    // We need to acquire our scheduler lock, which will be released
    // by the idle thread once control is transferred there
    g_scheduler_lock.lock();

    auto& processor = Processor::current();
    VERIFY(processor.is_initialized());
    auto& idle_thread = *Processor::idle_thread();
    VERIFY(processor.current_thread() == &idle_thread);
    idle_thread.set_ticks_left(time_slice_for(idle_thread));
    idle_thread.did_schedule();
    idle_thread.set_initialized(true);
    processor.init_context(idle_thread, false);
    idle_thread.set_state(Thread::State::Running);
    VERIFY(idle_thread.affinity() == (1u << processor.id()));
    processor.initialize_context_switching(idle_thread);
    VERIFY_NOT_REACHED();
}

void Scheduler::pick_next()
{
    VERIFY_INTERRUPTS_DISABLED();

    // Set the in_scheduler flag before acquiring the spinlock. This
    // prevents a recursive call into Scheduler::invoke_async upon
    // leaving the scheduler lock.
    ScopedCritical critical;
    Processor::set_current_in_scheduler(true);
    ScopeGuard guard(
        []() {
            // We may be on a different processor after we got switched
            // back to this thread!
            VERIFY(Processor::current_in_scheduler());
            Processor::set_current_in_scheduler(false);
        });

    SpinlockLocker lock(g_scheduler_lock);

    if constexpr (SCHEDULER_RUNNABLE_DEBUG) {
        dump_thread_list();
    }

    auto& thread_to_schedule = pull_next_runnable_thread();
    if constexpr (SCHEDULER_DEBUG) {
        dbgln("Scheduler[{}]: Switch to {} @ {:#04x}:{:p}",
            Processor::current_id(),
            thread_to_schedule,
            thread_to_schedule.regs().cs, thread_to_schedule.regs().ip());
    }

    // We need to leave our first critical section before switching context,
    // but since we're still holding the scheduler lock we're still in a critical section
    critical.leave();

    thread_to_schedule.set_ticks_left(time_slice_for(thread_to_schedule));
    context_switch(&thread_to_schedule);
}

void Scheduler::yield()
{
    InterruptDisabler disabler;

    auto const* current_thread = Thread::current();
    // Verify before dereferencing in the debug message below.
    VERIFY(current_thread != nullptr);
    dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: yielding thread {} in_irq={}", Processor::current_id(), *current_thread, Processor::current_in_irq());
    if (Processor::current_in_irq() || Processor::in_critical()) {
        // If we're handling an IRQ we can't switch context, and if we're in
        // a critical section we don't want to switch contexts either, so
        // delay until exiting the trap or critical section
        Processor::current().invoke_scheduler_async();
        return;
    }

    Scheduler::pick_next();
}

void Scheduler::context_switch(Thread* thread)
{
    if (Memory::s_mm_lock.is_locked_by_current_processor()) {
        PANIC("In context switch while holding Memory::s_mm_lock");
    }

    thread->did_schedule();

    auto* from_thread = Thread::current();
    VERIFY(from_thread);

    if (from_thread == thread)
        return;

    // If the previous thread hasn't blocked (still marked as running),
    // mark it as runnable for the next round.
    if (from_thread->state() == Thread::State::Running)
        from_thread->set_state(Thread::State::Runnable);

#ifdef LOG_EVERY_CONTEXT_SWITCH
    const auto msg = "Scheduler[{}]: {} -> {} [prio={}] {:#04x}:{:p}";
    dbgln(msg,
        Processor::current_id(), from_thread->tid().value(),
        thread->tid().value(), thread->priority(), thread->regs().cs, thread->regs().ip());
#endif

    auto& proc = Processor::current();
    if (!thread->is_initialized()) {
        proc.init_context(*thread, false);
        thread->set_initialized(true);
    }
    thread->set_state(Thread::State::Running);

    PerformanceManager::add_context_switch_perf_event(*from_thread, *thread);

    proc.switch_context(from_thread, thread);

    // NOTE: from_thread at this point reflects the thread we were
    // switched from, and thread reflects Thread::current()
    enter_current(*from_thread);
    VERIFY(thread == Thread::current());
}

void Scheduler::enter_current(Thread& prev_thread)
{
    VERIFY(g_scheduler_lock.is_locked_by_current_processor());

    // We already recorded the scheduled time when entering the trap, so this merely accounts for the kernel time since then
    auto scheduler_time = Scheduler::current_time();
    prev_thread.update_time_scheduled(scheduler_time, true, true);
    auto* current_thread = Thread::current();
    current_thread->update_time_scheduled(scheduler_time, true, false);

    // NOTE: When doing an exec(), we will context switch from and to the same thread!
    // In that case, we must not mark the previous thread as inactive.
    if (&prev_thread != current_thread)
        prev_thread.set_active(false);

    if (prev_thread.state() == Thread::State::Dying) {
        // If the thread we switched from is marked as dying, then notify
        // the finalizer. Note that as soon as we leave the scheduler lock
        // the finalizer may free from_thread!
        notify_finalizer();
    }
}

void Scheduler::leave_on_first_switch(u32 flags)
{
    // This is called when a thread is switched into for the first time.
    // At this point, enter_current has already been called, but because
    // Scheduler::context_switch is not in the call stack we need to
    // clean up and release locks manually here
    g_scheduler_lock.unlock(flags);

    VERIFY(Processor::current_in_scheduler());
    Processor::set_current_in_scheduler(false);
}

void Scheduler::prepare_after_exec()
{
    // This is called after exec() when doing a context "switch" into
    // the new process. This is called from Processor::assume_context
    VERIFY(g_scheduler_lock.is_locked_by_current_processor());

    VERIFY(!Processor::current_in_scheduler());
    Processor::set_current_in_scheduler(true);
}

void Scheduler::prepare_for_idle_loop()
{
    // This is called when the CPU finished setting up the idle loop
    // and is about to run it. We need to acquire the scheduler lock
    VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
    g_scheduler_lock.lock();

    VERIFY(!Processor::current_in_scheduler());
    Processor::set_current_in_scheduler(true);
}
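
// leave_on_first_switch(), prepare_after_exec() and prepare_for_idle_loop()
// all patch up scheduler state for code paths that enter a thread without
// returning through Scheduler::context_switch(): first-time switches, exec(),
// and the idle loop each have to do the lock/flag bookkeeping by hand.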

Process* Scheduler::colonel()
{
    VERIFY(s_colonel_process);
    return s_colonel_process;
}

static u64 current_time_tsc()
{
    return read_tsc();
}

static u64 current_time_monotonic()
{
    // We always need a precise timestamp here, we cannot rely on a coarse timestamp
    return (u64)TimeManagement::the().monotonic_time(TimePrecision::Precise).to_nanoseconds();
}

UNMAP_AFTER_INIT void Scheduler::initialize()
{
    VERIFY(Processor::is_initialized()); // sanity check

    // Figure out a good scheduling time source
    if (Processor::current().has_feature(CPUFeature::TSC)) {
        // TODO: only use if TSC is running at a constant frequency?
        current_time = current_time_tsc;
    } else {
        // TODO: Using HPET is rather slow, can we use any other time source that may be faster?
        current_time = current_time_monotonic;
    }

    RefPtr<Thread> idle_thread;
    g_finalizer_wait_queue = new WaitQueue;

    g_finalizer_has_work.store(false, AK::MemoryOrder::memory_order_release);
    s_colonel_process = Process::create_kernel_process(idle_thread, KString::must_create("colonel"), idle_loop, nullptr, 1, Process::RegisterProcess::No).leak_ref();
    VERIFY(s_colonel_process);
    VERIFY(idle_thread);
    idle_thread->set_priority(THREAD_PRIORITY_MIN);
    idle_thread->set_name(KString::must_create("idle thread #0"));

    set_idle_thread(idle_thread);
}

UNMAP_AFTER_INIT void Scheduler::set_idle_thread(Thread* idle_thread)
{
    idle_thread->set_idle_thread();
    Processor::current().set_idle_thread(*idle_thread);
    Processor::set_current_thread(*idle_thread);
}

UNMAP_AFTER_INIT Thread* Scheduler::create_ap_idle_thread(u32 cpu)
{
    VERIFY(cpu != 0);
    // This function is called on the BSP, but creates an idle thread for another AP
    VERIFY(Processor::is_bootstrap_processor());

    VERIFY(s_colonel_process);
    Thread* idle_thread = s_colonel_process->create_kernel_thread(idle_loop, nullptr, THREAD_PRIORITY_MIN, MUST(KString::formatted("idle thread #{}", cpu)), 1u << cpu, false);
    VERIFY(idle_thread);
    return idle_thread;
}
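
// The `1u << cpu` argument is the new thread's CPU affinity mask, pinning each
// idle thread to its own AP; Scheduler::start() relies on this when it verifies
// idle_thread.affinity() == (1u << processor.id()).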

void Scheduler::add_time_scheduled(u64 time_to_add, bool is_kernel)
{
    g_total_time_scheduled.with([&](auto& total_time_scheduled) {
        total_time_scheduled.total += time_to_add;
        if (is_kernel)
            total_time_scheduled.total_kernel += time_to_add;
    });
}

void Scheduler::timer_tick(const RegisterState& regs)
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(Processor::current_in_irq());

    auto* current_thread = Processor::current_thread();
    if (!current_thread)
        return;

    // Sanity checks
    VERIFY(current_thread->current_trap());
    VERIFY(current_thread->current_trap()->regs == &regs);

#if !SCHEDULE_ON_ALL_PROCESSORS
    if (!Processor::is_bootstrap_processor())
        return; // TODO: This prevents scheduling on other CPUs!
#endif

    if (current_thread->process().is_kernel_process()) {
        // Because the previous mode never changes when entering/exiting kernel
        // threads, the scheduled time is never updated on those transitions,
        // so we need to update it manually on the timer interrupt.
        current_thread->update_time_scheduled(current_time(), true, false);
    }

    if (current_thread->previous_mode() == Thread::PreviousMode::UserMode && current_thread->should_die() && !current_thread->is_blocked()) {
        SpinlockLocker scheduler_lock(g_scheduler_lock);
        dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: Terminating user mode thread {}", Processor::current_id(), *current_thread);
        current_thread->set_state(Thread::State::Dying);
        Processor::current().invoke_scheduler_async();
        return;
    }

    // If the current thread still has ticks left in its time slice, let it keep running.
    if (current_thread->tick())
        return;

    if (!current_thread->is_idle_thread() && !peek_next_runnable_thread()) {
        // If no other thread is ready to be scheduled we don't need to
        // switch to the idle thread. Just give the current thread another
        // time slice and let it run!
        current_thread->set_ticks_left(time_slice_for(*current_thread));
        current_thread->did_schedule();
        dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: No other threads ready, give {} another timeslice", Processor::current_id(), *current_thread);
        return;
    }

    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(Processor::current_in_irq());
    Processor::current().invoke_scheduler_async();
}

void Scheduler::invoke_async()
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(!Processor::current_in_irq());

    // Since this function is called when leaving critical sections (such
    // as a Spinlock), we need to check if we're not already doing this
    // to prevent recursion
    if (!Processor::current_in_scheduler())
        pick_next();
}

void Scheduler::notify_finalizer()
{
    if (!g_finalizer_has_work.exchange(true, AK::MemoryOrder::memory_order_acq_rel))
        g_finalizer_wait_queue->wake_all();
}
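
// The atomic exchange makes notifications idempotent: only the caller that
// flips g_finalizer_has_work from false to true wakes the finalizer, so
// concurrent notifiers don't issue redundant wake_all() calls.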

void Scheduler::idle_loop(void*)
{
    auto& proc = Processor::current();
    dbgln("Scheduler[{}]: idle loop running", proc.id());
    VERIFY(are_interrupts_enabled());

    for (;;) {
        proc.idle_begin();
        asm("hlt");

        proc.idle_end();
        VERIFY_INTERRUPTS_ENABLED();
#if SCHEDULE_ON_ALL_PROCESSORS
        yield();
#else
        if (Processor::current_id() == 0)
            yield();
#endif
    }
}
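
// `hlt` parks the core until the next interrupt, so the idle loop wakes up on
// (at least) every timer tick. With SCHEDULE_ON_ALL_PROCESSORS == 0, only
// CPU 0 yields back into the scheduler here, matching the early return for
// non-boot processors in timer_tick() above.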

void Scheduler::dump_scheduler_state(bool with_stack_traces)
{
    dump_thread_list(with_stack_traces);
}

bool Scheduler::is_initialized()
{
    // The scheduler is initialized iff the idle thread exists
    return Processor::idle_thread() != nullptr;
}

TotalTimeScheduled Scheduler::get_total_time_scheduled()
{
    return g_total_time_scheduled.with([&](auto& total_time_scheduled) { return total_time_scheduled; });
}

void dump_thread_list(bool with_stack_traces)
{
    dbgln("Scheduler thread list for processor {}:", Processor::current_id());

    auto get_cs = [](Thread& thread) -> u16 {
        if (!thread.current_trap())
            return thread.regs().cs;
        return thread.get_register_dump_from_stack().cs;
    };

    auto get_eip = [](Thread& thread) -> u32 {
        if (!thread.current_trap())
            return thread.regs().ip();
        return thread.get_register_dump_from_stack().ip();
    };

    Thread::for_each([&](Thread& thread) {
        switch (thread.state()) {
        case Thread::State::Dying:
            dmesgln("  {:14} {:30} @ {:04x}:{:08x} Finalizable: {}, (nsched: {})",
                thread.state_string(),
                thread,
                get_cs(thread),
                get_eip(thread),
                thread.is_finalizable(),
                thread.times_scheduled());
            break;
        default:
            dmesgln("  {:14} Pr:{:2} {:30} @ {:04x}:{:08x} (nsched: {})",
                thread.state_string(),
                thread.priority(),
                thread,
                get_cs(thread),
                get_eip(thread),
                thread.times_scheduled());
            break;
        }
        if (with_stack_traces) {
            auto trace_or_error = thread.backtrace();
            if (!trace_or_error.is_error()) {
                auto trace = trace_or_error.release_value();
                dbgln("Backtrace:");
                kernelputstr(trace->characters(), trace->length());
            }
        }
        return IterationDecision::Continue;
    });
}

}