/*
 * Copyright (c) 2018-2022, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/BuiltinWrappers.h>
#include <AK/ScopeGuard.h>
#include <AK/Singleton.h>
#include <AK/Time.h>
#include <Kernel/Arch/TrapFrame.h>
#include <Kernel/Debug.h>
#include <Kernel/InterruptDisabler.h>
#include <Kernel/Panic.h>
#include <Kernel/PerformanceManager.h>
#include <Kernel/Process.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Sections.h>
#include <Kernel/Time/TimeManagement.h>
#include <Kernel/kstdio.h>

namespace Kernel {

RecursiveSpinlock g_scheduler_lock { LockRank::None };

static u32 time_slice_for(Thread const& thread)
{
    // One time slice unit == 4ms (assuming 250 ticks/second)
    if (thread.is_idle_thread())
        return 1;
    return 2;
}

READONLY_AFTER_INIT Thread* g_finalizer;
READONLY_AFTER_INIT WaitQueue* g_finalizer_wait_queue;
Atomic<bool> g_finalizer_has_work { false };
READONLY_AFTER_INIT static Process* s_colonel_process;
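
// Runnable threads are kept in per-priority buckets. ThreadReadyQueues::mask has one
// bit per bucket, so the scheduler can find the highest-priority non-empty bucket with
// a single bit scan (see pull_next_runnable_thread() below).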
struct ThreadReadyQueue {
    IntrusiveList<&Thread::m_ready_queue_node> thread_list;
};

struct ThreadReadyQueues {
    u32 mask {};
    static constexpr size_t count = sizeof(mask) * 8;
    Array<ThreadReadyQueue, count> queues;
};

static Singleton<SpinlockProtected<ThreadReadyQueues>> g_ready_queues;

static SpinlockProtected<TotalTimeScheduled> g_total_time_scheduled { LockRank::None };

static void dump_thread_list(bool = false);

static inline u32 thread_priority_to_priority_index(u32 thread_priority)
{
    // Converts the priority in the range of THREAD_PRIORITY_MIN...THREAD_PRIORITY_MAX
    // to an index into g_ready_queues where 0 is the highest priority bucket
    VERIFY(thread_priority >= THREAD_PRIORITY_MIN && thread_priority <= THREAD_PRIORITY_MAX);
    constexpr u32 thread_priority_count = THREAD_PRIORITY_MAX - THREAD_PRIORITY_MIN + 1;
    static_assert(thread_priority_count > 0);
    auto priority_bucket = ((thread_priority_count - (thread_priority - THREAD_PRIORITY_MIN)) / thread_priority_count) * (ThreadReadyQueues::count - 1);
    VERIFY(priority_bucket < ThreadReadyQueues::count);
    return priority_bucket;
}
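
// Removes and returns the highest-priority thread that may run on this processor
// (i.e. one whose affinity mask includes the current CPU and that is not already
// active on another core). Falls back to the idle thread if nothing is runnable.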
Thread& Scheduler::pull_next_runnable_thread()
{
    auto affinity_mask = 1u << Processor::current_id();

    return g_ready_queues->with([&](auto& ready_queues) -> Thread& {
        auto priority_mask = ready_queues.mask;
        while (priority_mask != 0) {
            auto priority = bit_scan_forward(priority_mask);
            VERIFY(priority > 0);
            auto& ready_queue = ready_queues.queues[--priority];
            for (auto& thread : ready_queue.thread_list) {
                VERIFY(thread.m_runnable_priority == (int)priority);
                if (thread.is_active())
                    continue;
                if (!(thread.affinity() & affinity_mask))
                    continue;
                thread.m_runnable_priority = -1;
                ready_queue.thread_list.remove(thread);
                if (ready_queue.thread_list.is_empty())
                    ready_queues.mask &= ~(1u << priority);
                // Mark it as active because we are using this thread. This is similar
                // to comparing it with Processor::current_thread, but when there are
                // multiple processors there's no easy way to check whether the thread
                // is actually still needed. This prevents accidental finalization when
                // a thread is no longer in Running state, but running on another core.

                // We need to mark it active here so that this thread won't be
                // scheduled on another core if it were to be queued before actually
                // switching to it.
                // FIXME: Figure out a better way maybe?
                thread.set_active(true);
                return thread;
            }
            priority_mask &= ~(1u << priority);
        }
        return *Processor::idle_thread();
    });
}
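
// Like pull_next_runnable_thread(), but only looks: returns a candidate thread
// without dequeuing it, or nullptr if no other thread is ready for this processor.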
Thread* Scheduler::peek_next_runnable_thread()
{
    auto affinity_mask = 1u << Processor::current_id();

    return g_ready_queues->with([&](auto& ready_queues) -> Thread* {
        auto priority_mask = ready_queues.mask;
        while (priority_mask != 0) {
            auto priority = bit_scan_forward(priority_mask);
            VERIFY(priority > 0);
            auto& ready_queue = ready_queues.queues[--priority];
            for (auto& thread : ready_queue.thread_list) {
                VERIFY(thread.m_runnable_priority == (int)priority);
                if (thread.is_active())
                    continue;
                if (!(thread.affinity() & affinity_mask))
                    continue;
                return &thread;
            }
            priority_mask &= ~(1u << priority);
        }

        // Unlike in pull_next_runnable_thread() we don't want to fall back to
        // the idle thread. We just want to see if we have any other thread ready
        // to be scheduled.
        return nullptr;
    });
}
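
// Removes the given thread from its ready queue. Returns true on success (idle threads
// are never queued and trivially succeed); returns false if the thread was not queued
// or, when check_affinity is set, if it may not run on this processor.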
bool Scheduler::dequeue_runnable_thread(Thread& thread, bool check_affinity)
{
    if (thread.is_idle_thread())
        return true;

    return g_ready_queues->with([&](auto& ready_queues) {
        auto priority = thread.m_runnable_priority;
        if (priority < 0) {
            VERIFY(!thread.m_ready_queue_node.is_in_list());
            return false;
        }

        if (check_affinity && !(thread.affinity() & (1 << Processor::current_id())))
            return false;

        VERIFY(ready_queues.mask & (1u << priority));
        auto& ready_queue = ready_queues.queues[priority];
        thread.m_runnable_priority = -1;
        ready_queue.thread_list.remove(thread);
        if (ready_queue.thread_list.is_empty())
            ready_queues.mask &= ~(1u << priority);
        return true;
    });
}
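
// Appends the thread to the ready queue bucket matching its priority and marks that
// bucket as non-empty. Idle threads are never enqueued.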
void Scheduler::enqueue_runnable_thread(Thread& thread)
{
    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
    if (thread.is_idle_thread())
        return;
    auto priority = thread_priority_to_priority_index(thread.priority());

    g_ready_queues->with([&](auto& ready_queues) {
        VERIFY(thread.m_runnable_priority < 0);
        thread.m_runnable_priority = (int)priority;
        VERIFY(!thread.m_ready_queue_node.is_in_list());
        auto& ready_queue = ready_queues.queues[priority];
        bool was_empty = ready_queue.thread_list.is_empty();
        ready_queue.thread_list.append(thread);
        if (was_empty)
            ready_queues.mask |= (1u << priority);
    });
}

UNMAP_AFTER_INIT void Scheduler::start()
{
    VERIFY_INTERRUPTS_DISABLED();

    // We need to acquire our scheduler lock, which will be released
    // by the idle thread once control is transferred there
    g_scheduler_lock.lock();

    auto& processor = Processor::current();
    VERIFY(processor.is_initialized());
    auto& idle_thread = *Processor::idle_thread();
    VERIFY(processor.current_thread() == &idle_thread);
    idle_thread.set_ticks_left(time_slice_for(idle_thread));
    idle_thread.did_schedule();
    idle_thread.set_initialized(true);
    processor.init_context(idle_thread, false);
    idle_thread.set_state(Thread::State::Running);
    VERIFY(idle_thread.affinity() == (1u << processor.id()));
    processor.initialize_context_switching(idle_thread);
    VERIFY_NOT_REACHED();
}
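
// Picks the next thread to run on this processor and context-switches to it.
// Expects interrupts to be disabled; acquires the scheduler lock itself.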
void Scheduler::pick_next()
{
    VERIFY_INTERRUPTS_DISABLED();

    // Set the in_scheduler flag before acquiring the spinlock. This
    // prevents a recursive call into Scheduler::invoke_async upon
    // leaving the scheduler lock.
    ScopedCritical critical;
    Processor::set_current_in_scheduler(true);
    ScopeGuard guard(
        []() {
            // We may be on a different processor after we got switched
            // back to this thread!
            VERIFY(Processor::current_in_scheduler());
            Processor::set_current_in_scheduler(false);
        });

    SpinlockLocker lock(g_scheduler_lock);

    if constexpr (SCHEDULER_RUNNABLE_DEBUG) {
        dump_thread_list();
    }

    auto& thread_to_schedule = pull_next_runnable_thread();
    if constexpr (SCHEDULER_DEBUG) {
        dbgln("Scheduler[{}]: Switch to {} @ {:#04x}:{:p}",
            Processor::current_id(),
            thread_to_schedule,
            thread_to_schedule.regs().cs, thread_to_schedule.regs().ip());
    }

    // We need to leave our first critical section before switching context,
    // but since we're still holding the scheduler lock we're still in a critical section
    critical.leave();

    thread_to_schedule.set_ticks_left(time_slice_for(thread_to_schedule));
    context_switch(&thread_to_schedule);
}
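
// Voluntarily gives up the CPU. When called from an IRQ handler or a critical
// section, the reschedule is deferred until that context is left.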
void Scheduler::yield()
{
    InterruptDisabler disabler;

    auto const* current_thread = Thread::current();
    dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: yielding thread {} in_irq={}", Processor::current_id(), *current_thread, Processor::current_in_irq());
    VERIFY(current_thread != nullptr);
    if (Processor::current_in_irq() || Processor::in_critical()) {
        // If we're handling an IRQ we can't switch context, or we're in
        // a critical section where we don't want to switch contexts, then
        // delay until exiting the trap or critical section
        Processor::current().invoke_scheduler_async();
        return;
    }

    Scheduler::pick_next();
}

void Scheduler::context_switch(Thread* thread)
{
    thread->did_schedule();

    auto* from_thread = Thread::current();
    VERIFY(from_thread);

    if (from_thread == thread)
        return;

    // If the last thread hasn't blocked (still marked as running),
    // mark it as runnable for the next round.
    if (from_thread->state() == Thread::State::Running)
        from_thread->set_state(Thread::State::Runnable);

#ifdef LOG_EVERY_CONTEXT_SWITCH
    auto const msg = "Scheduler[{}]: {} -> {} [prio={}] {:#04x}:{:p}";

    dbgln(msg,
        Processor::current_id(), from_thread->tid().value(),
        thread->tid().value(), thread->priority(), thread->regs().cs, thread->regs().ip());
#endif

    auto& proc = Processor::current();
    if (!thread->is_initialized()) {
        proc.init_context(*thread, false);
        thread->set_initialized(true);
    }
    thread->set_state(Thread::State::Running);

    PerformanceManager::add_context_switch_perf_event(*from_thread, *thread);

    proc.switch_context(from_thread, thread);

    // NOTE: from_thread at this point reflects the thread we were
    // switched from, and thread reflects Thread::current()
    enter_current(*from_thread);
    VERIFY(thread == Thread::current());

    {
        SpinlockLocker lock(thread->get_lock());
        thread->dispatch_one_pending_signal();
    }
}
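
// Finishes a context switch on the side of the newly running thread: accounts the
// kernel time spent since entering the trap, deactivates the previous thread and,
// if that thread is dying, wakes the finalizer.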
void Scheduler::enter_current(Thread& prev_thread)
{
    VERIFY(g_scheduler_lock.is_locked_by_current_processor());

    // We already recorded the scheduled time when entering the trap, so this merely accounts for the kernel time since then
    auto scheduler_time = TimeManagement::scheduler_current_time();
    prev_thread.update_time_scheduled(scheduler_time, true, true);
    auto* current_thread = Thread::current();
    current_thread->update_time_scheduled(scheduler_time, true, false);

    // NOTE: When doing an exec(), we will context switch from and to the same thread!
    // In that case, we must not mark the previous thread as inactive.
    if (&prev_thread != current_thread)
        prev_thread.set_active(false);

    if (prev_thread.state() == Thread::State::Dying) {
        // If the thread we switched from is marked as dying, then notify
        // the finalizer. Note that as soon as we leave the scheduler lock
        // the finalizer may free from_thread!
        notify_finalizer();
    }
}

void Scheduler::leave_on_first_switch(InterruptsState previous_interrupts_state)
{
    // This is called when a thread is switched into for the first time.
    // At this point, enter_current has already been called, but because
    // Scheduler::context_switch is not in the call stack we need to
    // clean up and release locks manually here
    g_scheduler_lock.unlock(previous_interrupts_state);

    VERIFY(Processor::current_in_scheduler());
    Processor::set_current_in_scheduler(false);
}

void Scheduler::prepare_after_exec()
{
    // This is called after exec() when doing a context "switch" into
    // the new process. This is called from Processor::assume_context
    VERIFY(g_scheduler_lock.is_locked_by_current_processor());

    VERIFY(!Processor::current_in_scheduler());
    Processor::set_current_in_scheduler(true);
}

void Scheduler::prepare_for_idle_loop()
{
    // This is called when the CPU finished setting up the idle loop
    // and is about to run it. We need to acquire the scheduler lock
    VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
    g_scheduler_lock.lock();

    VERIFY(!Processor::current_in_scheduler());
    Processor::set_current_in_scheduler(true);
}

Process* Scheduler::colonel()
{
    VERIFY(s_colonel_process);
    return s_colonel_process;
}

UNMAP_AFTER_INIT void Scheduler::initialize()
{
    VERIFY(Processor::is_initialized()); // sanity check
    VERIFY(TimeManagement::is_initialized());

    LockRefPtr<Thread> idle_thread;
    g_finalizer_wait_queue = new WaitQueue;

    g_finalizer_has_work.store(false, AK::MemoryOrder::memory_order_release);
    s_colonel_process = Process::create_kernel_process(idle_thread, KString::must_create("colonel"sv), idle_loop, nullptr, 1, Process::RegisterProcess::No).leak_ref();
    VERIFY(s_colonel_process);
    VERIFY(idle_thread);
    idle_thread->set_priority(THREAD_PRIORITY_MIN);
    idle_thread->set_name(KString::must_create("Idle Task #0"sv));

    set_idle_thread(idle_thread);
}

UNMAP_AFTER_INIT void Scheduler::set_idle_thread(Thread* idle_thread)
{
    idle_thread->set_idle_thread();
    Processor::current().set_idle_thread(*idle_thread);
    Processor::set_current_thread(*idle_thread);
}

UNMAP_AFTER_INIT Thread* Scheduler::create_ap_idle_thread(u32 cpu)
{
    VERIFY(cpu != 0);
    // This function is called on the bsp, but creates an idle thread for another AP
    VERIFY(Processor::is_bootstrap_processor());

    VERIFY(s_colonel_process);
    Thread* idle_thread = s_colonel_process->create_kernel_thread(idle_loop, nullptr, THREAD_PRIORITY_MIN, MUST(KString::formatted("idle thread #{}", cpu)), 1 << cpu, false);
    VERIFY(idle_thread);
    return idle_thread;
}

void Scheduler::add_time_scheduled(u64 time_to_add, bool is_kernel)
{
    g_total_time_scheduled.with([&](auto& total_time_scheduled) {
        total_time_scheduled.total += time_to_add;
        if (is_kernel)
            total_time_scheduled.total_kernel += time_to_add;
    });
}
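
// Invoked from the timer interrupt on every tick for the current processor. Charges
// the tick to the running thread and requests an asynchronous reschedule once its
// time slice is used up and another thread is ready to run.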
void Scheduler::timer_tick(RegisterState const& regs)
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(Processor::current_in_irq());

    auto* current_thread = Processor::current_thread();
    if (!current_thread)
        return;

    // Sanity checks
    VERIFY(current_thread->current_trap());
    VERIFY(current_thread->current_trap()->regs == &regs);

    if (current_thread->process().is_kernel_process()) {
        // Because the previous mode when entering/exiting kernel threads never changes
        // we never update the time scheduled. So we need to update it manually on the
        // timer interrupt
        current_thread->update_time_scheduled(TimeManagement::scheduler_current_time(), true, false);
    }

    if (current_thread->previous_mode() == Thread::PreviousMode::UserMode && current_thread->should_die() && !current_thread->is_blocked()) {
        SpinlockLocker scheduler_lock(g_scheduler_lock);
        dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: Terminating user mode thread {}", Processor::current_id(), *current_thread);
        current_thread->set_state(Thread::State::Dying);
        Processor::current().invoke_scheduler_async();
        return;
    }

    if (current_thread->tick())
        return;

    if (!current_thread->is_idle_thread() && !peek_next_runnable_thread()) {
        // If no other thread is ready to be scheduled we don't need to
        // switch to the idle thread. Just give the current thread another
        // time slice and let it run!
        current_thread->set_ticks_left(time_slice_for(*current_thread));
        current_thread->did_schedule();
        dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: No other threads ready, give {} another timeslice", Processor::current_id(), *current_thread);
        return;
    }

    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(Processor::current_in_irq());
    Processor::current().invoke_scheduler_async();
}

void Scheduler::invoke_async()
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(!Processor::current_in_irq());

    // Since this function is called when leaving critical sections (such
    // as a Spinlock), we need to check if we're not already doing this
    // to prevent recursion
    if (!Processor::current_in_scheduler())
        pick_next();
}
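
// Wakes the finalizer task, but only on the transition from "no work" to "work
// pending", so repeated notifications don't wake it more than once.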
void Scheduler::notify_finalizer()
{
    if (!g_finalizer_has_work.exchange(true, AK::MemoryOrder::memory_order_acq_rel))
        g_finalizer_wait_queue->wake_all();
}

void Scheduler::idle_loop(void*)
{
    auto& proc = Processor::current();
    dbgln("Scheduler[{}]: idle loop running", proc.id());
    VERIFY(Processor::are_interrupts_enabled());

    for (;;) {
        proc.idle_begin();
        asm("hlt");

        proc.idle_end();
        VERIFY_INTERRUPTS_ENABLED();
        yield();
    }
}

void Scheduler::dump_scheduler_state(bool with_stack_traces)
{
    dump_thread_list(with_stack_traces);
}

bool Scheduler::is_initialized()
{
    // The scheduler is initialized iff the idle thread exists
    return Processor::idle_thread() != nullptr;
}

TotalTimeScheduled Scheduler::get_total_time_scheduled()
{
    return g_total_time_scheduled.with([&](auto& total_time_scheduled) { return total_time_scheduled; });
}

void dump_thread_list(bool with_stack_traces)
{
    dbgln("Scheduler thread list for processor {}:", Processor::current_id());

    auto get_cs = [](Thread& thread) -> u16 {
#if ARCH(I386) || ARCH(X86_64)
        if (!thread.current_trap())
            return thread.regs().cs;
        return thread.get_register_dump_from_stack().cs;
#elif ARCH(AARCH64)
        (void)thread;
        return 0;
#else
#    error Unknown architecture
#endif
    };

    auto get_eip = [](Thread& thread) -> u32 {
        if (!thread.current_trap())
            return thread.regs().ip();
        return thread.get_register_dump_from_stack().ip();
    };

    Thread::for_each([&](Thread& thread) {
        auto color = thread.process().is_kernel_process() ? "\x1b[34;1m"sv : "\x1b[33;1m"sv;
        switch (thread.state()) {
        case Thread::State::Dying:
            dmesgln("  {}{:30}\x1b[0m @ {:04x}:{:08x} is {:14} (Finalizable: {}, nsched: {})",
                color,
                thread,
                get_cs(thread),
                get_eip(thread),
                thread.state_string(),
                thread.is_finalizable(),
                thread.times_scheduled());
            break;
        default:
            dmesgln("  {}{:30}\x1b[0m @ {:04x}:{:08x} is {:14} (Pr:{:2}, nsched: {})",
                color,
                thread,
                get_cs(thread),
                get_eip(thread),
                thread.state_string(),
                thread.priority(),
                thread.times_scheduled());
            break;
        }
        if (thread.state() == Thread::State::Blocked && thread.blocking_mutex()) {
            dmesgln("    Blocking on Mutex {:#x} ({})", thread.blocking_mutex(), thread.blocking_mutex()->name());
        }
        if (thread.state() == Thread::State::Blocked && thread.blocker()) {
            dmesgln("    Blocking on Blocker {:#x}", thread.blocker());
        }
#if LOCK_DEBUG
        thread.for_each_held_lock([](auto const& entry) {
            dmesgln("    Holding lock {:#x} ({}) at {}", entry.lock, entry.lock->name(), entry.lock_location);
        });
#endif
        if (with_stack_traces) {
            auto trace_or_error = thread.backtrace();
            if (!trace_or_error.is_error()) {
                auto trace = trace_or_error.release_value();
                dbgln("Backtrace:");
                kernelputstr(trace->characters(), trace->length());
            }
        }
        return IterationDecision::Continue;
    });
}

}