Kernel: Rename Processor::id() => current_id()
And let id() be the non-static version that gives you the ID of a Processor object.
Parent: 0f03a8aece
Commit: dea93a8bb9
Notes (sideshowbarker, 2024-07-18 05:22:55 +09:00):
  Author: https://github.com/awesomekling
  Commit: https://github.com/SerenityOS/serenity/commit/dea93a8bb9f
9 changed files with 54 additions and 54 deletions
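
In short: the static accessor that answers "which CPU is this code running on?" is now Processor::current_id(), while plain id() is the per-object getter that used to be called get_id(). A condensed sketch of the new shape, assembled from the Processor class hunks below (ALWAYS_INLINE, read_gs_ptr() and m_cpu are the kernel's own names; the real class is of course much larger):

    class Processor {
    public:
        // Non-static: the ID of this particular Processor object (formerly get_id()).
        ALWAYS_INLINE u32 id() const { return m_cpu; }

        // Static: the ID of the processor the caller is currently running on (formerly id()).
        ALWAYS_INLINE static u32 current_id()
        {
            // See comment in Processor::current_thread
            return read_gs_ptr(__builtin_offsetof(Processor, m_cpu));
        }

    private:
        u32 m_cpu; // this processor's index
    };

Call sites follow suit throughout the diff: Processor::id() becomes Processor::current_id(), and per-object proc.get_id() becomes proc.id().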
@@ -789,9 +789,9 @@ void vdbgln(StringView fmtstr, TypeErasedFormatParams params)
 #ifdef KERNEL
     if (Kernel::Processor::is_initialized() && Kernel::Thread::current()) {
         auto& thread = *Kernel::Thread::current();
-        builder.appendff("\033[34;1m[#{} {}({}:{})]\033[0m: ", Kernel::Processor::id(), thread.process().name(), thread.pid().value(), thread.tid().value());
+        builder.appendff("\033[34;1m[#{} {}({}:{})]\033[0m: ", Kernel::Processor::current_id(), thread.process().name(), thread.pid().value(), thread.tid().value());
     } else {
-        builder.appendff("\033[34;1m[#{} Kernel]\033[0m: ", Kernel::Processor::id());
+        builder.appendff("\033[34;1m[#{} Kernel]\033[0m: ", Kernel::Processor::current_id());
     }
 #else
     static TriState got_process_name = TriState::Unknown;
@@ -308,7 +308,7 @@ public:
         return (Thread*)read_gs_ptr(__builtin_offsetof(Processor, m_idle_thread));
     }
 
-    ALWAYS_INLINE u32 get_id() const
+    ALWAYS_INLINE u32 id() const
     {
         // NOTE: This variant should only be used when iterating over all
         // Processor instances, or when it's guaranteed that the thread
@@ -318,7 +318,7 @@ public:
         return m_cpu;
     }
 
-    ALWAYS_INLINE static u32 id()
+    ALWAYS_INLINE static u32 current_id()
     {
         // See comment in Processor::current_thread
         return read_gs_ptr(__builtin_offsetof(Processor, m_cpu));
@@ -326,7 +326,7 @@ public:
 
     ALWAYS_INLINE static bool is_bootstrap_processor()
     {
-        return Processor::id() == 0;
+        return Processor::current_id() == 0;
     }
 
     ALWAYS_INLINE static FlatPtr current_in_irq()
@@ -223,7 +223,7 @@ void handle_crash(RegisterState const& regs, char const* description, int signal
     // make sure we switch back to the right page tables.
     MM.enter_process_paging_scope(process);
 
-    dmesgln("CRASH: CPU #{} {} in ring {}", Processor::id(), description, (regs.cs & 3));
+    dmesgln("CRASH: CPU #{} {} in ring {}", Processor::current_id(), description, (regs.cs & 3));
     dump(regs);
 
     if (!(regs.cs & 3)) {
@@ -275,7 +275,7 @@ void page_fault_handler(TrapFrame* trap)
     if constexpr (PAGE_FAULT_DEBUG) {
         u32 fault_page_directory = read_cr3();
         dbgln("CPU #{} ring {} {} page fault in PD={:#x}, {}{} {}",
-            Processor::is_initialized() ? Processor::id() : 0,
+            Processor::is_initialized() ? Processor::current_id() : 0,
             regs.cs & 3,
             regs.exception_code & 1 ? "PV" : "NP",
             fault_page_directory,
@@ -333,10 +333,10 @@ UNMAP_AFTER_INIT void Processor::initialize(u32 cpu)
     VERIFY(m_self == this);
     VERIFY(&current() == this); // sanity check
 
-    dmesgln("CPU[{}]: Supported features: {}", id(), features_string());
+    dmesgln("CPU[{}]: Supported features: {}", current_id(), features_string());
     if (!has_feature(CPUFeature::RDRAND))
-        dmesgln("CPU[{}]: No RDRAND support detected, randomness will be poor", id());
-    dmesgln("CPU[{}]: Physical address bit width: {}", id(), m_physical_address_bit_width);
+        dmesgln("CPU[{}]: No RDRAND support detected, randomness will be poor", current_id());
+    dmesgln("CPU[{}]: Physical address bit width: {}", current_id(), m_physical_address_bit_width);
 
     if (cpu == 0)
         idt_init();
@@ -378,7 +378,7 @@ UNMAP_AFTER_INIT void Processor::detect_hypervisor()
     hypervisor_signature_buffer[12] = '\0';
     StringView hypervisor_signature(hypervisor_signature_buffer);
 
-    dmesgln("CPU[{}]: CPUID hypervisor signature '{}' ({:#x} {:#x} {:#x}), max leaf {:#x}", id(), hypervisor_signature, hypervisor_leaf_range.ebx(), hypervisor_leaf_range.ecx(), hypervisor_leaf_range.edx(), hypervisor_leaf_range.eax());
+    dmesgln("CPU[{}]: CPUID hypervisor signature '{}' ({:#x} {:#x} {:#x}), max leaf {:#x}", current_id(), hypervisor_signature, hypervisor_leaf_range.ebx(), hypervisor_leaf_range.ecx(), hypervisor_leaf_range.edx(), hypervisor_leaf_range.eax());
 
     if (hypervisor_signature == "Microsoft Hv"sv)
         detect_hypervisor_hyperv(hypervisor_leaf_range);
@@ -397,18 +397,18 @@ UNMAP_AFTER_INIT void Processor::detect_hypervisor_hyperv(CPUID const& hyperviso
     interface_signature_buffer[4] = '\0';
     StringView hyperv_interface_signature(interface_signature_buffer);
 
-    dmesgln("CPU[{}]: Hyper-V interface signature '{}' ({:#x})", id(), hyperv_interface_signature, hypervisor_interface.eax());
+    dmesgln("CPU[{}]: Hyper-V interface signature '{}' ({:#x})", current_id(), hyperv_interface_signature, hypervisor_interface.eax());
 
     if (hypervisor_leaf_range.eax() < 0x40000001)
         return;
 
     CPUID hypervisor_sysid(0x40000002);
-    dmesgln("CPU[{}]: Hyper-V system identity {}.{}, build number {}", id(), hypervisor_sysid.ebx() >> 16, hypervisor_sysid.ebx() & 0xFFFF, hypervisor_sysid.eax());
+    dmesgln("CPU[{}]: Hyper-V system identity {}.{}, build number {}", current_id(), hypervisor_sysid.ebx() >> 16, hypervisor_sysid.ebx() & 0xFFFF, hypervisor_sysid.eax());
 
     if (hypervisor_leaf_range.eax() < 0x40000005 || hyperv_interface_signature != "Hv#1"sv)
         return;
 
-    dmesgln("CPU[{}]: Hyper-V hypervisor detected", id());
+    dmesgln("CPU[{}]: Hyper-V hypervisor detected", current_id());
 
     // TODO: Actually do something with Hyper-V.
 }
@@ -510,7 +510,7 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
         lock.unlock();
         capture_current_thread();
     } else if (thread.is_active()) {
-        VERIFY(thread.cpu() != Processor::id());
+        VERIFY(thread.cpu() != Processor::current_id());
         // If this is the case, the thread is currently running
         // on another processor. We can't trust the kernel stack as
         // it may be changing at any time. We need to probably send
@@ -520,7 +520,7 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
         smp_unicast(
             thread.cpu(),
             [&]() {
-                dbgln("CPU[{}] getting stack for cpu #{}", Processor::id(), proc.get_id());
+                dbgln("CPU[{}] getting stack for cpu #{}", Processor::current_id(), proc.id());
                 ProcessPagingScope paging_scope(thread.process());
                 VERIFY(&Processor::current() != &proc);
                 VERIFY(&thread == Processor::current_thread());
@@ -743,7 +743,7 @@ u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
         VERIFY(wake_count > 0);
     }
 
-    u32 current_id = Processor::current().id();
+    u32 current_id = Processor::current_id();
 
     u32 did_wake_count = 0;
     auto& apic = APIC::the();
@@ -841,7 +841,7 @@ bool Processor::smp_process_pending_messages()
             next_msg = cur_msg->next;
             auto msg = cur_msg->msg;
 
-            dbgln_if(SMP_DEBUG, "SMP[{}]: Processing message {}", id(), VirtualAddress(msg));
+            dbgln_if(SMP_DEBUG, "SMP[{}]: Processing message {}", current_id(), VirtualAddress(msg));
 
             switch (msg->type) {
             case ProcessorMessage::Callback:
@@ -853,7 +853,7 @@ bool Processor::smp_process_pending_messages()
                 VERIFY(Memory::is_user_range(VirtualAddress(msg->flush_tlb.ptr), msg->flush_tlb.page_count * PAGE_SIZE));
                 if (read_cr3() != msg->flush_tlb.page_directory->cr3()) {
                     // This processor isn't using this page directory right now, we can ignore this request
-                    dbgln_if(SMP_DEBUG, "SMP[{}]: No need to flush {} pages at {}", id(), msg->flush_tlb.page_count, VirtualAddress(msg->flush_tlb.ptr));
+                    dbgln_if(SMP_DEBUG, "SMP[{}]: No need to flush {} pages at {}", current_id(), msg->flush_tlb.page_count, VirtualAddress(msg->flush_tlb.ptr));
                     break;
                 }
             }
@@ -890,7 +890,7 @@ bool Processor::smp_enqueue_message(ProcessorMessage& msg)
     // Note that it's quite possible that the other processor may pop
     // the queue at any given time. We rely on the fact that the messages
     // are pooled and never get freed!
-    auto& msg_entry = msg.per_proc_entries[get_id()];
+    auto& msg_entry = msg.per_proc_entries[id()];
     VERIFY(msg_entry.msg == &msg);
     ProcessorMessageEntry* next = nullptr;
     for (;;) {
@@ -907,16 +907,16 @@ bool Processor::smp_enqueue_message(ProcessorMessage& msg)
 
 void Processor::smp_broadcast_message(ProcessorMessage& msg)
 {
-    auto& cur_proc = Processor::current();
+    auto& current_processor = Processor::current();
 
-    dbgln_if(SMP_DEBUG, "SMP[{}]: Broadcast message {} to cpus: {} proc: {}", cur_proc.get_id(), VirtualAddress(&msg), count(), VirtualAddress(&cur_proc));
+    dbgln_if(SMP_DEBUG, "SMP[{}]: Broadcast message {} to cpus: {} processor: {}", current_processor.id(), VirtualAddress(&msg), count(), VirtualAddress(&current_processor));
 
     msg.refs.store(count() - 1, AK::MemoryOrder::memory_order_release);
     VERIFY(msg.refs > 0);
     bool need_broadcast = false;
     for_each(
         [&](Processor& proc) {
-            if (&proc != &cur_proc) {
+            if (&proc != &current_processor) {
                 if (proc.smp_enqueue_message(msg))
                     need_broadcast = true;
             }
@@ -948,15 +948,15 @@ void Processor::smp_broadcast_wait_sync(ProcessorMessage& msg)
 
 void Processor::smp_unicast_message(u32 cpu, ProcessorMessage& msg, bool async)
 {
-    auto& cur_proc = Processor::current();
-    VERIFY(cpu != cur_proc.get_id());
-    auto& target_proc = processors()[cpu];
+    auto& current_processor = Processor::current();
+    VERIFY(cpu != current_processor.id());
+    auto& target_processor = processors()[cpu];
     msg.async = async;
 
-    dbgln_if(SMP_DEBUG, "SMP[{}]: Send message {} to cpu #{} proc: {}", cur_proc.get_id(), VirtualAddress(&msg), cpu, VirtualAddress(&target_proc));
+    dbgln_if(SMP_DEBUG, "SMP[{}]: Send message {} to cpu #{} processor: {}", current_processor.id(), VirtualAddress(&msg), cpu, VirtualAddress(&target_processor));
 
     msg.refs.store(1u, AK::MemoryOrder::memory_order_release);
-    if (target_proc->smp_enqueue_message(msg)) {
+    if (target_processor->smp_enqueue_message(msg)) {
         APIC::the().send_ipi(cpu);
     }
 
@@ -969,7 +969,7 @@ void Processor::smp_unicast_message(u32 cpu, ProcessorMessage& msg, bool async)
         // We need to process any messages that may have been sent to
         // us while we're waiting. This also checks if another processor
         // may have requested us to halt.
-        cur_proc.smp_process_pending_messages();
+        current_processor.smp_process_pending_messages();
     }
 
     smp_cleanup_message(msg);
@@ -1279,7 +1279,7 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
     if (from_regs.cr3 != to_regs.cr3)
        write_cr3(to_regs.cr3);
 
-    to_thread->set_cpu(processor.get_id());
+    to_thread->set_cpu(processor.id());
 
     auto in_critical = to_thread->saved_critical();
     VERIFY(in_critical > 0);
@@ -267,7 +267,7 @@ UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_th
         :: [new_esp] "g" (regs.esp),
         [new_eip] "a" (regs.eip),
         [from_to_thread] "b" (&initial_thread),
-        [cpu] "c" (id())
+        [cpu] "c" (Processor::current_id())
     );
     // clang-format on
 
@@ -530,7 +530,7 @@ private:
         [&](Processor& proc) {
             auto& info = proc.info();
             auto obj = array.add_object();
-            obj.add("processor", proc.get_id());
+            obj.add("processor", proc.id());
             obj.add("cpuid", info.cpuid());
             obj.add("family", info.display_family());
@@ -483,15 +483,15 @@ UNMAP_AFTER_INIT void APIC::init_finished(u32 cpu)
 
 void APIC::broadcast_ipi()
 {
-    dbgln_if(APIC_SMP_DEBUG, "SMP: Broadcast IPI from CPU #{}", Processor::id());
+    dbgln_if(APIC_SMP_DEBUG, "SMP: Broadcast IPI from CPU #{}", Processor::current_id());
     wait_for_pending_icr();
     write_icr(ICRReg(IRQ_APIC_IPI + IRQ_VECTOR_BASE, ICRReg::Fixed, ICRReg::Logical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::AllExcludingSelf));
 }
 
 void APIC::send_ipi(u32 cpu)
 {
-    dbgln_if(APIC_SMP_DEBUG, "SMP: Send IPI from CPU #{} to CPU #{}", Processor::id(), cpu);
-    VERIFY(cpu != Processor::id());
+    dbgln_if(APIC_SMP_DEBUG, "SMP: Send IPI from CPU #{} to CPU #{}", Processor::current_id(), cpu);
+    VERIFY(cpu != Processor::current_id());
     VERIFY(cpu < 8);
     wait_for_pending_icr();
     write_icr(ICRReg(IRQ_APIC_IPI + IRQ_VECTOR_BASE, ICRReg::Fixed, ICRReg::Logical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::NoShorthand, cpu));
@@ -575,7 +575,7 @@ u32 APIC::get_timer_divisor()
 
 bool APICIPIInterruptHandler::handle_interrupt(const RegisterState&)
 {
-    dbgln_if(APIC_SMP_DEBUG, "APIC IPI on CPU #{}", Processor::id());
+    dbgln_if(APIC_SMP_DEBUG, "APIC IPI on CPU #{}", Processor::current_id());
     return true;
 }
 
@@ -588,7 +588,7 @@ bool APICIPIInterruptHandler::eoi()
 
 bool APICErrInterruptHandler::handle_interrupt(const RegisterState&)
 {
-    dbgln("APIC: SMP error on CPU #{}", Processor::id());
+    dbgln("APIC: SMP error on CPU #{}", Processor::current_id());
     return true;
 }
 
@@ -687,11 +687,11 @@ PageFaultResponse MemoryManager::handle_page_fault(PageFault const& fault)
     VERIFY_INTERRUPTS_DISABLED();
     if (Processor::current_in_irq()) {
         dbgln("CPU[{}] BUG! Page fault while handling IRQ! code={}, vaddr={}, irq level: {}",
-            Processor::id(), fault.code(), fault.vaddr(), Processor::current_in_irq());
+            Processor::current_id(), fault.code(), fault.vaddr(), Processor::current_in_irq());
         dump_kernel_regions();
         return PageFaultResponse::ShouldCrash;
     }
-    dbgln_if(PAGE_FAULT_DEBUG, "MM: CPU[{}] handle_page_fault({:#04x}) at {}", Processor::id(), fault.code(), fault.vaddr());
+    dbgln_if(PAGE_FAULT_DEBUG, "MM: CPU[{}] handle_page_fault({:#04x}) at {}", Processor::current_id(), fault.code(), fault.vaddr());
     auto* region = find_region_from_vaddr(fault.vaddr());
     if (!region) {
         return PageFaultResponse::ShouldCrash;
@@ -1008,7 +1008,7 @@ u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
     mm_data.m_quickmap_prev_flags = mm_data.m_quickmap_in_use.lock();
     SpinlockLocker lock(s_mm_lock);
 
-    VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::id() * PAGE_SIZE);
+    VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE);
     u32 pte_idx = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;
 
     auto& pte = ((PageTableEntry*)boot_pd_kernel_pt1023)[pte_idx];
@@ -1028,7 +1028,7 @@ void MemoryManager::unquickmap_page()
     SpinlockLocker lock(s_mm_lock);
     auto& mm_data = get_data();
     VERIFY(mm_data.m_quickmap_in_use.is_locked());
-    VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::id() * PAGE_SIZE);
+    VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE);
     u32 pte_idx = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;
     auto& pte = ((PageTableEntry*)boot_pd_kernel_pt1023)[pte_idx];
     pte.clear();
@@ -77,7 +77,7 @@ static inline u32 thread_priority_to_priority_index(u32 thread_priority)
 
 Thread& Scheduler::pull_next_runnable_thread()
 {
-    auto affinity_mask = 1u << Processor::id();
+    auto affinity_mask = 1u << Processor::current_id();
 
     return g_ready_queues->with([&](auto& ready_queues) -> Thread& {
         auto priority_mask = ready_queues.mask;
@@ -116,7 +116,7 @@ Thread& Scheduler::pull_next_runnable_thread()
 
 Thread* Scheduler::peek_next_runnable_thread()
 {
-    auto affinity_mask = 1u << Processor::id();
+    auto affinity_mask = 1u << Processor::current_id();
 
     return g_ready_queues->with([&](auto& ready_queues) -> Thread* {
         auto priority_mask = ready_queues.mask;
@@ -154,7 +154,7 @@ bool Scheduler::dequeue_runnable_thread(Thread& thread, bool check_affinity)
            return false;
        }
 
-        if (check_affinity && !(thread.affinity() & (1 << Processor::id())))
+        if (check_affinity && !(thread.affinity() & (1 << Processor::current_id())))
            return false;
 
        VERIFY(ready_queues.mask & (1u << priority));
@@ -204,7 +204,7 @@ UNMAP_AFTER_INIT void Scheduler::start()
     idle_thread.set_initialized(true);
     processor.init_context(idle_thread, false);
     idle_thread.set_state(Thread::Running);
-    VERIFY(idle_thread.affinity() == (1u << processor.get_id()));
+    VERIFY(idle_thread.affinity() == (1u << processor.id()));
     processor.initialize_context_switching(idle_thread);
     VERIFY_NOT_REACHED();
 }
@@ -236,7 +236,7 @@ bool Scheduler::pick_next()
     auto& thread_to_schedule = pull_next_runnable_thread();
     if constexpr (SCHEDULER_DEBUG) {
         dbgln("Scheduler[{}]: Switch to {} @ {:#04x}:{:p}",
-            Processor::id(),
+            Processor::current_id(),
             thread_to_schedule,
             thread_to_schedule.regs().cs, thread_to_schedule.regs().ip());
     }
@@ -254,7 +254,7 @@ bool Scheduler::yield()
     InterruptDisabler disabler;
 
     auto current_thread = Thread::current();
-    dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: yielding thread {} in_irq={}", Processor::id(), *current_thread, Processor::current_in_irq());
+    dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: yielding thread {} in_irq={}", Processor::current_id(), *current_thread, Processor::current_in_irq());
     VERIFY(current_thread != nullptr);
     if (Processor::current_in_irq() || Processor::in_critical()) {
         // If we're handling an IRQ we can't switch context, or we're in
@@ -268,7 +268,7 @@ bool Scheduler::yield()
         return false;
 
     if constexpr (SCHEDULER_DEBUG)
-        dbgln("Scheduler[{}]: yield returns to thread {} in_irq={}", Processor::id(), *current_thread, Processor::current_in_irq());
+        dbgln("Scheduler[{}]: yield returns to thread {} in_irq={}", Processor::current_id(), *current_thread, Processor::current_in_irq());
     return true;
 }
 
@@ -294,7 +294,7 @@ bool Scheduler::context_switch(Thread* thread)
         const auto msg = "Scheduler[{}]: {} -> {} [prio={}] {:#04x}:{:p}";
 
         dbgln(msg,
-            Processor::id(), from_thread->tid().value(),
+            Processor::current_id(), from_thread->tid().value(),
             thread->tid().value(), thread->priority(), thread->regs().cs, thread->regs().ip());
 #endif
     }
@@ -485,7 +485,7 @@ void Scheduler::timer_tick(const RegisterState& regs)
 
     if (current_thread->previous_mode() == Thread::PreviousMode::UserMode && current_thread->should_die() && !current_thread->is_blocked()) {
         SpinlockLocker scheduler_lock(g_scheduler_lock);
-        dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: Terminating user mode thread {}", Processor::id(), *current_thread);
+        dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: Terminating user mode thread {}", Processor::current_id(), *current_thread);
         current_thread->set_state(Thread::Dying);
         Processor::current().invoke_scheduler_async();
         return;
@@ -500,7 +500,7 @@ void Scheduler::timer_tick(const RegisterState& regs)
         // time slice and let it run!
         current_thread->set_ticks_left(time_slice_for(*current_thread));
         current_thread->did_schedule();
-        dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: No other threads ready, give {} another timeslice", Processor::id(), *current_thread);
+        dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: No other threads ready, give {} another timeslice", Processor::current_id(), *current_thread);
         return;
     }
 
@@ -530,7 +530,7 @@ void Scheduler::notify_finalizer()
 void Scheduler::idle_loop(void*)
 {
     auto& proc = Processor::current();
-    dbgln("Scheduler[{}]: idle loop running", proc.get_id());
+    dbgln("Scheduler[{}]: idle loop running", proc.id());
     VERIFY(are_interrupts_enabled());
 
     for (;;) {
@@ -542,7 +542,7 @@ void Scheduler::idle_loop(void*)
 #if SCHEDULE_ON_ALL_PROCESSORS
         yield();
 #else
-        if (Processor::id() == 0)
+        if (Processor::current_id() == 0)
             yield();
 #endif
     }
@@ -566,7 +566,7 @@ TotalTimeScheduled Scheduler::get_total_time_scheduled()
 
 void dump_thread_list(bool with_stack_traces)
 {
-    dbgln("Scheduler thread list for processor {}:", Processor::id());
+    dbgln("Scheduler thread list for processor {}:", Processor::current_id());
 
     auto get_cs = [](Thread& thread) -> u16 {
         if (!thread.current_trap())