Kernel: Move deferred call code into separate DeferredCallPool class

This allows us to share this code between the x86_64 and aarch64 builds.
Timon Kruiper 2023-02-22 23:32:00 +01:00 committed by Andrew Kaster
parent 1f68ac600c
commit c31dc82b17
7 changed files with 154 additions and 105 deletions
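
For orientation: Processor::deferred_call_queue() is the public entry point whose per-architecture implementations this commit unifies. A hypothetical call site (not part of this diff) looks like this:

    // Queue work from an IRQ handler; the callback runs once the processor
    // leaves the IRQ/critical section (see exit_trap() and
    // do_leave_critical() in the hunks below).
    Processor::deferred_call_queue([] {
        dbgln("deferred: running outside the IRQ handler");
    });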

Kernel/Arch/DeferredCallPool.cpp (new file)

@@ -0,0 +1,93 @@
/*
 * Copyright (c) 2020, Tom <tomut@yahoo.com>
 * Copyright (c) 2023, Timon Kruiper <timonkruiper@gmail.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/Arch/DeferredCallEntry.h>
#include <Kernel/Arch/DeferredCallPool.h>
#include <Kernel/Heap/kmalloc.h>

namespace Kernel {

void DeferredCallPool::init()
{
    size_t pool_count = sizeof(m_deferred_call_pool) / sizeof(m_deferred_call_pool[0]);
    for (size_t i = 0; i < pool_count; i++) {
        auto& entry = m_deferred_call_pool[i];
        entry.next = i < pool_count - 1 ? &m_deferred_call_pool[i + 1] : nullptr;
        new (entry.handler_storage) DeferredCallEntry::HandlerFunction;
        entry.was_allocated = false;
    }
    m_pending_deferred_calls = nullptr;
    m_free_deferred_call_pool_entry = &m_deferred_call_pool[0];
}

void DeferredCallPool::return_to_pool(DeferredCallEntry* entry)
{
    VERIFY(!entry->was_allocated);

    entry->handler_value() = {};

    entry->next = m_free_deferred_call_pool_entry;
    m_free_deferred_call_pool_entry = entry;
}

DeferredCallEntry* DeferredCallPool::get_free()
{
    if (m_free_deferred_call_pool_entry) {
        // Fast path, we have an entry in our pool
        auto* entry = m_free_deferred_call_pool_entry;
        m_free_deferred_call_pool_entry = entry->next;
        VERIFY(!entry->was_allocated);
        return entry;
    }

    auto* entry = new DeferredCallEntry;
    new (entry->handler_storage) DeferredCallEntry::HandlerFunction;
    entry->was_allocated = true;
    return entry;
}

void DeferredCallPool::execute_pending()
{
    if (!m_pending_deferred_calls)
        return;

    auto* pending_list = m_pending_deferred_calls;
    m_pending_deferred_calls = nullptr;

    // We pulled the stack of pending deferred calls in LIFO order, so we need to reverse the list first
    auto reverse_list = [](DeferredCallEntry* list) -> DeferredCallEntry* {
        DeferredCallEntry* rev_list = nullptr;
        while (list) {
            auto next = list->next;
            list->next = rev_list;
            rev_list = list;
            list = next;
        }
        return rev_list;
    };

    pending_list = reverse_list(pending_list);

    do {
        pending_list->invoke_handler();

        // Return the entry back to the pool, or free it
        auto* next = pending_list->next;
        if (pending_list->was_allocated) {
            pending_list->handler_value().~Function();
            delete pending_list;
        } else
            return_to_pool(pending_list);

        pending_list = next;
    } while (pending_list);
}

void DeferredCallPool::queue_entry(DeferredCallEntry* entry)
{
    entry->next = m_pending_deferred_calls;
    m_pending_deferred_calls = entry;
}

}
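
Since queue_entry() pushes onto the front of an intrusive singly-linked list, the pending list holds entries in LIFO order; execute_pending() therefore reverses it before invoking handlers, so deferred calls run in the order they were queued. A standalone userspace model of that cycle (illustrative Entry type, not the kernel's):

    #include <cstdio>
    #include <initializer_list>

    // Stand-in for DeferredCallEntry: just the intrusive link and a payload.
    struct Entry {
        Entry* next { nullptr };
        int id { 0 };
    };

    int main()
    {
        Entry a { nullptr, 1 }, b { nullptr, 2 }, c { nullptr, 3 };

        // queue_entry(): push each entry on the front -> list is c, b, a (LIFO).
        Entry* pending = nullptr;
        for (Entry* e : { &a, &b, &c }) {
            e->next = pending;
            pending = e;
        }

        // execute_pending(), step 1: reverse the list back to a, b, c.
        Entry* reversed = nullptr;
        while (pending) {
            Entry* next = pending->next;
            pending->next = reversed;
            reversed = pending;
            pending = next;
        }

        // execute_pending(), step 2: walk the list, invoking each handler.
        for (Entry* e = reversed; e; e = e->next)
            printf("invoking deferred call %d\n", e->id); // prints 1, 2, 3
    }

Note also the sizing strategy: each pool holds five statically-allocated entries, and get_free() falls back to the heap (was_allocated = true) when they run out; execute_pending() then deletes those entries instead of returning them to the free list.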

Kernel/Arch/DeferredCallPool.h (new file)

@@ -0,0 +1,28 @@
/*
 * Copyright (c) 2020, Tom <tomut@yahoo.com>
 * Copyright (c) 2023, Timon Kruiper <timonkruiper@gmail.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <Kernel/Arch/DeferredCallEntry.h>

namespace Kernel {

class DeferredCallPool {
public:
    void init();
    void execute_pending();
    DeferredCallEntry* get_free();
    void return_to_pool(DeferredCallEntry*);
    void queue_entry(DeferredCallEntry*);

private:
    DeferredCallEntry* m_pending_deferred_calls; // in reverse order
    DeferredCallEntry* m_free_deferred_call_pool_entry;
    DeferredCallEntry m_deferred_call_pool[5];
};

}

Kernel/Arch/aarch64/Processor.cpp

@@ -91,6 +91,8 @@ void Processor::early_initialize(u32 cpu)
 void Processor::initialize(u32)
 {
+    m_deferred_call_pool.init();
+
     dmesgln("CPU[{}]: Supports {}", m_cpu, build_cpu_feature_names(m_features));
     dmesgln("CPU[{}]: Physical address bit width: {}", m_cpu, m_physical_address_bit_width);
     dmesgln("CPU[{}]: Virtual address bit width: {}", m_cpu, m_virtual_address_bit_width);
@@ -132,7 +134,7 @@ void Processor::do_leave_critical()
     VERIFY(m_in_critical > 0);
     if (m_in_critical == 1) {
         if (m_in_irq == 0) {
-            // FIXME: Call deferred_call_execute_pending()!
+            m_deferred_call_pool.execute_pending();
             VERIFY(m_in_critical == 1);
         }
         m_in_critical = 0;
@@ -402,6 +404,10 @@ void Processor::exit_trap(TrapFrame& trap)
     // FIXME: Figure out if we need prev_irq_level, see duplicated code in Kernel/Arch/x86/common/Processor.cpp
     m_in_irq = 0;
 
+    // Process the deferred call queue. Among other things, this ensures
+    // that any pending thread unblocks happen before we enter the scheduler.
+    m_deferred_call_pool.execute_pending();
+
     auto* current_thread = Processor::current_thread();
     if (current_thread) {
         auto& current_trap = current_thread->current_trap();
@@ -542,4 +548,17 @@ void Processor::set_thread_specific_data(VirtualAddress thread_specific_data)
     Aarch64::Asm::set_tpidr_el0(thread_specific_data.get());
 }
 
+void Processor::deferred_call_queue(Function<void()> callback)
+{
+    // NOTE: If we are called outside of a critical section and outside
+    // of an irq handler, the function will be executed before we return!
+    ScopedCritical critical;
+    auto& cur_proc = Processor::current();
+
+    auto* entry = cur_proc.m_deferred_call_pool.get_free();
+    entry->handler_value() = move(callback);
+
+    cur_proc.m_deferred_call_pool.queue_entry(entry);
+}
+
 }
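
The NOTE in deferred_call_queue() follows directly from do_leave_critical() above: if the ScopedCritical is the only critical section held and no IRQ is being handled, dropping it drains the queue on the spot. A hypothetical caller (not from this commit) would therefore observe:

    // Called with no surrounding critical section, outside any IRQ handler.
    bool ran = false;
    Processor::deferred_call_queue([&ran] {
        ran = true;
    });
    // ~ScopedCritical inside deferred_call_queue() dropped m_in_critical to
    // zero, which ran do_leave_critical() -> execute_pending(), so the
    // callback has already been invoked by this point.
    VERIFY(ran);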

Kernel/Arch/aarch64/Processor.h

@@ -11,6 +11,8 @@
 #include <AK/Function.h>
 #include <AK/Types.h>
 
+#include <Kernel/Arch/DeferredCallEntry.h>
+#include <Kernel/Arch/DeferredCallPool.h>
 #include <Kernel/Arch/ProcessorSpecificDataID.h>
 #include <Kernel/Arch/aarch64/CPUID.h>
 #include <Kernel/Arch/aarch64/Registers.h>
@@ -255,10 +257,7 @@
         return Processor::current_id() == 0;
     }
 
-    static void deferred_call_queue(Function<void()> /* callback */)
-    {
-        TODO_AARCH64();
-    }
+    static void deferred_call_queue(Function<void()>);
 
     static u32 smp_wake_n_idle_processors(u32 wake_count);
@@ -282,6 +281,8 @@
     void do_leave_critical();
 
+    DeferredCallPool m_deferred_call_pool {};
+
     u32 m_cpu;
     CPUFeature::Type m_features;
     u8 m_physical_address_bit_width;

Kernel/Arch/x86_64/Processor.cpp

@@ -626,7 +626,7 @@ UNMAP_AFTER_INIT void Processor::early_initialize(u32 cpu)
         g_total_processors.fetch_add(1u, AK::MemoryOrder::memory_order_acq_rel);
     }
 
-    deferred_call_pool_init();
+    m_deferred_call_pool.init();
 
     cpu_setup();
     gdt_init();
@@ -948,7 +948,7 @@ void Processor::exit_trap(TrapFrame& trap)
 
     // Process the deferred call queue. Among other things, this ensures
    // that any pending thread unblocks happen before we enter the scheduler.
-    deferred_call_execute_pending();
+    m_deferred_call_pool.execute_pending();
 
     auto* current_thread = Processor::current_thread();
     if (current_thread) {
@@ -1345,92 +1345,6 @@ void Processor::Processor::halt()
     halt_this();
 }
 
-UNMAP_AFTER_INIT void Processor::deferred_call_pool_init()
-{
-    size_t pool_count = sizeof(m_deferred_call_pool) / sizeof(m_deferred_call_pool[0]);
-    for (size_t i = 0; i < pool_count; i++) {
-        auto& entry = m_deferred_call_pool[i];
-        entry.next = i < pool_count - 1 ? &m_deferred_call_pool[i + 1] : nullptr;
-        new (entry.handler_storage) DeferredCallEntry::HandlerFunction;
-        entry.was_allocated = false;
-    }
-    m_pending_deferred_calls = nullptr;
-    m_free_deferred_call_pool_entry = &m_deferred_call_pool[0];
-}
-
-void Processor::deferred_call_return_to_pool(DeferredCallEntry* entry)
-{
-    VERIFY(m_in_critical);
-    VERIFY(!entry->was_allocated);
-
-    entry->handler_value() = {};
-
-    entry->next = m_free_deferred_call_pool_entry;
-    m_free_deferred_call_pool_entry = entry;
-}
-
-DeferredCallEntry* Processor::deferred_call_get_free()
-{
-    VERIFY(m_in_critical);
-
-    if (m_free_deferred_call_pool_entry) {
-        // Fast path, we have an entry in our pool
-        auto* entry = m_free_deferred_call_pool_entry;
-        m_free_deferred_call_pool_entry = entry->next;
-        VERIFY(!entry->was_allocated);
-        return entry;
-    }
-
-    auto* entry = new DeferredCallEntry;
-    new (entry->handler_storage) DeferredCallEntry::HandlerFunction;
-    entry->was_allocated = true;
-    return entry;
-}
-
-void Processor::deferred_call_execute_pending()
-{
-    VERIFY(m_in_critical);
-
-    if (!m_pending_deferred_calls)
-        return;
-
-    auto* pending_list = m_pending_deferred_calls;
-    m_pending_deferred_calls = nullptr;
-
-    // We pulled the stack of pending deferred calls in LIFO order, so we need to reverse the list first
-    auto reverse_list =
-        [](DeferredCallEntry* list) -> DeferredCallEntry* {
-        DeferredCallEntry* rev_list = nullptr;
-        while (list) {
-            auto next = list->next;
-            list->next = rev_list;
-            rev_list = list;
-            list = next;
-        }
-        return rev_list;
-    };
-
-    pending_list = reverse_list(pending_list);
-
-    do {
-        pending_list->invoke_handler();
-
-        // Return the entry back to the pool, or free it
-        auto* next = pending_list->next;
-        if (pending_list->was_allocated) {
-            pending_list->handler_value().~Function();
-            delete pending_list;
-        } else
-            deferred_call_return_to_pool(pending_list);
-
-        pending_list = next;
-    } while (pending_list);
-}
-
-void Processor::deferred_call_queue_entry(DeferredCallEntry* entry)
-{
-    VERIFY(m_in_critical);
-
-    entry->next = m_pending_deferred_calls;
-    m_pending_deferred_calls = entry;
-}
-
 void Processor::deferred_call_queue(Function<void()> callback)
 {
     // NOTE: If we are called outside of a critical section and outside
@@ -1438,10 +1352,10 @@ void Processor::deferred_call_queue(Function<void()> callback)
     ScopedCritical critical;
     auto& cur_proc = Processor::current();
 
-    auto* entry = cur_proc.deferred_call_get_free();
+    auto* entry = cur_proc.m_deferred_call_pool.get_free();
     entry->handler_value() = move(callback);
 
-    cur_proc.deferred_call_queue_entry(entry);
+    cur_proc.m_deferred_call_pool.queue_entry(entry);
 }
 
 UNMAP_AFTER_INIT void Processor::gdt_init()
@@ -1601,7 +1515,7 @@ void Processor::do_leave_critical()
     VERIFY(m_in_critical > 0);
     if (m_in_critical == 1) {
         if (m_in_irq == 0) {
-            deferred_call_execute_pending();
+            m_deferred_call_pool.execute_pending();
             VERIFY(m_in_critical == 1);
         }
         m_in_critical = 0;

Kernel/Arch/x86_64/Processor.h

@@ -12,6 +12,7 @@
 #include <AK/Types.h>
 
 #include <Kernel/Arch/DeferredCallEntry.h>
+#include <Kernel/Arch/DeferredCallPool.h>
 #include <Kernel/Arch/ProcessorSpecificDataID.h>
 #include <Kernel/Arch/x86_64/ASM_wrapper.h>
 #include <Kernel/Arch/x86_64/CPUID.h>
@@ -102,9 +103,7 @@ class Processor {
     bool m_in_scheduler;
     Atomic<bool> m_halt_requested;
 
-    DeferredCallEntry* m_pending_deferred_calls; // in reverse order
-    DeferredCallEntry* m_free_deferred_call_pool_entry;
-    DeferredCallEntry m_deferred_call_pool[5];
+    DeferredCallPool m_deferred_call_pool {};
 
     void* m_processor_specific_data[(size_t)ProcessorSpecificDataID::__Count];
@@ -122,12 +121,6 @@
     static void smp_broadcast_wait_sync(ProcessorMessage& msg);
     static void smp_broadcast_halt();
 
-    void deferred_call_pool_init();
-    void deferred_call_execute_pending();
-    DeferredCallEntry* deferred_call_get_free();
-    void deferred_call_return_to_pool(DeferredCallEntry*);
-    void deferred_call_queue_entry(DeferredCallEntry*);
-
     void cpu_detect();
     void cpu_setup();

Kernel/CMakeLists.txt

@@ -18,6 +18,7 @@ set(KERNEL_HEAP_SOURCES
 set(KERNEL_SOURCES
     AddressSanitizer.cpp
     Arch/PageFault.cpp
+    Arch/DeferredCallPool.cpp
     Bus/PCI/Controller/HostController.cpp
     Bus/PCI/Controller/MemoryBackedHostBridge.cpp
     Bus/PCI/Controller/VolumeManagementDevice.cpp