diff --git a/Kernel/Arch/DeferredCallPool.cpp b/Kernel/Arch/DeferredCallPool.cpp
new file mode 100644
index 00000000000..7e2b8efd683
--- /dev/null
+++ b/Kernel/Arch/DeferredCallPool.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2020, Tom
+ * Copyright (c) 2023, Timon Kruiper
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include
+#include
+#include <Kernel/Arch/DeferredCallPool.h>
+
+namespace Kernel {
+
+void DeferredCallPool::init()
+{
+    size_t pool_count = sizeof(m_deferred_call_pool) / sizeof(m_deferred_call_pool[0]);
+    for (size_t i = 0; i < pool_count; i++) {
+        auto& entry = m_deferred_call_pool[i];
+        entry.next = i < pool_count - 1 ? &m_deferred_call_pool[i + 1] : nullptr;
+        new (entry.handler_storage) DeferredCallEntry::HandlerFunction;
+        entry.was_allocated = false;
+    }
+    m_pending_deferred_calls = nullptr;
+    m_free_deferred_call_pool_entry = &m_deferred_call_pool[0];
+}
+
+void DeferredCallPool::return_to_pool(DeferredCallEntry* entry)
+{
+    VERIFY(!entry->was_allocated);
+
+    entry->handler_value() = {};
+
+    entry->next = m_free_deferred_call_pool_entry;
+    m_free_deferred_call_pool_entry = entry;
+}
+
+DeferredCallEntry* DeferredCallPool::get_free()
+{
+    if (m_free_deferred_call_pool_entry) {
+        // Fast path, we have an entry in our pool
+        auto* entry = m_free_deferred_call_pool_entry;
+        m_free_deferred_call_pool_entry = entry->next;
+        VERIFY(!entry->was_allocated);
+        return entry;
+    }
+
+    auto* entry = new DeferredCallEntry;
+    new (entry->handler_storage) DeferredCallEntry::HandlerFunction;
+    entry->was_allocated = true;
+    return entry;
+}
+
+void DeferredCallPool::execute_pending()
+{
+    if (!m_pending_deferred_calls)
+        return;
+    auto* pending_list = m_pending_deferred_calls;
+    m_pending_deferred_calls = nullptr;
+
+    // We pulled the stack of pending deferred calls in LIFO order, so we need to reverse the list first
+    auto reverse_list = [](DeferredCallEntry* list) -> DeferredCallEntry* {
+        DeferredCallEntry* rev_list = nullptr;
+        while (list) {
+            auto next = list->next;
+            list->next = rev_list;
+            rev_list = list;
+            list = next;
+        }
+        return rev_list;
+    };
+    pending_list = reverse_list(pending_list);
+
+    do {
+        pending_list->invoke_handler();
+
+        // Return the entry back to the pool, or free it
+        auto* next = pending_list->next;
+        if (pending_list->was_allocated) {
+            pending_list->handler_value().~Function();
+            delete pending_list;
+        } else
+            return_to_pool(pending_list);
+        pending_list = next;
+    } while (pending_list);
+}
+
+void DeferredCallPool::queue_entry(DeferredCallEntry* entry)
+{
+    entry->next = m_pending_deferred_calls;
+    m_pending_deferred_calls = entry;
+}
+
+}
diff --git a/Kernel/Arch/DeferredCallPool.h b/Kernel/Arch/DeferredCallPool.h
new file mode 100644
index 00000000000..d7d6886c702
--- /dev/null
+++ b/Kernel/Arch/DeferredCallPool.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2020, Tom
+ * Copyright (c) 2023, Timon Kruiper
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <Kernel/Arch/DeferredCallEntry.h>
+
+namespace Kernel {
+
+class DeferredCallPool {
+public:
+    void init();
+    void execute_pending();
+    DeferredCallEntry* get_free();
+    void return_to_pool(DeferredCallEntry*);
+    void queue_entry(DeferredCallEntry*);
+
+private:
+    DeferredCallEntry* m_pending_deferred_calls; // in reverse order
+    DeferredCallEntry* m_free_deferred_call_pool_entry;
+    DeferredCallEntry m_deferred_call_pool[5];
+};
+
+}
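For context on the new files above: the pool keeps five statically reserved entries chained into an intrusive free list, and falls back to heap allocation once those run out. The sketch below is a simplified, host-side model of that allocation strategy under stated assumptions, not the kernel code itself; the Entry, Pool, and m_slots names and the plain std::function handler are stand-ins for DeferredCallEntry and its placement-new'd HandlerFunction.

```cpp
#include <array>
#include <cstddef>
#include <cstdio>
#include <functional>

// Simplified model of one per-CPU deferred call pool: five static slots plus a heap fallback.
struct Entry {
    Entry* next { nullptr };
    std::function<void()> handler;
    bool was_allocated { false }; // true when the entry came from the heap
};

class Pool {
public:
    Pool()
    {
        // Chain the static entries into a free list, mirroring DeferredCallPool::init().
        for (std::size_t i = 0; i < m_slots.size(); i++)
            m_slots[i].next = i + 1 < m_slots.size() ? &m_slots[i + 1] : nullptr;
        m_free = &m_slots[0];
    }

    Entry* get_free()
    {
        if (m_free) {
            auto* entry = m_free; // fast path: reuse a pooled entry
            m_free = entry->next;
            return entry;
        }
        auto* entry = new Entry; // slow path: pool exhausted, heap-allocate
        entry->was_allocated = true;
        return entry;
    }

    void queue(Entry* entry)
    {
        entry->next = m_pending; // push onto the pending stack (LIFO)
        m_pending = entry;
    }

private:
    Entry* m_pending { nullptr };
    Entry* m_free { nullptr };
    std::array<Entry, 5> m_slots {};
};

int main()
{
    Pool pool;
    auto* entry = pool.get_free();
    entry->handler = [] { std::puts("ran deferred call"); };
    pool.queue(entry);
    // A real execute_pending() would reverse the pending list and invoke each handler here.
}
```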
diff --git a/Kernel/Arch/aarch64/Processor.cpp b/Kernel/Arch/aarch64/Processor.cpp
index 04b643c3485..320346da510 100644
--- a/Kernel/Arch/aarch64/Processor.cpp
+++ b/Kernel/Arch/aarch64/Processor.cpp
@@ -91,6 +91,8 @@ void Processor::early_initialize(u32 cpu)
 
 void Processor::initialize(u32)
 {
+    m_deferred_call_pool.init();
+
     dmesgln("CPU[{}]: Supports {}", m_cpu, build_cpu_feature_names(m_features));
     dmesgln("CPU[{}]: Physical address bit width: {}", m_cpu, m_physical_address_bit_width);
     dmesgln("CPU[{}]: Virtual address bit width: {}", m_cpu, m_virtual_address_bit_width);
@@ -132,7 +134,7 @@ void Processor::do_leave_critical()
     VERIFY(m_in_critical > 0);
     if (m_in_critical == 1) {
         if (m_in_irq == 0) {
-            // FIXME: Call deferred_call_execute_pending()!
+            m_deferred_call_pool.execute_pending();
             VERIFY(m_in_critical == 1);
         }
         m_in_critical = 0;
@@ -402,6 +404,10 @@ void Processor::exit_trap(TrapFrame& trap)
     // FIXME: Figure out if we need prev_irq_level, see duplicated code in Kernel/Arch/x86/common/Processor.cpp
     m_in_irq = 0;
 
+    // Process the deferred call queue. Among other things, this ensures
+    // that any pending thread unblocks happen before we enter the scheduler.
+    m_deferred_call_pool.execute_pending();
+
     auto* current_thread = Processor::current_thread();
     if (current_thread) {
         auto& current_trap = current_thread->current_trap();
@@ -542,4 +548,17 @@ void Processor::set_thread_specific_data(VirtualAddress thread_specific_data)
     Aarch64::Asm::set_tpidr_el0(thread_specific_data.get());
 }
 
+void Processor::deferred_call_queue(Function<void()> callback)
+{
+    // NOTE: If we are called outside of a critical section and outside
+    // of an irq handler, the function will be executed before we return!
+    ScopedCritical critical;
+    auto& cur_proc = Processor::current();
+
+    auto* entry = cur_proc.m_deferred_call_pool.get_free();
+    entry->handler_value() = move(callback);
+
+    cur_proc.m_deferred_call_pool.queue_entry(entry);
+}
+
 }
diff --git a/Kernel/Arch/aarch64/Processor.h b/Kernel/Arch/aarch64/Processor.h
index f6d3bf1a9e6..f79ea3ef42f 100644
--- a/Kernel/Arch/aarch64/Processor.h
+++ b/Kernel/Arch/aarch64/Processor.h
@@ -11,6 +11,8 @@
 #include
 #include
+#include <Kernel/Arch/DeferredCallEntry.h>
+#include <Kernel/Arch/DeferredCallPool.h>
 #include
 #include
 #include
@@ -255,10 +257,7 @@
         return Processor::current_id() == 0;
     }
 
-    static void deferred_call_queue(Function<void()> /* callback */)
-    {
-        TODO_AARCH64();
-    }
+    static void deferred_call_queue(Function<void()>);
 
     static u32 smp_wake_n_idle_processors(u32 wake_count);
@@ -282,6 +281,8 @@
     void do_leave_critical();
 
+    DeferredCallPool m_deferred_call_pool {};
+
     u32 m_cpu;
     CPUFeature::Type m_features;
     u8 m_physical_address_bit_width;
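With the aarch64 port wired up above, deferred calls can be queued from interrupt context on both architectures and are drained from exit_trap() or when the processor leaves its last critical section. A rough usage sketch follows; MyDevice, handle_irq(), read_status_register(), update_state(), and unblock_waiters() are hypothetical names, and the fragment assumes the kernel's Processor and Function<void()> types from the diff, so it is illustrative rather than standalone-compilable.

```cpp
// Hypothetical IRQ handler: defer the heavier work until the processor
// leaves the IRQ/critical section, at which point execute_pending() runs it.
void MyDevice::handle_irq()
{
    auto status = read_status_register(); // hypothetical device access

    Processor::deferred_call_queue([this, status] {
        // Runs later, from exit_trap()/do_leave_critical(), not inside the IRQ handler.
        update_state(status);    // hypothetical bookkeeping
        unblock_waiters();       // hypothetical: wake threads blocked on this device
    });
}
```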
diff --git a/Kernel/Arch/x86_64/Processor.cpp b/Kernel/Arch/x86_64/Processor.cpp
index 762bb7a6b89..f0fafca567b 100644
--- a/Kernel/Arch/x86_64/Processor.cpp
+++ b/Kernel/Arch/x86_64/Processor.cpp
@@ -626,7 +626,7 @@ UNMAP_AFTER_INIT void Processor::early_initialize(u32 cpu)
         g_total_processors.fetch_add(1u, AK::MemoryOrder::memory_order_acq_rel);
     }
 
-    deferred_call_pool_init();
+    m_deferred_call_pool.init();
     cpu_setup();
     gdt_init();
 
@@ -948,7 +948,7 @@ void Processor::exit_trap(TrapFrame& trap)
 
     // Process the deferred call queue. Among other things, this ensures
    // that any pending thread unblocks happen before we enter the scheduler.
-    deferred_call_execute_pending();
+    m_deferred_call_pool.execute_pending();
 
     auto* current_thread = Processor::current_thread();
     if (current_thread) {
@@ -1345,92 +1345,6 @@ void Processor::Processor::halt()
     halt_this();
 }
 
-UNMAP_AFTER_INIT void Processor::deferred_call_pool_init()
-{
-    size_t pool_count = sizeof(m_deferred_call_pool) / sizeof(m_deferred_call_pool[0]);
-    for (size_t i = 0; i < pool_count; i++) {
-        auto& entry = m_deferred_call_pool[i];
-        entry.next = i < pool_count - 1 ? &m_deferred_call_pool[i + 1] : nullptr;
-        new (entry.handler_storage) DeferredCallEntry::HandlerFunction;
-        entry.was_allocated = false;
-    }
-    m_pending_deferred_calls = nullptr;
-    m_free_deferred_call_pool_entry = &m_deferred_call_pool[0];
-}
-
-void Processor::deferred_call_return_to_pool(DeferredCallEntry* entry)
-{
-    VERIFY(m_in_critical);
-    VERIFY(!entry->was_allocated);
-
-    entry->handler_value() = {};
-
-    entry->next = m_free_deferred_call_pool_entry;
-    m_free_deferred_call_pool_entry = entry;
-}
-
-DeferredCallEntry* Processor::deferred_call_get_free()
-{
-    VERIFY(m_in_critical);
-
-    if (m_free_deferred_call_pool_entry) {
-        // Fast path, we have an entry in our pool
-        auto* entry = m_free_deferred_call_pool_entry;
-        m_free_deferred_call_pool_entry = entry->next;
-        VERIFY(!entry->was_allocated);
-        return entry;
-    }
-
-    auto* entry = new DeferredCallEntry;
-    new (entry->handler_storage) DeferredCallEntry::HandlerFunction;
-    entry->was_allocated = true;
-    return entry;
-}
-
-void Processor::deferred_call_execute_pending()
-{
-    VERIFY(m_in_critical);
-
-    if (!m_pending_deferred_calls)
-        return;
-    auto* pending_list = m_pending_deferred_calls;
-    m_pending_deferred_calls = nullptr;
-
-    // We pulled the stack of pending deferred calls in LIFO order, so we need to reverse the list first
-    auto reverse_list =
-        [](DeferredCallEntry* list) -> DeferredCallEntry* {
-        DeferredCallEntry* rev_list = nullptr;
-        while (list) {
-            auto next = list->next;
-            list->next = rev_list;
-            rev_list = list;
-            list = next;
-        }
-        return rev_list;
-    };
-    pending_list = reverse_list(pending_list);
-
-    do {
-        pending_list->invoke_handler();
-
-        // Return the entry back to the pool, or free it
-        auto* next = pending_list->next;
-        if (pending_list->was_allocated) {
-            pending_list->handler_value().~Function();
-            delete pending_list;
-        } else
-            deferred_call_return_to_pool(pending_list);
-        pending_list = next;
-    } while (pending_list);
-}
-
-void Processor::deferred_call_queue_entry(DeferredCallEntry* entry)
-{
-    VERIFY(m_in_critical);
-    entry->next = m_pending_deferred_calls;
-    m_pending_deferred_calls = entry;
-}
-
 void Processor::deferred_call_queue(Function<void()> callback)
 {
     // NOTE: If we are called outside of a critical section and outside
@@ -1438,10 +1352,10 @@ void Processor::deferred_call_queue(Function<void()> callback)
     ScopedCritical critical;
     auto& cur_proc = Processor::current();
 
-    auto* entry = cur_proc.deferred_call_get_free();
+    auto* entry = cur_proc.m_deferred_call_pool.get_free();
     entry->handler_value() = move(callback);
 
-    cur_proc.deferred_call_queue_entry(entry);
+    cur_proc.m_deferred_call_pool.queue_entry(entry);
 }
 
 UNMAP_AFTER_INIT void Processor::gdt_init()
@@ -1601,7 +1515,7 @@ void Processor::do_leave_critical()
     VERIFY(m_in_critical > 0);
     if (m_in_critical == 1) {
         if (m_in_irq == 0) {
-            deferred_call_execute_pending();
+            m_deferred_call_pool.execute_pending();
             VERIFY(m_in_critical == 1);
         }
         m_in_critical = 0;
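One detail worth noting in the code removed above (and kept verbatim in DeferredCallPool::execute_pending()): queue_entry() pushes onto the head of the pending list, so entries sit in LIFO order, and the in-place reversal restores first-in, first-out execution. Below is a minimal host-side illustration of that reversal, using a plain int payload as a stand-in for DeferredCallEntry.

```cpp
#include <cstdio>

struct Node {
    int value;
    Node* next;
};

// Same in-place reversal as the reverse_list lambda in execute_pending():
// pop each node off the input list and push it onto the reversed list.
static Node* reverse_list(Node* list)
{
    Node* rev_list = nullptr;
    while (list) {
        auto* next = list->next;
        list->next = rev_list;
        rev_list = list;
        list = next;
    }
    return rev_list;
}

int main()
{
    // Build the list the way queue_entry() does: push 1, 2, 3 onto the head...
    Node nodes[3] = { { 1, nullptr }, { 2, nullptr }, { 3, nullptr } };
    Node* pending = nullptr;
    for (auto& node : nodes) {
        node.next = pending;
        pending = &node;
    }

    // ...so the raw list reads 3, 2, 1; reversing restores queueing order.
    for (auto* node = reverse_list(pending); node; node = node->next)
        std::printf("%d\n", node->value); // prints 1, 2, 3
}
```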
diff --git a/Kernel/Arch/x86_64/Processor.h b/Kernel/Arch/x86_64/Processor.h
index 66832eb9265..9f9c9886a36 100644
--- a/Kernel/Arch/x86_64/Processor.h
+++ b/Kernel/Arch/x86_64/Processor.h
@@ -12,6 +12,7 @@
 #include
 #include
+#include <Kernel/Arch/DeferredCallPool.h>
 #include
 #include
 #include
@@ -102,9 +103,7 @@ class Processor {
     bool m_in_scheduler;
     Atomic<bool> m_halt_requested;
 
-    DeferredCallEntry* m_pending_deferred_calls; // in reverse order
-    DeferredCallEntry* m_free_deferred_call_pool_entry;
-    DeferredCallEntry m_deferred_call_pool[5];
+    DeferredCallPool m_deferred_call_pool {};
 
     void* m_processor_specific_data[(size_t)ProcessorSpecificDataID::__Count];
 
@@ -122,12 +121,6 @@ class Processor {
     static void smp_broadcast_wait_sync(ProcessorMessage& msg);
     static void smp_broadcast_halt();
 
-    void deferred_call_pool_init();
-    void deferred_call_execute_pending();
-    DeferredCallEntry* deferred_call_get_free();
-    void deferred_call_return_to_pool(DeferredCallEntry*);
-    void deferred_call_queue_entry(DeferredCallEntry*);
-
     void cpu_detect();
     void cpu_setup();
 
diff --git a/Kernel/CMakeLists.txt b/Kernel/CMakeLists.txt
index eb2b580486b..a94d124afa4 100644
--- a/Kernel/CMakeLists.txt
+++ b/Kernel/CMakeLists.txt
@@ -18,6 +18,7 @@ set(KERNEL_HEAP_SOURCES
 set(KERNEL_SOURCES
     AddressSanitizer.cpp
     Arch/PageFault.cpp
+    Arch/DeferredCallPool.cpp
     Bus/PCI/Controller/HostController.cpp
     Bus/PCI/Controller/MemoryBackedHostBridge.cpp
     Bus/PCI/Controller/VolumeManagementDevice.cpp
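A final note on handler lifetime in the moved code: each entry keeps its handler in raw handler_storage, a Function is placement-new'd into it, pooled entries are merely reset to an empty handler so their storage can be reused, and only heap-allocated entries run the destructor explicitly before delete. The standalone sketch below mirrors that pattern, with std::function and std::destroy_at standing in for AK::Function and the explicit ~Function() call.

```cpp
#include <cstdio>
#include <functional>
#include <memory>
#include <new>

struct Slot {
    // Raw storage for the handler, so a Slot can live in a static array without
    // requiring the handler object to be constructed up front.
    alignas(std::function<void()>) unsigned char handler_storage[sizeof(std::function<void()>)];

    std::function<void()>& handler_value()
    {
        return *reinterpret_cast<std::function<void()>*>(handler_storage);
    }
};

int main()
{
    Slot slot;
    // Placement-new an empty handler into the raw storage, as init()/get_free() do.
    new (slot.handler_storage) std::function<void()>;

    slot.handler_value() = [] { std::puts("deferred work"); };
    slot.handler_value()(); // corresponds to invoke_handler()

    // A pooled entry is simply cleared so the storage stays alive for reuse...
    slot.handler_value() = {};
    // ...whereas a heap-allocated entry destroys the handler before being deleted.
    std::destroy_at(&slot.handler_value());
}
```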