/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Array.h>
#include <AK/Concepts.h>
#include <AK/Function.h>
#include <AK/Types.h>

#include <Kernel/Arch/x86/ASM_wrapper.h>
#include <Kernel/Arch/x86/CPUID.h>
#include <Kernel/Arch/x86/DescriptorTable.h>
#include <Kernel/Arch/x86/PageDirectory.h>
#include <Kernel/Arch/x86/TSS.h>
#include <Kernel/Forward.h>

namespace Kernel {

class ProcessorInfo;
struct ProcessorMessageEntry;

enum class ProcessorSpecificDataID {
    MemoryManager,
    __Count,
};

#if ARCH(X86_64)
#    define MSR_FS_BASE 0xc0000100
#    define MSR_GS_BASE 0xc0000101
#endif

// FIXME: Find a better place for these
extern "C" void thread_context_first_enter(void);
extern "C" void exit_kernel_thread(void);
extern "C" void do_assume_context(Thread* thread, u32 flags);
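
// Raw FPU/SSE register state; 512 bytes with 16-byte alignment, matching the
// layout the FXSAVE/FXRSTOR instructions expect.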
struct [[gnu::aligned(16)]] FPUState
{
    u8 buffer[512];
};
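
// A message passed between processors for SMP work: either a TLB flush request or
// an arbitrary callback. refs counts how many processors still have to handle the
// message before it can be returned to the message pool.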
struct ProcessorMessage {
    using CallbackFunction = Function<void()>;

    enum Type {
        FlushTlb,
        Callback,
    };
    Type type;
    Atomic<u32> refs;
    union {
        ProcessorMessage* next; // only valid while in the pool
        alignas(CallbackFunction) u8 callback_storage[sizeof(CallbackFunction)];
        struct {
            Memory::PageDirectory const* page_directory;
            u8* ptr;
            size_t page_count;
        } flush_tlb;
    };
    volatile bool async;

    ProcessorMessageEntry* per_proc_entries;

    CallbackFunction& callback_value()
    {
        return *bit_cast<CallbackFunction*>(&callback_storage);
    }

    void invoke_callback()
    {
        VERIFY(type == Type::Callback);
        callback_value()();
    }
};
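
// Per-processor queue node pointing at a shared ProcessorMessage; each target CPU
// gets its own entry on its message queue.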
struct ProcessorMessageEntry {
    ProcessorMessageEntry* next;
    ProcessorMessage* msg;
};
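
// A queued deferred call; pending entries are executed by
// deferred_call_execute_pending() when the processor leaves its outermost
// critical section (see do_leave_critical()).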
struct DeferredCallEntry {
    using HandlerFunction = Function<void()>;

    DeferredCallEntry* next;
    alignas(HandlerFunction) u8 handler_storage[sizeof(HandlerFunction)];
    bool was_allocated;

    HandlerFunction& handler_value()
    {
        return *bit_cast<HandlerFunction*>(&handler_storage);
    }

    void invoke_handler()
    {
        handler_value()();
    }
};

class Processor;
// Note: We only support 64 processors at most at the moment,
// so allocate 64 slots of inline capacity in the container.
using ProcessorContainer = Array<Processor*, 64>;

class Processor {
    friend class ProcessorInfo;

    AK_MAKE_NONCOPYABLE(Processor);
    AK_MAKE_NONMOVABLE(Processor);

    Processor* m_self;

    DescriptorTablePointer m_gdtr;
    Descriptor m_gdt[256];
    u32 m_gdt_length;

    u32 m_cpu;
    FlatPtr m_in_irq;
    volatile u32 m_in_critical;
    static Atomic<u32> s_idle_cpu_mask;

    TSS m_tss;
    static FPUState s_clean_fpu_state;
    CPUFeature m_features;
    static Atomic<u32> g_total_processors;
    u8 m_physical_address_bit_width;

    ProcessorInfo* m_info;

    Thread* m_current_thread;
    Thread* m_idle_thread;

    Atomic<ProcessorMessageEntry*> m_message_queue;

    bool m_invoke_scheduler_async;
    bool m_scheduler_initialized;
    bool m_in_scheduler;
    Atomic<bool> m_halt_requested;

    DeferredCallEntry* m_pending_deferred_calls; // in reverse order
    DeferredCallEntry* m_free_deferred_call_pool_entry;
    DeferredCallEntry m_deferred_call_pool[5];

    void* m_processor_specific_data[(size_t)ProcessorSpecificDataID::__Count];

    void gdt_init();
    void write_raw_gdt_entry(u16 selector, u32 low, u32 high);
    void write_gdt_entry(u16 selector, Descriptor& descriptor);
    static ProcessorContainer& processors();

    static void smp_return_to_pool(ProcessorMessage& msg);
    static ProcessorMessage& smp_get_from_pool();
    static void smp_cleanup_message(ProcessorMessage& msg);
    bool smp_enqueue_message(ProcessorMessage&);
    static void smp_unicast_message(u32 cpu, ProcessorMessage& msg, bool async);
    static void smp_broadcast_message(ProcessorMessage& msg);
    static void smp_broadcast_wait_sync(ProcessorMessage& msg);
    static void smp_broadcast_halt();

    void deferred_call_pool_init();
    void deferred_call_execute_pending();
    DeferredCallEntry* deferred_call_get_free();
    void deferred_call_return_to_pool(DeferredCallEntry*);
    void deferred_call_queue_entry(DeferredCallEntry*);

    void cpu_detect();
    void cpu_setup();
    String features_string() const;

public:
    Processor() = default;

    void early_initialize(u32 cpu);
    void initialize(u32 cpu);

    void detect_hypervisor();
    void detect_hypervisor_hyperv(CPUID const& hypervisor_leaf_range);

    void idle_begin()
    {
        s_idle_cpu_mask.fetch_or(1u << m_cpu, AK::MemoryOrder::memory_order_relaxed);
    }

    void idle_end()
    {
        s_idle_cpu_mask.fetch_and(~(1u << m_cpu), AK::MemoryOrder::memory_order_relaxed);
    }

    static Processor& by_id(u32);

    static u32 count()
    {
        // NOTE: because this value never changes once all APs are booted,
        // we can safely bypass loading it atomically.
        return *g_total_processors.ptr();
    }
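
    // Hint to the CPU that we are in a spin-wait loop (x86 PAUSE instruction).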
    ALWAYS_INLINE static void pause()
    {
        asm volatile("pause");
    }

    ALWAYS_INLINE static void wait_check()
    {
        Processor::pause();
        if (Processor::is_smp_enabled())
            Processor::current().smp_process_pending_messages();
    }

    [[noreturn]] static void halt();
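
    // Reloading CR3 with its current value invalidates all non-global TLB entries
    // on this processor.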
    static void flush_entire_tlb_local()
    {
        write_cr3(read_cr3());
    }

    static void flush_tlb_local(VirtualAddress vaddr, size_t page_count);
    static void flush_tlb(Memory::PageDirectory const*, VirtualAddress, size_t);

    Descriptor& get_gdt_entry(u16 selector);
    void flush_gdt();
    const DescriptorTablePointer& get_gdtr();

    static size_t processor_count() { return processors().size(); }

    template<IteratorFunction<Processor&> Callback>
    static inline IterationDecision for_each(Callback callback)
    {
        auto& procs = processors();
        size_t count = procs.size();
        for (size_t i = 0; i < count; i++) {
            if (callback(*procs[i]) == IterationDecision::Break)
                return IterationDecision::Break;
        }
        return IterationDecision::Continue;
    }

    template<VoidFunction<Processor&> Callback>
    static inline IterationDecision for_each(Callback callback)
    {
        auto& procs = processors();
        size_t count = procs.size();
        for (size_t i = 0; i < count; i++) {
            if (procs[i] != nullptr)
                callback(*procs[i]);
        }
        return IterationDecision::Continue;
    }

    ALWAYS_INLINE u8 physical_address_bit_width() const { return m_physical_address_bit_width; }

    ALWAYS_INLINE ProcessorInfo& info() { return *m_info; }

    static bool is_smp_enabled();
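
    // Each processor's GS base points at its own Processor instance, so per-CPU
    // fields (m_self, m_current_thread, ...) can be read with a single GS-relative
    // access, without first having to figure out which CPU we are running on.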
    ALWAYS_INLINE static Processor& current()
    {
        return *(Processor*)read_gs_ptr(__builtin_offsetof(Processor, m_self));
    }

    ALWAYS_INLINE static bool is_initialized()
    {
        return
#if ARCH(I386)
            get_gs() == GDT_SELECTOR_PROC &&
#endif
            read_gs_ptr(__builtin_offsetof(Processor, m_self)) != 0;
    }

    template<typename T>
    T* get_specific()
    {
        return static_cast<T*>(m_processor_specific_data[static_cast<size_t>(T::processor_specific_data_id())]);
    }

    void set_specific(ProcessorSpecificDataID specific_id, void* ptr)
    {
        m_processor_specific_data[static_cast<size_t>(specific_id)] = ptr;
    }

    ALWAYS_INLINE void set_idle_thread(Thread& idle_thread)
    {
        m_idle_thread = &idle_thread;
    }

    ALWAYS_INLINE static Thread* current_thread()
    {
        // If we were to use Processor::current here, we'd have to
        // disable interrupts to prevent a race where we may get pre-empted
        // right after getting the Processor structure and then get moved
        // to another processor, which would lead us to get the wrong thread.
        // To avoid having to disable interrupts, we can just read the field
        // directly in an atomic fashion, similar to Processor::current.
        return (Thread*)read_gs_ptr(__builtin_offsetof(Processor, m_current_thread));
    }

    ALWAYS_INLINE static void set_current_thread(Thread& current_thread)
    {
        // See comment in Processor::current_thread
        write_gs_ptr(__builtin_offsetof(Processor, m_current_thread), FlatPtr(&current_thread));
    }

    ALWAYS_INLINE static Thread* idle_thread()
    {
        // See comment in Processor::current_thread
        return (Thread*)read_gs_ptr(__builtin_offsetof(Processor, m_idle_thread));
    }

    ALWAYS_INLINE u32 id() const
    {
        // NOTE: This variant should only be used when iterating over all
        // Processor instances, or when it's guaranteed that the thread
        // cannot move to another processor in between calling Processor::current
        // and Processor::id, or if this fact is not important.
        // All other cases should use Processor::current_id instead!
        return m_cpu;
    }

    ALWAYS_INLINE static u32 current_id()
    {
        // See comment in Processor::current_thread
        return read_gs_ptr(__builtin_offsetof(Processor, m_cpu));
    }

    ALWAYS_INLINE static bool is_bootstrap_processor()
    {
        return Processor::current_id() == 0;
    }

    ALWAYS_INLINE static FlatPtr current_in_irq()
    {
        return read_gs_ptr(__builtin_offsetof(Processor, m_in_irq));
    }

    ALWAYS_INLINE static void restore_in_critical(u32 critical)
    {
        write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), critical);
    }

    ALWAYS_INLINE static void enter_critical()
    {
        write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), in_critical() + 1);
    }

    ALWAYS_INLINE static bool current_in_scheduler()
    {
        return read_gs_value<decltype(m_in_scheduler)>(__builtin_offsetof(Processor, m_in_scheduler));
    }

    ALWAYS_INLINE static void set_current_in_scheduler(bool value)
    {
        write_gs_value<decltype(m_in_scheduler)>(__builtin_offsetof(Processor, m_in_scheduler), value);
    }

private:
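    // Leaving the outermost critical section: run any pending deferred calls and,
    // unless we are inside an IRQ handler, give the scheduler a chance to run.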
    ALWAYS_INLINE void do_leave_critical()
    {
        VERIFY(m_in_critical > 0);
        if (m_in_critical == 1) {
            if (!m_in_irq) {
                deferred_call_execute_pending();
                VERIFY(m_in_critical == 1);
            }
            m_in_critical = 0;
            if (!m_in_irq)
                check_invoke_scheduler();
        } else {
            m_in_critical = m_in_critical - 1;
        }
    }

public:
    ALWAYS_INLINE static void leave_critical()
    {
        current().do_leave_critical();
    }
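
    // Drop the critical section count to zero and return the previous value so the
    // caller can later restore the same depth via restore_critical().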
    ALWAYS_INLINE static u32 clear_critical()
    {
        auto prev_critical = in_critical();
        write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), 0);
        auto& proc = current();
        if (!proc.m_in_irq)
            proc.check_invoke_scheduler();
        return prev_critical;
    }

    ALWAYS_INLINE static void restore_critical(u32 prev_critical)
    {
        // NOTE: This doesn't have to be atomic, and it's also fine if we
        // get preempted in between these steps. If we move to another
        // processor, m_in_critical will move along with us. And if we
        // are preempted, we would resume with the same flags.
        write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), prev_critical);
    }

    ALWAYS_INLINE static u32 in_critical()
    {
        // See comment in Processor::current_thread
        return read_gs_ptr(__builtin_offsetof(Processor, m_in_critical));
    }

    ALWAYS_INLINE static FPUState const& clean_fpu_state() { return s_clean_fpu_state; }

    static void smp_enable();
    bool smp_process_pending_messages();

    static void smp_unicast(u32 cpu, Function<void()>, bool async);
    static void smp_broadcast_flush_tlb(Memory::PageDirectory const*, VirtualAddress, size_t);
    static u32 smp_wake_n_idle_processors(u32 wake_count);

    static void deferred_call_queue(Function<void()> callback);

    ALWAYS_INLINE bool has_feature(CPUFeature f) const
    {
        return (static_cast<u32>(m_features) & static_cast<u32>(f)) != 0;
    }

    void check_invoke_scheduler();
    void invoke_scheduler_async() { m_invoke_scheduler_async = true; }

    void enter_trap(TrapFrame& trap, bool raise_irq);
    void exit_trap(TrapFrame& trap);

    [[noreturn]] void initialize_context_switching(Thread& initial_thread);
    NEVER_INLINE void switch_context(Thread*& from_thread, Thread*& to_thread);
    [[noreturn]] static void assume_context(Thread& thread, FlatPtr flags);
    FlatPtr init_context(Thread& thread, bool leave_crit);
    static Vector<FlatPtr> capture_stack_trace(Thread& thread, size_t max_frames = 0);

    static StringView platform_string();
};
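
// Convenience wrapper for registering and retrieving per-processor data of type T,
// keyed by T::processor_specific_data_id().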
template<typename T>
class ProcessorSpecific {
public:
    static void initialize()
    {
        Processor::current().set_specific(T::processor_specific_data_id(), new T);
    }
    static T& get()
    {
        return *Processor::current().get_specific<T>();
    }
};

}