/*
 * Copyright (c) 2018-2021, James Mintram <me@jamesrm.com>
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Function.h>
#include <Kernel/Arch/CPUID.h>
#include <Kernel/Arch/DeferredCallEntry.h>
#include <Kernel/Arch/DeferredCallPool.h>
#include <Kernel/Arch/FPUState.h>
#include <Kernel/Arch/ProcessorSpecificDataID.h>
#include <Kernel/Memory/VirtualAddress.h>

#if ARCH(X86_64)
#    include <Kernel/Arch/x86_64/DescriptorTable.h>
#endif

namespace Kernel {
enum class InterruptsState {
    Enabled,
    Disabled
};

namespace Memory {
class PageDirectory;
}

struct TrapFrame;
class Thread;
class Processor;

extern Atomic<u32> g_total_processors;
extern FPUState s_clean_fpu_state;
// context_first_init is an architecture-specific detail; its exact shape and
// calling convention vary per architecture, but all variants eventually call
// into the common do_context_first_init() declared here.
void do_context_first_init(Thread* from_thread, Thread* to_thread);

extern "C" void exit_kernel_thread(void);
extern "C" void thread_context_first_enter(void);
extern "C" void do_assume_context(Thread* thread, u32 flags);
extern "C" FlatPtr do_init_context(Thread* thread, u32) __attribute__((used));

template<typename ProcessorT>
class ProcessorBase {
public:
    template<typename T>
    T* get_specific()
    {
        return static_cast<T*>(m_processor_specific_data[static_cast<size_t>(T::processor_specific_data_id())]);
    }

    void set_specific(ProcessorSpecificDataID specific_id, void* ptr)
    {
        m_processor_specific_data[static_cast<size_t>(specific_id)] = ptr;
    }
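
    // NOTE: Most callers are expected to go through the ProcessorSpecific<T>
    //       helper at the bottom of this file (see the usage sketch there)
    //       rather than calling get_specific()/set_specific() directly.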

    static bool is_smp_enabled();
    static void smp_enable();
    static u32 smp_wake_n_idle_processors(u32 wake_count);

    static void flush_tlb_local(VirtualAddress vaddr, size_t page_count);
    static void flush_tlb(Memory::PageDirectory const*, VirtualAddress, size_t);

    void early_initialize(u32 cpu);
    void initialize(u32 cpu);

    ALWAYS_INLINE static bool is_initialized();

    [[noreturn]] static void halt();
    void wait_for_interrupt() const;

    ALWAYS_INLINE static void pause();
    ALWAYS_INLINE static void wait_check();

    ALWAYS_INLINE static ProcessorT& current();
    static Processor& by_id(u32);

    ALWAYS_INLINE u32 id() const
    {
        // NOTE: This variant should only be used when iterating over all
        //       Processor instances, or when it's guaranteed that the thread
        //       cannot move to another processor in between calling Processor::current
        //       and Processor::id, or if this fact is not important.
        //       All other cases should use Processor::current_id instead!
        return m_cpu;
    }
    ALWAYS_INLINE static u32 current_id();

    ALWAYS_INLINE static bool is_bootstrap_processor();

    ALWAYS_INLINE bool has_nx() const;
    ALWAYS_INLINE bool has_pat() const;
    ALWAYS_INLINE bool has_feature(CPUFeature::Type const& feature) const
    {
        return m_features.has_flag(feature);
    }
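
    // Example (hypothetical call site; CPUFeature enumerators are
    // architecture-specific and brought in via Kernel/Arch/CPUID.h,
    // NX being an x86_64 example):
    //
    //     if (Processor::current().has_feature(CPUFeature::NX))
    //         dbgln("NX is supported");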

    static StringView platform_string();

    static u32 count()
    {
        // NOTE: because this value never changes once all APs are booted,
        //       we can safely bypass loading it atomically.
        // NOTE: This does not work on aarch64, since the variable is never written.
        return *g_total_processors.ptr();
    }

    void enter_trap(TrapFrame& trap, bool raise_irq);
    void exit_trap(TrapFrame& trap);

    static void flush_entire_tlb_local();

    ALWAYS_INLINE static Thread* current_thread();
    ALWAYS_INLINE static void set_current_thread(Thread& current_thread);
    ALWAYS_INLINE static Thread* idle_thread();

    ALWAYS_INLINE static u32 in_critical();
    ALWAYS_INLINE static void enter_critical();
    static void leave_critical();
    void do_leave_critical();
    static u32 clear_critical();
    ALWAYS_INLINE static void restore_critical(u32 prev_critical);
    ALWAYS_INLINE static void verify_no_spinlocks_held()
    {
        VERIFY(!ProcessorBase::in_critical());
    }
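
    // A sketch of the intended pairing (assumed from the API shape; spinlocks
    // and the scheduler are the primary users of the critical-section counter):
    //
    //     Processor::enter_critical();
    //     // ... work that must not be preempted on this CPU ...
    //     Processor::leave_critical();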

    static InterruptsState interrupts_state();
    static void restore_interrupts_state(InterruptsState);
    static bool are_interrupts_enabled();
    ALWAYS_INLINE static void enable_interrupts();
    ALWAYS_INLINE static void disable_interrupts();

    ALWAYS_INLINE static FlatPtr current_in_irq();

    ALWAYS_INLINE static bool is_kernel_mode();

    ALWAYS_INLINE void set_idle_thread(Thread& idle_thread)
    {
        m_idle_thread = &idle_thread;
    }

    void idle_begin() const;
    void idle_end() const;
    u64 time_spent_idle() const;

    ALWAYS_INLINE static u64 read_cpu_counter();

    void check_invoke_scheduler();
    void invoke_scheduler_async() { m_invoke_scheduler_async = true; }

    ALWAYS_INLINE static bool current_in_scheduler();
    ALWAYS_INLINE static void set_current_in_scheduler(bool value);
    ALWAYS_INLINE bool is_in_scheduler() const { return m_in_scheduler; }

    ALWAYS_INLINE u8 physical_address_bit_width() const
    {
        return m_physical_address_bit_width;
    }

    ALWAYS_INLINE u8 virtual_address_bit_width() const
    {
        return m_virtual_address_bit_width;
    }

    ALWAYS_INLINE static FPUState const& clean_fpu_state() { return s_clean_fpu_state; }

    static void deferred_call_queue(Function<void()> callback);
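
    // Example (hypothetical; dbgln() is from AK): queue work to run on this
    // CPU at a safe point instead of doing it in the current context:
    //
    //     Processor::deferred_call_queue([] {
    //         dbgln("running deferred on CPU {}", Processor::current_id());
    //     });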

    static void set_thread_specific_data(VirtualAddress thread_specific_data);

    [[noreturn]] void initialize_context_switching(Thread& initial_thread);
    NEVER_INLINE void switch_context(Thread*& from_thread, Thread*& to_thread);
    [[noreturn]] static void assume_context(Thread& thread, InterruptsState new_interrupts_state);
    FlatPtr init_context(Thread& thread, bool leave_crit);

    static ErrorOr<Vector<FlatPtr, 32>> capture_stack_trace(Thread& thread, size_t max_frames = 0);

protected:
    ProcessorT* m_self;

    CPUFeature::Type m_features;
    Atomic<bool> m_halt_requested;

    u8 m_physical_address_bit_width;
    u8 m_virtual_address_bit_width;

private:
    void* m_processor_specific_data[static_cast<size_t>(ProcessorSpecificDataID::__Count)];

    Thread* m_idle_thread;
    Thread* m_current_thread;

    u32 m_cpu { 0 };

    // FIXME: On aarch64, once there is code in place to differentiate IRQs from synchronous exceptions (syscalls),
    //        this member should be incremented. Also this member shouldn't be a FlatPtr.
    FlatPtr m_in_irq { 0 };
    volatile u32 m_in_critical;

    // NOTE: Since these variables are accessed with atomic magic on x86 (through GS with a single load instruction),
    //       they need to be FlatPtrs or everything becomes highly unsound and breaks. They are actually just booleans.
    FlatPtr m_in_scheduler;
    FlatPtr m_invoke_scheduler_async;
    FlatPtr m_scheduler_initialized;

    DeferredCallPool m_deferred_call_pool {};
};

template class ProcessorBase<Processor>;

}

#if ARCH(X86_64)
#    include <Kernel/Arch/x86_64/Processor.h>
#elif ARCH(AARCH64)
#    include <Kernel/Arch/aarch64/Processor.h>
#elif ARCH(RISCV64)
#    include <Kernel/Arch/riscv64/Processor.h>
#else
#    error "Unknown architecture"
#endif

namespace Kernel {

template<typename T>
ALWAYS_INLINE bool ProcessorBase<T>::is_bootstrap_processor()
{
    return current_id() == 0;
}

template<typename T>
InterruptsState ProcessorBase<T>::interrupts_state()
{
    return Processor::are_interrupts_enabled() ? InterruptsState::Enabled : InterruptsState::Disabled;
}

template<typename T>
void ProcessorBase<T>::restore_interrupts_state(InterruptsState interrupts_state)
{
    if (interrupts_state == InterruptsState::Enabled)
        Processor::enable_interrupts();
    else
        Processor::disable_interrupts();
}
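
// A sketch of the save/disable/restore pattern these two functions enable
// (assumed from the API shape; this is the pattern spinlock-style code takes):
//
//     InterruptsState previous_state = Processor::interrupts_state();
//     Processor::disable_interrupts();
//     // ... interrupt-sensitive work ...
//     Processor::restore_interrupts_state(previous_state);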

struct ProcessorMessageEntry;
struct ProcessorMessage {
    using CallbackFunction = Function<void()>;

    enum Type {
        FlushTlb,
        Callback,
    };
    Type type;
    Atomic<u32> refs;
    union {
        ProcessorMessage* next; // only valid while in the pool
        alignas(CallbackFunction) u8 callback_storage[sizeof(CallbackFunction)];
        struct {
            Memory::PageDirectory const* page_directory;
            u8* ptr;
            size_t page_count;
        } flush_tlb;
    };

    bool volatile async;

    ProcessorMessageEntry* per_proc_entries;

    CallbackFunction& callback_value()
    {
        return *bit_cast<CallbackFunction*>(&callback_storage);
    }

    void invoke_callback()
    {
        VERIFY(type == Type::Callback);
        callback_value()();
    }
};

struct ProcessorMessageEntry {
    ProcessorMessageEntry* next;
    ProcessorMessage* msg;
};

template<typename T>
class ProcessorSpecific {
public:
    static void initialize()
    {
        Processor::current().set_specific(T::processor_specific_data_id(), new T);
    }
    static T& get()
    {
        return *Processor::current().get_specific<T>();
    }
};
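
// Example usage (a hypothetical type; assumes a matching PerCPUCounters
// enumerator has been added to ProcessorSpecificDataID):
//
//     struct PerCPUCounters {
//         static constexpr ProcessorSpecificDataID processor_specific_data_id() { return ProcessorSpecificDataID::PerCPUCounters; }
//         u64 ticks { 0 };
//     };
//
//     ProcessorSpecific<PerCPUCounters>::initialize(); // once per CPU, during bring-up
//     ++ProcessorSpecific<PerCPUCounters>::get().ticks; // from any context on that CPU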

}