APIC.cpp 22 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653
  1. /*
  2. * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
  3. *
  4. * SPDX-License-Identifier: BSD-2-Clause
  5. */
  6. #include <AK/Assertions.h>
  7. #include <AK/Memory.h>
  8. #include <AK/Singleton.h>
  9. #include <AK/Types.h>
  10. #include <Kernel/ACPI/Parser.h>
  11. #include <Kernel/Arch/x86/MSR.h>
  12. #include <Kernel/Arch/x86/ProcessorInfo.h>
  13. #include <Kernel/Debug.h>
  14. #include <Kernel/IO.h>
  15. #include <Kernel/Interrupts/APIC.h>
  16. #include <Kernel/Interrupts/SpuriousInterruptHandler.h>
  17. #include <Kernel/Memory/AnonymousVMObject.h>
  18. #include <Kernel/Memory/MemoryManager.h>
  19. #include <Kernel/Memory/PageDirectory.h>
  20. #include <Kernel/Memory/TypedMapping.h>
  21. #include <Kernel/Panic.h>
  22. #include <Kernel/Sections.h>
  23. #include <Kernel/Thread.h>
  24. #include <Kernel/Time/APICTimer.h>
// Interrupt vectors used by the local APIC, expressed relative to IRQ_VECTOR_BASE.
#define IRQ_APIC_TIMER (0xfc - IRQ_VECTOR_BASE)
#define IRQ_APIC_IPI (0xfd - IRQ_VECTOR_BASE)
#define IRQ_APIC_ERR (0xfe - IRQ_VECTOR_BASE)
#define IRQ_APIC_SPURIOUS (0xff - IRQ_VECTOR_BASE)

// ICR delivery-status bit: set while a previously issued IPI is still pending.
#define APIC_ICR_DELIVERY_PENDING (1 << 12)
// Spurious-interrupt vector register bit 8: software-enables the local APIC.
#define APIC_ENABLED (1 << 8)

#define APIC_BASE_MSR 0x1b       // IA32_APIC_BASE MSR
#define APIC_REGS_MSR_BASE 0x800 // x2APIC MSR window base

// Memory-mapped (xAPIC) register offsets. In x2APIC mode the same registers
// are accessed as MSRs at APIC_REGS_MSR_BASE + (offset >> 4).
#define APIC_REG_ID 0x20
#define APIC_REG_EOI 0xb0
#define APIC_REG_LD 0xd0
#define APIC_REG_DF 0xe0
#define APIC_REG_SIV 0xf0
#define APIC_REG_TPR 0x80
#define APIC_REG_ICR_LOW 0x300
#define APIC_REG_ICR_HIGH 0x310
#define APIC_REG_LVT_TIMER 0x320
#define APIC_REG_LVT_THERMAL 0x330
#define APIC_REG_LVT_PERFORMANCE_COUNTER 0x340
#define APIC_REG_LVT_LINT0 0x350
#define APIC_REG_LVT_LINT1 0x360
#define APIC_REG_LVT_ERR 0x370
#define APIC_REG_TIMER_INITIAL_COUNT 0x380
#define APIC_REG_TIMER_CURRENT_COUNT 0x390
#define APIC_REG_TIMER_CONFIGURATION 0x3e0
namespace Kernel {

// The one and only APIC instance; created on demand by APIC::initialize().
static Singleton<APIC> s_apic;
  52. class APICIPIInterruptHandler final : public GenericInterruptHandler {
  53. public:
  54. explicit APICIPIInterruptHandler(u8 interrupt_vector)
  55. : GenericInterruptHandler(interrupt_vector, true)
  56. {
  57. }
  58. virtual ~APICIPIInterruptHandler()
  59. {
  60. }
  61. static void initialize(u8 interrupt_number)
  62. {
  63. auto* handler = new APICIPIInterruptHandler(interrupt_number);
  64. handler->register_interrupt_handler();
  65. }
  66. virtual bool handle_interrupt(const RegisterState&) override;
  67. virtual bool eoi() override;
  68. virtual HandlerType type() const override { return HandlerType::IRQHandler; }
  69. virtual StringView purpose() const override { return "IPI Handler"; }
  70. virtual StringView controller() const override { return nullptr; }
  71. virtual size_t sharing_devices_count() const override { return 0; }
  72. virtual bool is_shared_handler() const override { return false; }
  73. virtual bool is_sharing_with_others() const override { return false; }
  74. private:
  75. };
  76. class APICErrInterruptHandler final : public GenericInterruptHandler {
  77. public:
  78. explicit APICErrInterruptHandler(u8 interrupt_vector)
  79. : GenericInterruptHandler(interrupt_vector, true)
  80. {
  81. }
  82. virtual ~APICErrInterruptHandler()
  83. {
  84. }
  85. static void initialize(u8 interrupt_number)
  86. {
  87. auto* handler = new APICErrInterruptHandler(interrupt_number);
  88. handler->register_interrupt_handler();
  89. }
  90. virtual bool handle_interrupt(const RegisterState&) override;
  91. virtual bool eoi() override;
  92. virtual HandlerType type() const override { return HandlerType::IRQHandler; }
  93. virtual StringView purpose() const override { return "SMP Error Handler"; }
  94. virtual StringView controller() const override { return nullptr; }
  95. virtual size_t sharing_devices_count() const override { return 0; }
  96. virtual bool is_shared_handler() const override { return false; }
  97. virtual bool is_sharing_with_others() const override { return false; }
  98. private:
  99. };
// Returns true once APIC::initialize() has created the singleton instance.
bool APIC::initialized()
{
    return s_apic.is_initialized();
}
// Returns the global APIC instance. Must not be called before APIC::initialize().
APIC& APIC::the()
{
    VERIFY(APIC::initialized());
    return *s_apic;
}
// Creates the global APIC instance. Must be called exactly once, during boot.
UNMAP_AFTER_INIT void APIC::initialize()
{
    VERIFY(!APIC::initialized());
    s_apic.ensure_instance();
}
  114. PhysicalAddress APIC::get_base()
  115. {
  116. MSR msr(APIC_BASE_MSR);
  117. auto base = msr.get();
  118. return PhysicalAddress(base & 0xfffff000);
  119. }
  120. void APIC::set_base(const PhysicalAddress& base)
  121. {
  122. MSR msr(APIC_BASE_MSR);
  123. u64 flags = 1 << 11;
  124. if (m_is_x2)
  125. flags |= 1 << 10;
  126. msr.set(base.get() | flags);
  127. }
  128. void APIC::write_register(u32 offset, u32 value)
  129. {
  130. if (m_is_x2) {
  131. MSR msr(APIC_REGS_MSR_BASE + (offset >> 4));
  132. msr.set(value);
  133. } else {
  134. *reinterpret_cast<volatile u32*>(m_apic_base->vaddr().offset(offset).as_ptr()) = value;
  135. }
  136. }
  137. u32 APIC::read_register(u32 offset)
  138. {
  139. if (m_is_x2) {
  140. MSR msr(APIC_REGS_MSR_BASE + (offset >> 4));
  141. return (u32)msr.get();
  142. }
  143. return *reinterpret_cast<volatile u32*>(m_apic_base->vaddr().offset(offset).as_ptr());
  144. }
// ORs an interrupt vector into a local vector table (LVT) register,
// preserving the register's other bits.
void APIC::set_lvt(u32 offset, u8 interrupt)
{
    write_register(offset, read_register(offset) | interrupt);
}
// Sets the spurious interrupt vector and software-enables the APIC
// (APIC_ENABLED is bit 8 of the SIV register).
void APIC::set_siv(u32 offset, u8 interrupt)
{
    write_register(offset, read_register(offset) | interrupt | APIC_ENABLED);
}
  153. void APIC::wait_for_pending_icr()
  154. {
  155. while ((read_register(APIC_REG_ICR_LOW) & APIC_ICR_DELIVERY_PENDING) != 0) {
  156. IO::delay(200);
  157. }
  158. }
// Issues an interrupt command. In x2APIC mode the full 64-bit command is a
// single MSR write. In xAPIC mode the high dword must be written first:
// writing ICR_LOW is what actually triggers the IPI, so this order matters.
void APIC::write_icr(const ICRReg& icr)
{
    if (m_is_x2) {
        MSR msr(APIC_REGS_MSR_BASE + (APIC_REG_ICR_LOW >> 4));
        msr.set(icr.x2_value());
    } else {
        write_register(APIC_REG_ICR_HIGH, icr.x_high());
        write_register(APIC_REG_ICR_LOW, icr.x_low());
    }
}
// LVT timer modes and common LVT flag bits.
#define APIC_LVT_TIMER_ONESHOT 0
#define APIC_LVT_TIMER_PERIODIC (1 << 17)
#define APIC_LVT_TIMER_TSCDEADLINE (1 << 18)
#define APIC_LVT_MASKED (1 << 16)
#define APIC_LVT_TRIGGER_LEVEL (1 << 14)
// Builds an LVT value from an interrupt vector (iv) and delivery mode (dm).
#define APIC_LVT(iv, dm) (((iv)&0xff) | (((dm)&0x7) << 8))

// Symbols provided by the AP startup assembly stub; the stub (plus these
// variables) is copied to physical address 0x8000 in do_boot_aps().
extern "C" void apic_ap_start(void);
extern "C" u16 apic_ap_start_size;
extern "C" u32 ap_cpu_init_stacks;
extern "C" u32 ap_cpu_init_processor_info_array;
extern "C" u32 ap_cpu_init_cr0;
extern "C" u32 ap_cpu_init_cr3;
extern "C" u32 ap_cpu_init_cr4;
extern "C" u32 ap_cpu_gdtr;
extern "C" u32 ap_cpu_idtr;
// Signals end-of-interrupt to the local APIC (any write to the EOI register
// acknowledges the in-service interrupt).
void APIC::eoi()
{
    write_register(APIC_REG_EOI, 0x0);
}
// Returns the vector (relative to IRQ_VECTOR_BASE) used for spurious interrupts.
u8 APIC::spurious_interrupt_vector()
{
    return IRQ_APIC_SPURIOUS;
}
// Computes a pointer to the relocated copy of `varname` inside the AP startup
// stub: the variable's offset from apic_ap_start is applied to the relocated
// base address `vaddr`.
#define APIC_INIT_VAR_PTR(tpe, vaddr, varname) \
    reinterpret_cast<volatile tpe*>(reinterpret_cast<ptrdiff_t>(vaddr) \
        + reinterpret_cast<ptrdiff_t>(&varname)                        \
        - reinterpret_cast<ptrdiff_t>(&apic_ap_start))
  196. UNMAP_AFTER_INIT bool APIC::init_bsp()
  197. {
  198. // FIXME: Use the ACPI MADT table
  199. if (!MSR::have())
  200. return false;
  201. // check if we support local apic
  202. CPUID id(1);
  203. if ((id.edx() & (1 << 9)) == 0)
  204. return false;
  205. if (id.ecx() & (1 << 21))
  206. m_is_x2 = true;
  207. PhysicalAddress apic_base = get_base();
  208. dbgln_if(APIC_DEBUG, "Initializing {}APIC, base: {}", m_is_x2 ? "x2" : "x", apic_base);
  209. set_base(apic_base);
  210. if (!m_is_x2) {
  211. auto region_or_error = MM.allocate_kernel_region(apic_base.page_base(), PAGE_SIZE, {}, Memory::Region::Access::ReadWrite);
  212. if (region_or_error.is_error()) {
  213. dbgln("APIC: Failed to allocate memory for APIC base");
  214. return false;
  215. }
  216. m_apic_base = region_or_error.release_value();
  217. }
  218. auto rsdp = ACPI::StaticParsing::find_rsdp();
  219. if (!rsdp.has_value()) {
  220. dbgln("APIC: RSDP not found");
  221. return false;
  222. }
  223. auto madt_address = ACPI::StaticParsing::find_table(rsdp.value(), "APIC");
  224. if (!madt_address.has_value()) {
  225. dbgln("APIC: MADT table not found");
  226. return false;
  227. }
  228. auto madt = Memory::map_typed<ACPI::Structures::MADT>(madt_address.value());
  229. size_t entry_index = 0;
  230. size_t entries_length = madt->h.length - sizeof(ACPI::Structures::MADT);
  231. auto* madt_entry = madt->entries;
  232. while (entries_length > 0) {
  233. size_t entry_length = madt_entry->length;
  234. if (madt_entry->type == (u8)ACPI::Structures::MADTEntryType::LocalAPIC) {
  235. auto* plapic_entry = (const ACPI::Structures::MADTEntries::ProcessorLocalAPIC*)madt_entry;
  236. dbgln_if(APIC_DEBUG, "APIC: AP found @ MADT entry {}, processor ID: {}, xAPIC ID: {}, flags: {:#08x}", entry_index, plapic_entry->acpi_processor_id, plapic_entry->apic_id, plapic_entry->flags);
  237. m_processor_cnt++;
  238. if ((plapic_entry->flags & 0x1) != 0)
  239. m_processor_enabled_cnt++;
  240. } else if (madt_entry->type == (u8)ACPI::Structures::MADTEntryType::Local_x2APIC) {
  241. // Only used for APID IDs >= 255
  242. auto* plx2apic_entry = (const ACPI::Structures::MADTEntries::ProcessorLocalX2APIC*)madt_entry;
  243. dbgln_if(APIC_DEBUG, "APIC: AP found @ MADT entry {}, processor ID: {}, x2APIC ID: {}, flags: {:#08x}", entry_index, plx2apic_entry->acpi_processor_id, plx2apic_entry->apic_id, plx2apic_entry->flags);
  244. m_processor_cnt++;
  245. if ((plx2apic_entry->flags & 0x1) != 0)
  246. m_processor_enabled_cnt++;
  247. }
  248. madt_entry = (ACPI::Structures::MADTEntryHeader*)(VirtualAddress(madt_entry).offset(entry_length).get());
  249. entries_length -= entry_length;
  250. entry_index++;
  251. }
  252. if (m_processor_enabled_cnt < 1)
  253. m_processor_enabled_cnt = 1;
  254. if (m_processor_cnt < 1)
  255. m_processor_cnt = 1;
  256. dbgln("APIC processors found: {}, enabled: {}", m_processor_cnt, m_processor_enabled_cnt);
  257. enable(0);
  258. return true;
  259. }
// Maps `size` bytes at physical address `paddr` to the identical virtual
// address, RWX. Used for the AP startup stub, which runs in real mode and
// therefore needs virtual == physical while the APs come up.
UNMAP_AFTER_INIT static NonnullOwnPtr<Memory::Region> create_identity_mapped_region(PhysicalAddress paddr, size_t size)
{
    auto maybe_vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(paddr, size);
    // FIXME: Would be nice to be able to return a KResultOr from here.
    VERIFY(!maybe_vmobject.is_error());
    auto region_or_error = MM.allocate_kernel_region_with_vmobject(
        Memory::VirtualRange { VirtualAddress { static_cast<FlatPtr>(paddr.get()) }, size },
        maybe_vmobject.release_value(),
        {},
        Memory::Region::Access::ReadWriteExecute);
    VERIFY(!region_or_error.is_error());
    return region_or_error.release_value();
}
// Boots all application processors (APs): copies the startup stub and its
// per-AP data to physical 0x8000, allocates stacks, Processor structures and
// idle threads for each AP, then sends the INIT/SIPI sequence and waits for
// every AP to check in. The statement order here is load-bearing — all memory
// the APs will touch must exist before the SIPIs go out, because the APs
// cannot service FlushTLB IPIs yet.
UNMAP_AFTER_INIT void APIC::do_boot_aps()
{
    VERIFY(m_processor_enabled_cnt > 1);
    u32 aps_to_enable = m_processor_enabled_cnt - 1;

    // Copy the APIC startup code and variables to P0x00008000
    // Also account for the data appended to:
    // * aps_to_enable u32 values for ap_cpu_init_stacks
    // * aps_to_enable u32 values for ap_cpu_init_processor_info_array
    auto apic_startup_region = create_identity_mapped_region(PhysicalAddress(0x8000), Memory::page_round_up(apic_ap_start_size + (2 * aps_to_enable * sizeof(u32))));
    memcpy(apic_startup_region->vaddr().as_ptr(), reinterpret_cast<const void*>(apic_ap_start), apic_ap_start_size);

    // Allocate enough stacks for all APs
    Vector<OwnPtr<Memory::Region>> apic_ap_stacks;
    for (u32 i = 0; i < aps_to_enable; i++) {
        auto stack_region_or_error = MM.allocate_kernel_region(Thread::default_kernel_stack_size, {}, Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow);
        if (stack_region_or_error.is_error()) {
            dbgln("APIC: Failed to allocate stack for AP #{}", i);
            return;
        }
        auto stack_region = stack_region_or_error.release_value();
        stack_region->set_stack(true);
        apic_ap_stacks.append(move(stack_region));
    }

    // Store pointers to all stacks for the APs to use. The array lives right
    // after the copied stub; each entry is the top of one AP's stack.
    auto ap_stack_array = APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_stacks);
    VERIFY(aps_to_enable == apic_ap_stacks.size());
    for (size_t i = 0; i < aps_to_enable; i++) {
        ap_stack_array[i] = apic_ap_stacks[i]->vaddr().get() + Thread::default_kernel_stack_size;
        dbgln_if(APIC_DEBUG, "APIC: CPU[{}] stack at {}", i + 1, VirtualAddress { ap_stack_array[i] });
    }

    // Allocate Processor structures for all APs and store the pointer to the data
    m_ap_processor_info.resize(aps_to_enable);
    for (size_t i = 0; i < aps_to_enable; i++)
        m_ap_processor_info[i] = make<Processor>();
    // The processor-info pointer array follows the stack array in memory.
    auto ap_processor_info_array = &ap_stack_array[aps_to_enable];
    for (size_t i = 0; i < aps_to_enable; i++) {
        ap_processor_info_array[i] = FlatPtr(m_ap_processor_info[i].ptr());
        dbgln_if(APIC_DEBUG, "APIC: CPU[{}] processor at {}", i + 1, VirtualAddress { ap_processor_info_array[i] });
    }
    *APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_processor_info_array) = FlatPtr(&ap_processor_info_array[0]);

    // Store the BSP's CR3 value for the APs to use
    *APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_cr3) = MM.kernel_page_directory().cr3();

    // Store the BSP's GDT and IDT for the APs to use
    const auto& gdtr = Processor::current().get_gdtr();
    *APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_gdtr) = FlatPtr(&gdtr);
    const auto& idtr = get_idtr();
    *APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_idtr) = FlatPtr(&idtr);

    // Store the BSP's CR0 and CR4 values for the APs to use
    *APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_cr0) = read_cr0();
    *APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_cr4) = read_cr4();

    // Create an idle thread for each processor. We have to do this here
    // because we won't be able to send FlushTLB messages, so we have to
    // have all memory set up for the threads so that when the APs are
    // starting up, they can access all the memory properly
    m_ap_idle_threads.resize(aps_to_enable);
    for (u32 i = 0; i < aps_to_enable; i++)
        m_ap_idle_threads[i] = Scheduler::create_ap_idle_thread(i + 1);

    dbgln_if(APIC_DEBUG, "APIC: Starting {} AP(s)", aps_to_enable);

    // INIT (broadcast to all CPUs except ourselves)
    write_icr({ 0, 0, ICRReg::INIT, ICRReg::Physical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::AllExcludingSelf });
    IO::delay(10 * 1000);
    for (int i = 0; i < 2; i++) {
        // SIPI (sent twice, with vector 0x08 => start execution at P8000)
        write_icr({ 0x08, 0, ICRReg::StartUp, ICRReg::Physical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::AllExcludingSelf }); // start execution at P8000
        IO::delay(200);
    }

    // Now wait until the ap_cpu_init_pending variable dropped to 0, which means all APs are initialized and no longer need these special mappings
    if (m_apic_ap_count.load(AK::MemoryOrder::memory_order_consume) != aps_to_enable) {
        dbgln_if(APIC_DEBUG, "APIC: Waiting for {} AP(s) to finish initialization...", aps_to_enable);
        do {
            // Wait a little bit
            IO::delay(200);
        } while (m_apic_ap_count.load(AK::MemoryOrder::memory_order_consume) != aps_to_enable);
    }

    dbgln_if(APIC_DEBUG, "APIC: {} processors are initialized and running", m_processor_enabled_cnt);

    // NOTE: Since this region is identity-mapped, we have to unmap it manually to prevent the virtual
    // address range from leaking into the general virtual range allocator.
    apic_startup_region->unmap(Memory::Region::ShouldDeallocateVirtualRange::No);
}
// Entry point for AP bring-up; no-op on uniprocessor systems. Splits the
// actual boot into do_boot_aps() so SMP (and thus IPIs) is only enabled after
// that function's temporary mappings have been torn down.
UNMAP_AFTER_INIT void APIC::boot_aps()
{
    if (m_processor_enabled_cnt <= 1)
        return;

    // We split this into another call because do_boot_aps() will cause
    // MM calls upon exit, and we don't want to call smp_enable before that
    do_boot_aps();

    // Enable SMP, which means IPIs may now be sent
    Processor::smp_enable();

    dbgln_if(APIC_DEBUG, "All processors initialized and waiting, trigger all to continue");

    // Now trigger all APs to continue execution (need to do this after
    // the regions have been freed so that we don't trigger IPIs
    m_apic_ap_continue.store(1, AK::MemoryOrder::memory_order_release);
}
  365. UNMAP_AFTER_INIT void APIC::enable(u32 cpu)
  366. {
  367. VERIFY(m_is_x2 || cpu < 8);
  368. u32 apic_id;
  369. if (m_is_x2) {
  370. dbgln_if(APIC_DEBUG, "Enable x2APIC on CPU #{}", cpu);
  371. // We need to enable x2 mode on each core independently
  372. set_base(get_base());
  373. apic_id = read_register(APIC_REG_ID);
  374. } else {
  375. dbgln_if(APIC_DEBUG, "Setting logical xAPIC ID for CPU #{}", cpu);
  376. // Use the CPU# as logical apic id
  377. VERIFY(cpu <= 8);
  378. write_register(APIC_REG_LD, (read_register(APIC_REG_LD) & 0x00ffffff) | (cpu << 24));
  379. // read it back to make sure it's actually set
  380. apic_id = read_register(APIC_REG_LD) >> 24;
  381. }
  382. dbgln_if(APIC_DEBUG, "CPU #{} apic id: {}", cpu, apic_id);
  383. Processor::current().info().set_apic_id(apic_id);
  384. dbgln_if(APIC_DEBUG, "Enabling local APIC for CPU #{}, logical APIC ID: {}", cpu, apic_id);
  385. if (cpu == 0) {
  386. SpuriousInterruptHandler::initialize(IRQ_APIC_SPURIOUS);
  387. APICErrInterruptHandler::initialize(IRQ_APIC_ERR);
  388. // register IPI interrupt vector
  389. APICIPIInterruptHandler::initialize(IRQ_APIC_IPI);
  390. }
  391. if (!m_is_x2) {
  392. // local destination mode (flat mode), not supported in x2 mode
  393. write_register(APIC_REG_DF, 0xf0000000);
  394. }
  395. // set error interrupt vector
  396. set_lvt(APIC_REG_LVT_ERR, IRQ_APIC_ERR);
  397. // set spurious interrupt vector
  398. set_siv(APIC_REG_SIV, IRQ_APIC_SPURIOUS);
  399. write_register(APIC_REG_LVT_TIMER, APIC_LVT(0, 0) | APIC_LVT_MASKED);
  400. write_register(APIC_REG_LVT_THERMAL, APIC_LVT(0, 0) | APIC_LVT_MASKED);
  401. write_register(APIC_REG_LVT_PERFORMANCE_COUNTER, APIC_LVT(0, 0) | APIC_LVT_MASKED);
  402. write_register(APIC_REG_LVT_LINT0, APIC_LVT(0, 7) | APIC_LVT_MASKED);
  403. write_register(APIC_REG_LVT_LINT1, APIC_LVT(0, 0) | APIC_LVT_TRIGGER_LEVEL);
  404. write_register(APIC_REG_TPR, 0);
  405. }
// Returns the idle thread pre-created (in do_boot_aps()) for the given AP.
// Only valid for APs (cpu > 0); the BSP's idle thread is managed elsewhere.
Thread* APIC::get_idle_thread(u32 cpu) const
{
    VERIFY(cpu > 0);
    return m_ap_idle_threads[cpu - 1];
}
// Called by each AP once its boot stack is no longer needed: reports in to
// the BSP, waits for the global "continue" signal, then enables its local APIC.
UNMAP_AFTER_INIT void APIC::init_finished(u32 cpu)
{
    // This method is called once the boot stack is no longer needed
    VERIFY(cpu > 0);
    VERIFY(cpu < m_processor_enabled_cnt);
    // Since we're waiting on other APs here, we shouldn't have the
    // scheduler lock
    VERIFY(!g_scheduler_lock.is_locked_by_current_processor());

    // Notify the BSP that we are done initializing. It will unmap the startup data at P8000
    m_apic_ap_count.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);
    dbgln_if(APIC_DEBUG, "APIC: CPU #{} initialized, waiting for all others", cpu);

    // The reason we're making all APs wait until the BSP signals them is that
    // we don't want APs to trigger IPIs (e.g. through MM) while the BSP
    // is unable to process them
    while (!m_apic_ap_continue.load(AK::MemoryOrder::memory_order_consume)) {
        IO::delay(200);
    }

    dbgln_if(APIC_DEBUG, "APIC: CPU #{} continues, all others are initialized", cpu);

    // do_boot_aps() freed memory, so we need to update our tlb
    Processor::flush_entire_tlb_local();

    // Now enable all the interrupts
    APIC::the().enable(cpu);
}
// Sends the IPI vector to every CPU except the caller, using logical
// destination mode with an all-ones destination mask.
void APIC::broadcast_ipi()
{
    dbgln_if(APIC_SMP_DEBUG, "SMP: Broadcast IPI from CPU #{}", Processor::current_id());
    // Make sure any previous IPI has been delivered before issuing a new one.
    wait_for_pending_icr();
    write_icr({ IRQ_APIC_IPI + IRQ_VECTOR_BASE, 0xffffffff, ICRReg::Fixed, ICRReg::Logical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::AllExcludingSelf });
}
// Sends the IPI vector to a single CPU. In x2APIC mode the target is
// addressed physically by its APIC ID; in xAPIC mode the CPU number doubles
// as the logical destination ID (as set up in enable()).
void APIC::send_ipi(u32 cpu)
{
    dbgln_if(APIC_SMP_DEBUG, "SMP: Send IPI from CPU #{} to CPU #{}", Processor::current_id(), cpu);
    VERIFY(cpu != Processor::current_id());
    VERIFY(cpu < Processor::count());
    // Make sure any previous IPI has been delivered before issuing a new one.
    wait_for_pending_icr();
    write_icr({ IRQ_APIC_IPI + IRQ_VECTOR_BASE, m_is_x2 ? Processor::by_id(cpu).info().apic_id() : cpu, ICRReg::Fixed, m_is_x2 ? ICRReg::Physical : ICRReg::Logical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::NoShorthand });
}
// Initializes and calibrates the APIC timer against the given reference timer.
// Returns nullptr if the APIC registers are not reachable (no MMIO mapping
// and not in x2APIC mode); otherwise returns the new timer.
UNMAP_AFTER_INIT APICTimer* APIC::initialize_timers(HardwareTimerBase& calibration_timer)
{
    if (!m_apic_base && !m_is_x2)
        return nullptr;

    // We should only initialize and calibrate the APIC timer once on the BSP!
    VERIFY(Processor::is_bootstrap_processor());
    VERIFY(!m_apic_timer);

    m_apic_timer = APICTimer::initialize(IRQ_APIC_TIMER, calibration_timer);
    return m_apic_timer;
}
  458. void APIC::setup_local_timer(u32 ticks, TimerMode timer_mode, bool enable)
  459. {
  460. u32 flags = 0;
  461. switch (timer_mode) {
  462. case TimerMode::OneShot:
  463. flags |= APIC_LVT_TIMER_ONESHOT;
  464. break;
  465. case TimerMode::Periodic:
  466. flags |= APIC_LVT_TIMER_PERIODIC;
  467. break;
  468. case TimerMode::TSCDeadline:
  469. flags |= APIC_LVT_TIMER_TSCDEADLINE;
  470. break;
  471. }
  472. if (!enable)
  473. flags |= APIC_LVT_MASKED;
  474. write_register(APIC_REG_LVT_TIMER, APIC_LVT(IRQ_APIC_TIMER + IRQ_VECTOR_BASE, 0) | flags);
  475. u32 config = read_register(APIC_REG_TIMER_CONFIGURATION);
  476. config &= ~0xf; // clear divisor (bits 0-3)
  477. switch (get_timer_divisor()) {
  478. case 1:
  479. config |= (1 << 3) | 3;
  480. break;
  481. case 2:
  482. break;
  483. case 4:
  484. config |= 1;
  485. break;
  486. case 8:
  487. config |= 2;
  488. break;
  489. case 16:
  490. config |= 3;
  491. break;
  492. case 32:
  493. config |= (1 << 3);
  494. break;
  495. case 64:
  496. config |= (1 << 3) | 1;
  497. break;
  498. case 128:
  499. config |= (1 << 3) | 2;
  500. break;
  501. default:
  502. VERIFY_NOT_REACHED();
  503. }
  504. write_register(APIC_REG_TIMER_CONFIGURATION, config);
  505. if (timer_mode == TimerMode::Periodic)
  506. write_register(APIC_REG_TIMER_INITIAL_COUNT, ticks / get_timer_divisor());
  507. }
// Returns the APIC timer's current (counting-down) value.
u32 APIC::get_timer_current_count()
{
    return read_register(APIC_REG_TIMER_CURRENT_COUNT);
}
// Fixed divisor applied to the APIC timer's input clock; must be one of the
// values handled by the switch in setup_local_timer().
u32 APIC::get_timer_divisor()
{
    return 16;
}
// The IPI carries no payload; receiving it is the whole point, so just log.
bool APICIPIInterruptHandler::handle_interrupt(const RegisterState&)
{
    dbgln_if(APIC_SMP_DEBUG, "APIC IPI on CPU #{}", Processor::current_id());
    return true;
}
// Acknowledge the IPI at the local APIC (not the PIC).
bool APICIPIInterruptHandler::eoi()
{
    dbgln_if(APIC_SMP_DEBUG, "SMP: IPI EOI");
    APIC::the().eoi();
    return true;
}
// Log APIC error interrupts unconditionally; they indicate a real problem.
bool APICErrInterruptHandler::handle_interrupt(const RegisterState&)
{
    dbgln("APIC: SMP error on CPU #{}", Processor::current_id());
    return true;
}
// Acknowledge the error interrupt at the local APIC.
bool APICErrInterruptHandler::eoi()
{
    APIC::the().eoi();
    return true;
}
// Out-of-line definition for the APIC timer's HardwareTimer base: its EOI is
// sent to the local APIC rather than through an IRQ controller.
bool HardwareTimer<GenericInterruptHandler>::eoi()
{
    APIC::the().eoi();
    return true;
}
  542. }