APIC.cpp

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <AK/Assertions.h>
#include <AK/Memory.h>
#include <AK/Singleton.h>
#include <AK/StringView.h>
#include <AK/Types.h>
#include <Kernel/ACPI/Parser.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Arch/i386/ProcessorInfo.h>
#include <Kernel/IO.h>
#include <Kernel/Interrupts/APIC.h>
#include <Kernel/Interrupts/SpuriousInterruptHandler.h>
#include <Kernel/Thread.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PageDirectory.h>
#include <Kernel/VM/TypedMapping.h>

//#define APIC_DEBUG
//#define APIC_SMP_DEBUG
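
// The local APIC uses the top three IDT vectors: 0xfd for inter-processor interrupts
// (IPIs), 0xfe for APIC error reporting, and 0xff for spurious interrupts. The
// APIC_REG_* offsets below are relative to the memory-mapped local APIC base.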
#define IRQ_APIC_IPI (0xfd - IRQ_VECTOR_BASE)
#define IRQ_APIC_ERR (0xfe - IRQ_VECTOR_BASE)
#define IRQ_APIC_SPURIOUS (0xff - IRQ_VECTOR_BASE)

#define APIC_ICR_DELIVERY_PENDING (1 << 12)
#define APIC_ENABLED (1 << 8)

#define APIC_BASE_MSR 0x1b

#define APIC_REG_EOI 0xb0
#define APIC_REG_LD 0xd0
#define APIC_REG_DF 0xe0
#define APIC_REG_SIV 0xf0
#define APIC_REG_TPR 0x80
#define APIC_REG_ICR_LOW 0x300
#define APIC_REG_ICR_HIGH 0x310
#define APIC_REG_LVT_TIMER 0x320
#define APIC_REG_LVT_THERMAL 0x330
#define APIC_REG_LVT_PERFORMANCE_COUNTER 0x340
#define APIC_REG_LVT_LINT0 0x350
#define APIC_REG_LVT_LINT1 0x360
#define APIC_REG_LVT_ERR 0x370

namespace Kernel {

static AK::Singleton<APIC> s_apic;
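
// Handler for the inter-processor interrupt (IPI) vector. IPIs are used to poke other
// processors (e.g. for TLB flush requests sent through MM); the handler body itself
// only needs to acknowledge the interrupt.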
class APICIPIInterruptHandler final : public GenericInterruptHandler {
public:
    explicit APICIPIInterruptHandler(u8 interrupt_vector)
        : GenericInterruptHandler(interrupt_vector, true)
    {
    }
    virtual ~APICIPIInterruptHandler()
    {
    }

    static void initialize(u8 interrupt_number)
    {
        new APICIPIInterruptHandler(interrupt_number);
    }

    virtual void handle_interrupt(const RegisterState&) override;

    virtual bool eoi() override;

    virtual HandlerType type() const override { return HandlerType::IRQHandler; }
    virtual const char* purpose() const override { return "IPI Handler"; }
    virtual const char* controller() const override { return nullptr; }

    virtual size_t sharing_devices_count() const override { return 0; }
    virtual bool is_shared_handler() const override { return false; }
    virtual bool is_sharing_with_others() const override { return false; }

private:
};
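
// Handler for the APIC error vector, which the local APIC raises when it records an
// internal error condition; the implementation below simply logs it.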
class APICErrInterruptHandler final : public GenericInterruptHandler {
public:
    explicit APICErrInterruptHandler(u8 interrupt_vector)
        : GenericInterruptHandler(interrupt_vector, true)
    {
    }
    virtual ~APICErrInterruptHandler()
    {
    }

    static void initialize(u8 interrupt_number)
    {
        new APICErrInterruptHandler(interrupt_number);
    }

    virtual void handle_interrupt(const RegisterState&) override;

    virtual bool eoi() override;

    virtual HandlerType type() const override { return HandlerType::IRQHandler; }
    virtual const char* purpose() const override { return "SMP Error Handler"; }
    virtual const char* controller() const override { return nullptr; }

    virtual size_t sharing_devices_count() const override { return 0; }
    virtual bool is_shared_handler() const override { return false; }
    virtual bool is_sharing_with_others() const override { return false; }

private:
};

bool APIC::initialized()
{
    return s_apic.is_initialized();
}

APIC& APIC::the()
{
    ASSERT(APIC::initialized());
    return *s_apic;
}

void APIC::initialize()
{
    ASSERT(!APIC::initialized());
    s_apic.ensure_instance();
}
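
// The local APIC's physical base and enable state live in the IA32_APIC_BASE MSR
// (0x1b): bits 12 and up hold the page-aligned base address, and bit 11 (0x800) is
// the global enable flag. get_base() masks off the low bits; set_base() sets the
// enable flag along with the base.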
PhysicalAddress APIC::get_base()
{
    u32 lo, hi;
    MSR msr(APIC_BASE_MSR);
    msr.get(lo, hi);
    return PhysicalAddress(lo & 0xfffff000);
}

void APIC::set_base(const PhysicalAddress& base)
{
    u32 hi = 0;
    u32 lo = base.get() | 0x800;
    MSR msr(APIC_BASE_MSR);
    msr.set(lo, hi);
}
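
// All APIC register accesses go through the memory-mapped window at m_apic_base as
// volatile 32-bit loads and stores.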
void APIC::write_register(u32 offset, u32 value)
{
    *reinterpret_cast<volatile u32*>(m_apic_base->vaddr().offset(offset).as_ptr()) = value;
}

u32 APIC::read_register(u32 offset)
{
    return *reinterpret_cast<volatile u32*>(m_apic_base->vaddr().offset(offset).as_ptr());
}
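
// Note: masking with 0xffffffff preserves the register's current contents, so the
// vector (and, in set_siv(), the APIC software-enable bit) is OR'd on top of
// whatever is already programmed.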
void APIC::set_lvt(u32 offset, u8 interrupt)
{
    write_register(offset, (read_register(offset) & 0xffffffff) | interrupt);
}

void APIC::set_siv(u32 offset, u8 interrupt)
{
    write_register(offset, (read_register(offset) & 0xffffffff) | interrupt | APIC_ENABLED);
}
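
// Bit 12 of the low ICR word is the delivery status bit; it stays set while the APIC
// is still delivering the previous IPI, so we spin until it clears before writing a
// new command.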
void APIC::wait_for_pending_icr()
{
    while ((read_register(APIC_REG_ICR_LOW) & APIC_ICR_DELIVERY_PENDING) != 0) {
        IO::delay(200);
    }
}

void APIC::write_icr(const ICRReg& icr)
{
    write_register(APIC_REG_ICR_HIGH, icr.high());
    write_register(APIC_REG_ICR_LOW, icr.low());
}

#define APIC_LVT_MASKED (1 << 16)
#define APIC_LVT_TRIGGER_LEVEL (1 << 14)
#define APIC_LVT(iv, dm) ((iv & 0xff) | ((dm & 0x7) << 8))
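
// Symbols exported by the AP startup trampoline (assembly). do_boot_aps() copies the
// trampoline to physical address 0x8000 and patches these variables with the boot
// parameters (per-CPU stacks, Processor structs, control registers, GDT/IDT) before
// starting the APs.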
extern "C" void apic_ap_start(void);
extern "C" u16 apic_ap_start_size;
extern "C" u32 ap_cpu_init_stacks;
extern "C" u32 ap_cpu_init_processor_info_array;
extern "C" u32 ap_cpu_init_cr0;
extern "C" u32 ap_cpu_init_cr3;
extern "C" u32 ap_cpu_init_cr4;
extern "C" u32 ap_cpu_gdtr;
extern "C" u32 ap_cpu_idtr;

void APIC::eoi()
{
    write_register(APIC_REG_EOI, 0x0);
}

u8 APIC::spurious_interrupt_vector()
{
    return IRQ_APIC_SPURIOUS;
}
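
// Computes the address of a trampoline variable inside the relocated copy at vaddr:
// take the variable's offset from apic_ap_start and add it to the base of the copy.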
#define APIC_INIT_VAR_PTR(tpe, vaddr, varname)                         \
    reinterpret_cast<volatile tpe*>(reinterpret_cast<ptrdiff_t>(vaddr) \
        + reinterpret_cast<ptrdiff_t>(&varname)                        \
        - reinterpret_cast<ptrdiff_t>(&apic_ap_start))
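
// Initializes the bootstrap processor's local APIC: verifies CPU support, maps the
// APIC registers, and walks the ACPI MADT ("APIC") table to count the processors
// present and enabled. Returns false if any prerequisite is missing.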
bool APIC::init_bsp()
{
    // FIXME: Use the ACPI MADT table
    if (!MSR::have())
        return false;

    // check if we support local apic
    CPUID id(1);
    if ((id.edx() & (1 << 9)) == 0)
        return false;

    PhysicalAddress apic_base = get_base();
#ifdef APIC_DEBUG
    klog() << "Initializing APIC, base: " << apic_base;
#endif
    set_base(apic_base);

    m_apic_base = MM.allocate_kernel_region(apic_base.page_base(), PAGE_SIZE, {}, Region::Access::Read | Region::Access::Write);
    if (!m_apic_base) {
        klog() << "APIC: Failed to allocate memory for APIC base";
        return false;
    }

    auto rsdp = ACPI::StaticParsing::find_rsdp();
    if (!rsdp.has_value()) {
        klog() << "APIC: RSDP not found";
        return false;
    }
    auto madt_address = ACPI::StaticParsing::find_table(rsdp.value(), "APIC");
    if (madt_address.is_null()) {
        klog() << "APIC: MADT table not found";
        return false;
    }

    auto madt = map_typed<ACPI::Structures::MADT>(madt_address);
    size_t entry_index = 0;
    size_t entries_length = madt->h.length - sizeof(ACPI::Structures::MADT);
    auto* madt_entry = madt->entries;
    while (entries_length > 0) {
        size_t entry_length = madt_entry->length;
        if (madt_entry->type == (u8)ACPI::Structures::MADTEntryType::LocalAPIC) {
            auto* plapic_entry = (const ACPI::Structures::MADTEntries::ProcessorLocalAPIC*)madt_entry;
#ifdef APIC_DEBUG
            klog() << "APIC: AP found @ MADT entry " << entry_index << ", Processor Id: " << String::format("%02x", plapic_entry->acpi_processor_id)
                   << " APIC Id: " << String::format("%02x", plapic_entry->apic_id) << " Flags: " << String::format("%08x", plapic_entry->flags);
#endif
            m_processor_cnt++;
            if ((plapic_entry->flags & 0x1) != 0)
                m_processor_enabled_cnt++;
        }
        madt_entry = (ACPI::Structures::MADTEntryHeader*)(VirtualAddress(madt_entry).offset(entry_length).get());
        entries_length -= entry_length;
        entry_index++;
    }

    if (m_processor_enabled_cnt < 1)
        m_processor_enabled_cnt = 1;
    if (m_processor_cnt < 1)
        m_processor_cnt = 1;

    klog() << "APIC Processors found: " << m_processor_cnt << ", enabled: " << m_processor_enabled_cnt;

    enable(0);
    return true;
}
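
// Boots the application processors using the classic INIT-SIPI-SIPI sequence: copy
// the trampoline to 0x8000, patch in the per-CPU stacks, Processor structs, control
// register values and GDT/IDT pointers, then send an INIT IPI followed by two
// STARTUP IPIs and wait for every AP to check in via init_finished().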
void APIC::do_boot_aps()
{
    ASSERT(m_processor_enabled_cnt > 1);
    u32 aps_to_enable = m_processor_enabled_cnt - 1;

    // Copy the APIC startup code and variables to P0x00008000
    // Also account for the data appended to it:
    // * aps_to_enable u32 values for ap_cpu_init_stacks
    // * aps_to_enable u32 values for ap_cpu_init_processor_info_array
    auto apic_startup_region = MM.allocate_kernel_region_identity(PhysicalAddress(0x8000), PAGE_ROUND_UP(apic_ap_start_size + (2 * aps_to_enable * sizeof(u32))), {}, Region::Access::Read | Region::Access::Write | Region::Access::Execute);
    memcpy(apic_startup_region->vaddr().as_ptr(), reinterpret_cast<const void*>(apic_ap_start), apic_ap_start_size);

    // Allocate enough stacks for all APs
    Vector<OwnPtr<Region>> apic_ap_stacks;
    for (u32 i = 0; i < aps_to_enable; i++) {
        auto stack_region = MM.allocate_kernel_region(Thread::default_kernel_stack_size, {}, Region::Access::Read | Region::Access::Write, false, true, true);
        if (!stack_region) {
            klog() << "APIC: Failed to allocate stack for AP #" << i;
            return;
        }
        stack_region->set_stack(true);
        apic_ap_stacks.append(move(stack_region));
    }

    // Store pointers to all stacks for the APs to use
    auto ap_stack_array = APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_stacks);
    ASSERT(aps_to_enable == apic_ap_stacks.size());
    for (size_t i = 0; i < aps_to_enable; i++) {
        ap_stack_array[i] = apic_ap_stacks[i]->vaddr().get() + Thread::default_kernel_stack_size;
#ifdef APIC_DEBUG
        klog() << "APIC: CPU[" << (i + 1) << "] stack at " << VirtualAddress(ap_stack_array[i]);
#endif
    }

    // Allocate Processor structures for all APs and store the pointer to the data
    m_ap_processor_info.resize(aps_to_enable);
    for (size_t i = 0; i < aps_to_enable; i++)
        m_ap_processor_info[i] = make<Processor>();
    auto ap_processor_info_array = &ap_stack_array[aps_to_enable];
    for (size_t i = 0; i < aps_to_enable; i++) {
        ap_processor_info_array[i] = FlatPtr(m_ap_processor_info[i].ptr());
#ifdef APIC_DEBUG
        klog() << "APIC: CPU[" << (i + 1) << "] Processor at " << VirtualAddress(ap_processor_info_array[i]);
#endif
    }
    *APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_processor_info_array) = FlatPtr(&ap_processor_info_array[0]);

    // Store the BSP's CR3 value for the APs to use
    *APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_cr3) = MM.kernel_page_directory().cr3();

    // Store the BSP's GDT and IDT for the APs to use
    const auto& gdtr = Processor::current().get_gdtr();
    *APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_gdtr) = FlatPtr(&gdtr);
    const auto& idtr = get_idtr();
    *APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_idtr) = FlatPtr(&idtr);

    // Store the BSP's CR0 and CR4 values for the APs to use
    *APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_cr0) = read_cr0();
    *APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_cr4) = read_cr4();

    // Create an idle thread for each processor. We have to do this here
    // because we won't be able to send FlushTLB messages, so we have to
    // have all memory set up for the threads so that when the APs are
    // starting up, they can access all the memory properly
    m_ap_idle_threads.resize(aps_to_enable);
    for (u32 i = 0; i < aps_to_enable; i++)
        m_ap_idle_threads[i] = Scheduler::create_ap_idle_thread(i + 1);

#ifdef APIC_DEBUG
    klog() << "APIC: Starting " << aps_to_enable << " AP(s)";
#endif

    // INIT
    write_icr(ICRReg(0, ICRReg::INIT, ICRReg::Physical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::AllExcludingSelf));
    IO::delay(10 * 1000);

    for (int i = 0; i < 2; i++) {
        // SIPI
        write_icr(ICRReg(0x08, ICRReg::StartUp, ICRReg::Physical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::AllExcludingSelf)); // start execution at P8000
        IO::delay(200);
    }

    // Now wait until all APs have checked in via m_apic_ap_count, which means they are
    // initialized and no longer need these special mappings
    if (m_apic_ap_count.load(AK::MemoryOrder::memory_order_consume) != aps_to_enable) {
#ifdef APIC_DEBUG
        klog() << "APIC: Waiting for " << aps_to_enable << " AP(s) to finish initialization...";
#endif
        do {
            // Wait a little bit
            IO::delay(200);
        } while (m_apic_ap_count.load(AK::MemoryOrder::memory_order_consume) != aps_to_enable);
    }

#ifdef APIC_DEBUG
    klog() << "APIC: " << m_processor_enabled_cnt << " processors are initialized and running";
#endif
}

void APIC::boot_aps()
{
    if (m_processor_enabled_cnt <= 1)
        return;

    // We split this into another call because do_boot_aps() will cause
    // MM calls upon exit, and we don't want to call smp_enable before that
    do_boot_aps();

    // Enable SMP, which means IPIs may now be sent
    Processor::smp_enable();

#ifdef APIC_DEBUG
    dbg() << "All processors initialized and waiting, trigger all to continue";
#endif

    // Now trigger all APs to continue execution (need to do this after
    // the regions have been freed so that we don't trigger IPIs)
    m_apic_ap_continue.store(1, AK::MemoryOrder::memory_order_release);
}
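
// Enables the local APIC on the calling processor: programs its logical destination
// ID (one bit per CPU, flat model), registers the spurious/error/IPI handlers when
// called on the BSP (cpu 0), sets the spurious interrupt vector, masks the unused LVT
// entries, and clears the task priority register so no interrupts are blocked.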
void APIC::enable(u32 cpu)
{
    if (cpu >= 8) {
        // TODO: x2apic support?
        klog() << "SMP support is currently limited to 8 CPUs!";
        Processor::halt();
    }

    u32 apic_id = (1u << cpu);

    write_register(APIC_REG_LD, (read_register(APIC_REG_LD) & 0x00ffffff) | (apic_id << 24)); // TODO: only if not in x2apic mode

    // read it back to make sure it's actually set
    apic_id = read_register(APIC_REG_LD) >> 24;
    Processor::current().info().set_apic_id(apic_id);

#ifdef APIC_DEBUG
    klog() << "Enabling local APIC for cpu #" << cpu << " apic id: " << apic_id;
#endif

    if (cpu == 0) {
        SpuriousInterruptHandler::initialize(IRQ_APIC_SPURIOUS);

        // set error interrupt vector
        set_lvt(APIC_REG_LVT_ERR, IRQ_APIC_ERR);
        APICErrInterruptHandler::initialize(IRQ_APIC_ERR);

        // register IPI interrupt vector
        APICIPIInterruptHandler::initialize(IRQ_APIC_IPI);
    }

    // set spurious interrupt vector
    set_siv(APIC_REG_SIV, IRQ_APIC_SPURIOUS);

    // local destination mode (flat mode)
    write_register(APIC_REG_DF, 0xf0000000);

    write_register(APIC_REG_LVT_TIMER, APIC_LVT(0, 0) | APIC_LVT_MASKED);
    write_register(APIC_REG_LVT_THERMAL, APIC_LVT(0, 0) | APIC_LVT_MASKED);
    write_register(APIC_REG_LVT_PERFORMANCE_COUNTER, APIC_LVT(0, 0) | APIC_LVT_MASKED);
    write_register(APIC_REG_LVT_LINT0, APIC_LVT(0, 7) | APIC_LVT_MASKED);
    write_register(APIC_REG_LVT_LINT1, APIC_LVT(0, 0) | APIC_LVT_TRIGGER_LEVEL);

    write_register(APIC_REG_TPR, 0);
}

Thread* APIC::get_idle_thread(u32 cpu) const
{
    ASSERT(cpu > 0);
    return m_ap_idle_threads[cpu - 1];
}

void APIC::init_finished(u32 cpu)
{
    // This method is called once the boot stack is no longer needed
    ASSERT(cpu > 0);
    ASSERT(cpu < m_processor_enabled_cnt);
    // Since we're waiting on other APs here, we shouldn't have the
    // scheduler lock
    ASSERT(!g_scheduler_lock.own_lock());

    // Notify the BSP that we are done initializing. It will unmap the startup data at P8000
    m_apic_ap_count.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);
#ifdef APIC_DEBUG
    klog() << "APIC: cpu #" << cpu << " initialized, waiting for all others";
#endif

    // The reason we're making all APs wait until the BSP signals them is that
    // we don't want APs to trigger IPIs (e.g. through MM) while the BSP
    // is unable to process them
    while (!m_apic_ap_continue.load(AK::MemoryOrder::memory_order_consume)) {
        IO::delay(200);
    }

#ifdef APIC_DEBUG
    klog() << "APIC: cpu #" << cpu << " continues, all others are initialized";
#endif

    // do_boot_aps() freed memory, so we need to update our tlb
    Processor::flush_entire_tlb_local();

    // Now enable all the interrupts
    APIC::the().enable(cpu);
}
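
// IPIs are sent in logical destination mode: broadcast_ipi() targets all other
// processors via the AllExcludingSelf shorthand, while send_ipi() addresses a single
// CPU by its logical APIC ID bit.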
void APIC::broadcast_ipi()
{
#ifdef APIC_SMP_DEBUG
    klog() << "SMP: Broadcast IPI from cpu #" << Processor::current().id();
#endif
    wait_for_pending_icr();
    write_icr(ICRReg(IRQ_APIC_IPI + IRQ_VECTOR_BASE, ICRReg::Fixed, ICRReg::Logical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::AllExcludingSelf));
}

void APIC::send_ipi(u32 cpu)
{
    auto& proc = Processor::current();
#ifdef APIC_SMP_DEBUG
    klog() << "SMP: Send IPI from cpu #" << proc.id() << " to cpu #" << cpu;
#endif
    ASSERT(cpu != proc.id());
    ASSERT(cpu < 8);
    wait_for_pending_icr();
    write_icr(ICRReg(IRQ_APIC_IPI + IRQ_VECTOR_BASE, ICRReg::Fixed, ICRReg::Logical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::NoShorthand, 1u << cpu));
}

void APICIPIInterruptHandler::handle_interrupt(const RegisterState&)
{
#ifdef APIC_SMP_DEBUG
    klog() << "APIC IPI on cpu #" << Processor::current().id();
#endif
}

bool APICIPIInterruptHandler::eoi()
{
#ifdef APIC_SMP_DEBUG
    klog() << "SMP: IPI eoi";
#endif
    APIC::the().eoi();
    return true;
}

void APICErrInterruptHandler::handle_interrupt(const RegisterState&)
{
    klog() << "APIC: SMP error on cpu #" << Processor::current().id();
}

bool APICErrInterruptHandler::eoi()
{
    APIC::the().eoi();
    return true;
}

}