CPU.cpp

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <AK/Assertions.h>
#include <AK/ScopeGuard.h>
#include <AK/String.h>
#include <AK/StringBuilder.h>
#include <AK/Types.h>
#include <Kernel/Arch/x86/CPU.h>
#include <Kernel/Arch/x86/ISRStubs.h>
#include <Kernel/Arch/x86/ProcessorInfo.h>
#include <Kernel/Arch/x86/SafeMem.h>
#include <Kernel/Debug.h>
#include <Kernel/IO.h>
#include <Kernel/Interrupts/APIC.h>
#include <Kernel/Interrupts/GenericInterruptHandler.h>
#include <Kernel/Interrupts/SharedIRQHandler.h>
#include <Kernel/Interrupts/SpuriousInterruptHandler.h>
#include <Kernel/Interrupts/UnhandledInterruptHandler.h>
#include <Kernel/KSyms.h>
#include <Kernel/Panic.h>
#include <Kernel/Process.h>
#include <Kernel/Random.h>
#include <Kernel/Thread.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PageDirectory.h>
#include <Kernel/VM/ProcessPagingScope.h>
#include <LibC/mallocdefs.h>

extern FlatPtr start_of_unmap_after_init;
extern FlatPtr end_of_unmap_after_init;
extern FlatPtr start_of_ro_after_init;
extern FlatPtr end_of_ro_after_init;

namespace Kernel {

READONLY_AFTER_INIT static DescriptorTablePointer s_idtr;
READONLY_AFTER_INIT static Descriptor s_idt[256];

static GenericInterruptHandler* s_interrupt_handler[GENERIC_INTERRUPT_HANDLERS_COUNT];

static EntropySource s_entropy_source_interrupts { EntropySource::Static::Interrupts };

// The compiler can't see the calls to these functions inside assembly.
// Declare them, to avoid dead code warnings.
extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread);
extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, TrapFrame* trap);
extern "C" u32 do_init_context(Thread* thread, u32 flags);
extern "C" void exit_kernel_thread(void);
extern "C" void pre_init_finished(void);
extern "C" void post_init_finished(void);
extern "C" void handle_interrupt(TrapFrame*);
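
// The EH_ENTRY and EH_ENTRY_NO_CODE macros below generate the assembly entry
// stubs for CPU exceptions. EH_ENTRY is used for exceptions where the CPU
// pushes an error code itself; EH_ENTRY_NO_CODE pushes a dummy 0 error code
// first, so both kinds of stub end up building the same RegisterState/TrapFrame
// layout before calling the C++ handler and jumping to common_trap_exit.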
// clang-format off
#if ARCH(I386)
#define EH_ENTRY(ec, title) \
    extern "C" void title##_asm_entry(); \
    extern "C" void title##_handler(TrapFrame*); \
    asm( \
        ".globl " #title "_asm_entry\n" \
        "" #title "_asm_entry: \n" \
        " pusha\n" \
        " pushl %ds\n" \
        " pushl %es\n" \
        " pushl %fs\n" \
        " pushl %gs\n" \
        " pushl %ss\n" \
        " mov $" __STRINGIFY(GDT_SELECTOR_DATA0) ", %ax\n" \
        " mov %ax, %ds\n" \
        " mov %ax, %es\n" \
        " mov $" __STRINGIFY(GDT_SELECTOR_PROC) ", %ax\n" \
        " mov %ax, %fs\n" \
        " pushl %esp \n" /* set TrapFrame::regs */ \
        " subl $" __STRINGIFY(TRAP_FRAME_SIZE - 4) ", %esp \n" \
        " pushl %esp \n" \
        " cld\n" \
        " call enter_trap_no_irq \n" \
        " call " #title "_handler\n" \
        " jmp common_trap_exit \n");

#define EH_ENTRY_NO_CODE(ec, title) \
    extern "C" void title##_handler(TrapFrame*); \
    extern "C" void title##_asm_entry(); \
    asm( \
        ".globl " #title "_asm_entry\n" \
        "" #title "_asm_entry: \n" \
        " pushl $0x0\n" \
        " pusha\n" \
        " pushl %ds\n" \
        " pushl %es\n" \
        " pushl %fs\n" \
        " pushl %gs\n" \
        " pushl %ss\n" \
        " mov $" __STRINGIFY(GDT_SELECTOR_DATA0) ", %ax\n" \
        " mov %ax, %ds\n" \
        " mov %ax, %es\n" \
        " mov $" __STRINGIFY(GDT_SELECTOR_PROC) ", %ax\n" \
        " mov %ax, %fs\n" \
        " pushl %esp \n" /* set TrapFrame::regs */ \
        " subl $" __STRINGIFY(TRAP_FRAME_SIZE - 4) ", %esp \n" \
        " pushl %esp \n" \
        " cld\n" \
        " call enter_trap_no_irq \n" \
        " call " #title "_handler\n" \
        " jmp common_trap_exit \n");

#elif ARCH(X86_64)
#define EH_ENTRY(ec, title) \
    extern "C" void title##_asm_entry(); \
    extern "C" void title##_handler(TrapFrame*); \
    asm( \
        ".globl " #title "_asm_entry\n" \
        "" #title "_asm_entry: \n" \
        " cli;hlt;\n" \
    );

#define EH_ENTRY_NO_CODE(ec, title) \
    extern "C" void title##_handler(TrapFrame*); \
    extern "C" void title##_asm_entry(); \
    asm( \
        ".globl " #title "_asm_entry\n" \
        "" #title "_asm_entry: \n" \
        " cli;hlt;\n" \
    );
#endif
// clang-format on

static void dump(const RegisterState& regs)
{
    u16 ss;
    u32 esp;
    if (!(regs.cs & 3)) {
        ss = regs.ss;
        esp = regs.esp;
    } else {
        ss = regs.userspace_ss;
        esp = regs.userspace_esp;
    }
    dbgln("Exception code: {:04x} (isr: {:04x})", regs.exception_code, regs.isr_number);
    dbgln(" pc={:04x}:{:08x} eflags={:08x}", (u16)regs.cs, regs.eip, regs.eflags);
    dbgln(" stack={:04x}:{:08x}", ss, esp);
    dbgln(" ds={:04x} es={:04x} fs={:04x} gs={:04x}", (u16)regs.ds, (u16)regs.es, (u16)regs.fs, (u16)regs.gs);
    dbgln(" eax={:08x} ebx={:08x} ecx={:08x} edx={:08x}", regs.eax, regs.ebx, regs.ecx, regs.edx);
    dbgln(" ebp={:08x} esp={:08x} esi={:08x} edi={:08x}", regs.ebp, regs.esp, regs.esi, regs.edi);
    dbgln(" cr0={:08x} cr2={:08x} cr3={:08x} cr4={:08x}", read_cr0(), read_cr2(), read_cr3(), read_cr4());
}

void handle_crash(RegisterState& regs, const char* description, int signal, bool out_of_memory)
{
    auto process = Process::current();
    if (!process) {
        PANIC("{} with !current", description);
    }
    // If a process crashed while inspecting another process,
    // make sure we switch back to the right page tables.
    MM.enter_process_paging_scope(*process);
    dmesgln("CRASH: CPU #{} {} in ring {}", Processor::id(), description, (regs.cs & 3));
    dump(regs);
    if (!(regs.cs & 3)) {
        PANIC("Crash in ring 0");
    }
    process->crash(signal, regs.eip, out_of_memory);
}

EH_ENTRY_NO_CODE(6, illegal_instruction);
void illegal_instruction_handler(TrapFrame* trap)
{
    clac();
    handle_crash(*trap->regs, "Illegal instruction", SIGILL);
}

EH_ENTRY_NO_CODE(0, divide_error);
void divide_error_handler(TrapFrame* trap)
{
    clac();
    handle_crash(*trap->regs, "Divide error", SIGFPE);
}

EH_ENTRY(13, general_protection_fault);
void general_protection_fault_handler(TrapFrame* trap)
{
    clac();
    handle_crash(*trap->regs, "General protection fault", SIGSEGV);
}

// 7: FPU not available exception
EH_ENTRY_NO_CODE(7, fpu_exception);
void fpu_exception_handler(TrapFrame*)
{
    // Just clear the TS flag. We've already restored the FPU state eagerly.
    // FIXME: It would be nice if we didn't have to do this at all.
    asm volatile("clts");
}

// 14: Page Fault
EH_ENTRY(14, page_fault);
void page_fault_handler(TrapFrame* trap)
{
    clac();
    auto& regs = *trap->regs;
    auto fault_address = read_cr2();
    if constexpr (PAGE_FAULT_DEBUG) {
        u32 fault_page_directory = read_cr3();
        dbgln("CPU #{} ring {} {} page fault in PD={:#x}, {}{} {}",
            Processor::is_initialized() ? Processor::id() : 0,
            regs.cs & 3,
            regs.exception_code & 1 ? "PV" : "NP",
            fault_page_directory,
            regs.exception_code & 8 ? "reserved-bit " : "",
            regs.exception_code & 2 ? "write" : "read",
            VirtualAddress(fault_address));
        dump(regs);
    }
    bool faulted_in_kernel = !(regs.cs & 3);
    if (faulted_in_kernel && Processor::current().in_irq()) {
        // If we're faulting in an IRQ handler, first check if we failed
        // due to safe_memcpy, safe_strnlen, or safe_memset. If we did,
        // gracefully continue immediately. Because we're in an IRQ handler
        // we can't really try to resolve the page fault in a meaningful
        // way, so we need to do this before calling into
        // MemoryManager::handle_page_fault, which would just bail and
        // request a crash
        if (handle_safe_access_fault(regs, fault_address))
            return;
    }
    auto current_thread = Thread::current();
    if (current_thread)
        current_thread->set_handling_page_fault(true);
    ScopeGuard guard = [current_thread] {
        if (current_thread)
            current_thread->set_handling_page_fault(false);
    };
    if (!faulted_in_kernel && !MM.validate_user_stack(current_thread->process(), VirtualAddress(regs.userspace_esp))) {
        dbgln("Invalid stack pointer: {}", VirtualAddress(regs.userspace_esp));
        handle_crash(regs, "Bad stack on page fault", SIGSTKFLT);
    }
    if (fault_address >= (FlatPtr)&start_of_ro_after_init && fault_address < (FlatPtr)&end_of_ro_after_init) {
        dump(regs);
        PANIC("Attempt to write into READONLY_AFTER_INIT section");
    }
    if (fault_address >= (FlatPtr)&start_of_unmap_after_init && fault_address < (FlatPtr)&end_of_unmap_after_init) {
        dump(regs);
        PANIC("Attempt to access UNMAP_AFTER_INIT section");
    }
    auto response = MM.handle_page_fault(PageFault(regs.exception_code, VirtualAddress(fault_address)));
    if (response == PageFaultResponse::ShouldCrash || response == PageFaultResponse::OutOfMemory) {
        if (faulted_in_kernel && handle_safe_access_fault(regs, fault_address)) {
            // If this would be a ring0 (kernel) fault and the fault was triggered by
            // safe_memcpy, safe_strnlen, or safe_memset then we resume execution at
            // the appropriate _fault label rather than crashing
            return;
        }
        if (response != PageFaultResponse::OutOfMemory) {
            if (current_thread->has_signal_handler(SIGSEGV)) {
                current_thread->send_urgent_signal_to_self(SIGSEGV);
                return;
            }
        }
        dbgln("Unrecoverable page fault, {}{}{} address {}",
            regs.exception_code & PageFaultFlags::ReservedBitViolation ? "reserved bit violation / " : "",
            regs.exception_code & PageFaultFlags::InstructionFetch ? "instruction fetch / " : "",
            regs.exception_code & PageFaultFlags::Write ? "write to" : "read from",
            VirtualAddress(fault_address));
        u32 malloc_scrub_pattern = explode_byte(MALLOC_SCRUB_BYTE);
        u32 free_scrub_pattern = explode_byte(FREE_SCRUB_BYTE);
        u32 kmalloc_scrub_pattern = explode_byte(KMALLOC_SCRUB_BYTE);
        u32 kfree_scrub_pattern = explode_byte(KFREE_SCRUB_BYTE);
        u32 slab_alloc_scrub_pattern = explode_byte(SLAB_ALLOC_SCRUB_BYTE);
        u32 slab_dealloc_scrub_pattern = explode_byte(SLAB_DEALLOC_SCRUB_BYTE);
        if ((fault_address & 0xffff0000) == (malloc_scrub_pattern & 0xffff0000)) {
            dbgln("Note: Address {} looks like it may be uninitialized malloc() memory", VirtualAddress(fault_address));
        } else if ((fault_address & 0xffff0000) == (free_scrub_pattern & 0xffff0000)) {
            dbgln("Note: Address {} looks like it may be recently free()'d memory", VirtualAddress(fault_address));
        } else if ((fault_address & 0xffff0000) == (kmalloc_scrub_pattern & 0xffff0000)) {
            dbgln("Note: Address {} looks like it may be uninitialized kmalloc() memory", VirtualAddress(fault_address));
        } else if ((fault_address & 0xffff0000) == (kfree_scrub_pattern & 0xffff0000)) {
            dbgln("Note: Address {} looks like it may be recently kfree()'d memory", VirtualAddress(fault_address));
        } else if ((fault_address & 0xffff0000) == (slab_alloc_scrub_pattern & 0xffff0000)) {
            dbgln("Note: Address {} looks like it may be uninitialized slab_alloc() memory", VirtualAddress(fault_address));
        } else if ((fault_address & 0xffff0000) == (slab_dealloc_scrub_pattern & 0xffff0000)) {
            dbgln("Note: Address {} looks like it may be recently slab_dealloc()'d memory", VirtualAddress(fault_address));
        } else if (fault_address < 4096) {
            dbgln("Note: Address {} looks like a possible nullptr dereference", VirtualAddress(fault_address));
        }
        handle_crash(regs, "Page Fault", SIGSEGV, response == PageFaultResponse::OutOfMemory);
    } else if (response == PageFaultResponse::Continue) {
#if PAGE_FAULT_DEBUG
        dbgln("Continuing after resolved page fault");
#endif
    } else {
        VERIFY_NOT_REACHED();
    }
}

EH_ENTRY_NO_CODE(1, debug);
void debug_handler(TrapFrame* trap)
{
    clac();
    auto& regs = *trap->regs;
    auto current_thread = Thread::current();
    auto& process = current_thread->process();
    if ((regs.cs & 3) == 0) {
        PANIC("Debug exception in ring 0");
    }
    constexpr u8 REASON_SINGLESTEP = 14;
    bool is_reason_singlestep = (read_dr6() & (1 << REASON_SINGLESTEP));
    if (!is_reason_singlestep)
        return;
    if (auto tracer = process.tracer()) {
        tracer->set_regs(regs);
    }
    current_thread->send_urgent_signal_to_self(SIGTRAP);
}

EH_ENTRY_NO_CODE(3, breakpoint);
void breakpoint_handler(TrapFrame* trap)
{
    clac();
    auto& regs = *trap->regs;
    auto current_thread = Thread::current();
    auto& process = current_thread->process();
    if ((regs.cs & 3) == 0) {
        PANIC("Breakpoint trap in ring 0");
    }
    if (auto tracer = process.tracer()) {
        tracer->set_regs(regs);
    }
    current_thread->send_urgent_signal_to_self(SIGTRAP);
}

#define EH(i, msg) \
    static void _exception##i() \
    { \
        dbgln("{}", msg); \
        PANIC("cr0={:08x} cr2={:08x} cr3={:08x} cr4={:08x}", read_cr0(), read_cr2(), read_cr3(), read_cr4()); \
    }

EH(2, "Unknown error")
EH(4, "Overflow")
EH(5, "Bounds check")
EH(8, "Double fault")
EH(9, "Coprocessor segment overrun")
EH(10, "Invalid TSS")
EH(11, "Segment not present")
EH(12, "Stack exception")
EH(15, "Unknown error")
EH(16, "Coprocessor error")

const DescriptorTablePointer& get_idtr()
{
    return s_idtr;
}

static void unimp_trap()
{
    PANIC("Unhandled IRQ");
}

GenericInterruptHandler& get_interrupt_handler(u8 interrupt_number)
{
    auto*& handler_slot = s_interrupt_handler[interrupt_number];
    VERIFY(handler_slot != nullptr);
    return *handler_slot;
}

static void revert_to_unused_handler(u8 interrupt_number)
{
    auto handler = new UnhandledInterruptHandler(interrupt_number);
    handler->register_interrupt_handler();
}

void register_generic_interrupt_handler(u8 interrupt_number, GenericInterruptHandler& handler)
{
    VERIFY(interrupt_number < GENERIC_INTERRUPT_HANDLERS_COUNT);
    auto*& handler_slot = s_interrupt_handler[interrupt_number];
    if (handler_slot != nullptr) {
        if (handler_slot->type() == HandlerType::UnhandledInterruptHandler) {
            if (handler_slot) {
                auto* unhandled_handler = static_cast<UnhandledInterruptHandler*>(handler_slot);
                unhandled_handler->unregister_interrupt_handler();
                delete unhandled_handler;
            }
            handler_slot = &handler;
            return;
        }
        if (handler_slot->is_shared_handler() && !handler_slot->is_sharing_with_others()) {
            VERIFY(handler_slot->type() == HandlerType::SharedIRQHandler);
            static_cast<SharedIRQHandler*>(handler_slot)->register_handler(handler);
            return;
        }
        if (!handler_slot->is_shared_handler()) {
            if (handler_slot->type() == HandlerType::SpuriousInterruptHandler) {
                static_cast<SpuriousInterruptHandler*>(handler_slot)->register_handler(handler);
                return;
            }
            VERIFY(handler_slot->type() == HandlerType::IRQHandler);
            auto& previous_handler = *handler_slot;
            handler_slot = nullptr;
            SharedIRQHandler::initialize(interrupt_number);
            VERIFY(handler_slot);
            static_cast<SharedIRQHandler*>(handler_slot)->register_handler(previous_handler);
            static_cast<SharedIRQHandler*>(handler_slot)->register_handler(handler);
            return;
        }
        VERIFY_NOT_REACHED();
    } else {
        handler_slot = &handler;
    }
}

void unregister_generic_interrupt_handler(u8 interrupt_number, GenericInterruptHandler& handler)
{
    auto*& handler_slot = s_interrupt_handler[interrupt_number];
    VERIFY(handler_slot != nullptr);
    if (handler_slot->type() == HandlerType::UnhandledInterruptHandler) {
        dbgln("Trying to unregister unused handler (?)");
        return;
    }
    if (handler_slot->is_shared_handler() && !handler_slot->is_sharing_with_others()) {
        VERIFY(handler_slot->type() == HandlerType::SharedIRQHandler);
        auto* shared_handler = static_cast<SharedIRQHandler*>(handler_slot);
        shared_handler->unregister_handler(handler);
        if (!shared_handler->sharing_devices_count()) {
            handler_slot = nullptr;
            revert_to_unused_handler(interrupt_number);
        }
        return;
    }
    if (!handler_slot->is_shared_handler()) {
        VERIFY(handler_slot->type() == HandlerType::IRQHandler);
        handler_slot = nullptr;
        revert_to_unused_handler(interrupt_number);
        return;
    }
    VERIFY_NOT_REACHED();
}
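
// These write raw 32-bit IDT gate descriptors (standard x86 layout): the low
// dword holds the kernel code selector (0x0008) in its upper half and offset
// bits 0-15 in its lower half; the high dword holds offset bits 16-31 plus the
// type/attribute byte. 0x8e00 encodes a present, DPL 0, 32-bit interrupt gate;
// 0xef00 encodes a present, DPL 3 ("user callable") 32-bit trap gate.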
UNMAP_AFTER_INIT void register_interrupt_handler(u8 index, void (*handler)())
{
    s_idt[index].low = 0x00080000 | LSW((FlatPtr)(handler));
    s_idt[index].high = ((FlatPtr)(handler)&0xffff0000) | 0x8e00;
}

UNMAP_AFTER_INIT void register_user_callable_interrupt_handler(u8 index, void (*handler)())
{
    s_idt[index].low = 0x00080000 | LSW(((FlatPtr)handler));
    s_idt[index].high = ((FlatPtr)(handler)&0xffff0000) | 0xef00;
}

UNMAP_AFTER_INIT void flush_idt()
{
    asm("lidt %0" ::"m"(s_idtr));
}
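
// idt_init() lays out the 256-entry IDT as follows: vectors 0x00-0x10 get the
// CPU exception handlers defined above, 0x11-0x4f panic via unimp_trap, and
// everything from 0x50 (IRQ_VECTOR_BASE) upward goes through the generic
// interrupt_*_asm_entry stubs and is dispatched by handle_interrupt().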
UNMAP_AFTER_INIT static void idt_init()
{
    s_idtr.address = s_idt;
    s_idtr.limit = 256 * 8 - 1;

    register_interrupt_handler(0x00, divide_error_asm_entry);
    register_user_callable_interrupt_handler(0x01, debug_asm_entry);
    register_interrupt_handler(0x02, _exception2);
    register_user_callable_interrupt_handler(0x03, breakpoint_asm_entry);
    register_interrupt_handler(0x04, _exception4);
    register_interrupt_handler(0x05, _exception5);
    register_interrupt_handler(0x06, illegal_instruction_asm_entry);
    register_interrupt_handler(0x07, fpu_exception_asm_entry);
    register_interrupt_handler(0x08, _exception8);
    register_interrupt_handler(0x09, _exception9);
    register_interrupt_handler(0x0a, _exception10);
    register_interrupt_handler(0x0b, _exception11);
    register_interrupt_handler(0x0c, _exception12);
    register_interrupt_handler(0x0d, general_protection_fault_asm_entry);
    register_interrupt_handler(0x0e, page_fault_asm_entry);
    register_interrupt_handler(0x0f, _exception15);
    register_interrupt_handler(0x10, _exception16);

    for (u8 i = 0x11; i < 0x50; i++)
        register_interrupt_handler(i, unimp_trap);

    register_interrupt_handler(0x50, interrupt_80_asm_entry);
    register_interrupt_handler(0x51, interrupt_81_asm_entry);
    register_interrupt_handler(0x52, interrupt_82_asm_entry);
    register_interrupt_handler(0x53, interrupt_83_asm_entry);
    register_interrupt_handler(0x54, interrupt_84_asm_entry);
    register_interrupt_handler(0x55, interrupt_85_asm_entry);
    register_interrupt_handler(0x56, interrupt_86_asm_entry);
    register_interrupt_handler(0x57, interrupt_87_asm_entry);
    register_interrupt_handler(0x58, interrupt_88_asm_entry);
    register_interrupt_handler(0x59, interrupt_89_asm_entry);
    register_interrupt_handler(0x5a, interrupt_90_asm_entry);
    register_interrupt_handler(0x5b, interrupt_91_asm_entry);
    register_interrupt_handler(0x5c, interrupt_92_asm_entry);
    register_interrupt_handler(0x5d, interrupt_93_asm_entry);
    register_interrupt_handler(0x5e, interrupt_94_asm_entry);
    register_interrupt_handler(0x5f, interrupt_95_asm_entry);
    register_interrupt_handler(0x60, interrupt_96_asm_entry);
    register_interrupt_handler(0x61, interrupt_97_asm_entry);
    register_interrupt_handler(0x62, interrupt_98_asm_entry);
    register_interrupt_handler(0x63, interrupt_99_asm_entry);
    register_interrupt_handler(0x64, interrupt_100_asm_entry);
    register_interrupt_handler(0x65, interrupt_101_asm_entry);
    register_interrupt_handler(0x66, interrupt_102_asm_entry);
    register_interrupt_handler(0x67, interrupt_103_asm_entry);
    register_interrupt_handler(0x68, interrupt_104_asm_entry);
    register_interrupt_handler(0x69, interrupt_105_asm_entry);
    register_interrupt_handler(0x6a, interrupt_106_asm_entry);
    register_interrupt_handler(0x6b, interrupt_107_asm_entry);
    register_interrupt_handler(0x6c, interrupt_108_asm_entry);
    register_interrupt_handler(0x6d, interrupt_109_asm_entry);
    register_interrupt_handler(0x6e, interrupt_110_asm_entry);
    register_interrupt_handler(0x6f, interrupt_111_asm_entry);
    register_interrupt_handler(0x70, interrupt_112_asm_entry);
    register_interrupt_handler(0x71, interrupt_113_asm_entry);
    register_interrupt_handler(0x72, interrupt_114_asm_entry);
    register_interrupt_handler(0x73, interrupt_115_asm_entry);
    register_interrupt_handler(0x74, interrupt_116_asm_entry);
    register_interrupt_handler(0x75, interrupt_117_asm_entry);
    register_interrupt_handler(0x76, interrupt_118_asm_entry);
    register_interrupt_handler(0x77, interrupt_119_asm_entry);
    register_interrupt_handler(0x78, interrupt_120_asm_entry);
    register_interrupt_handler(0x79, interrupt_121_asm_entry);
    register_interrupt_handler(0x7a, interrupt_122_asm_entry);
    register_interrupt_handler(0x7b, interrupt_123_asm_entry);
    register_interrupt_handler(0x7c, interrupt_124_asm_entry);
    register_interrupt_handler(0x7d, interrupt_125_asm_entry);
    register_interrupt_handler(0x7e, interrupt_126_asm_entry);
    register_interrupt_handler(0x7f, interrupt_127_asm_entry);
    register_interrupt_handler(0x80, interrupt_128_asm_entry);
    register_interrupt_handler(0x81, interrupt_129_asm_entry);
    register_interrupt_handler(0x82, interrupt_130_asm_entry);
    register_interrupt_handler(0x83, interrupt_131_asm_entry);
    register_interrupt_handler(0x84, interrupt_132_asm_entry);
    register_interrupt_handler(0x85, interrupt_133_asm_entry);
    register_interrupt_handler(0x86, interrupt_134_asm_entry);
    register_interrupt_handler(0x87, interrupt_135_asm_entry);
    register_interrupt_handler(0x88, interrupt_136_asm_entry);
    register_interrupt_handler(0x89, interrupt_137_asm_entry);
    register_interrupt_handler(0x8a, interrupt_138_asm_entry);
    register_interrupt_handler(0x8b, interrupt_139_asm_entry);
    register_interrupt_handler(0x8c, interrupt_140_asm_entry);
    register_interrupt_handler(0x8d, interrupt_141_asm_entry);
    register_interrupt_handler(0x8e, interrupt_142_asm_entry);
    register_interrupt_handler(0x8f, interrupt_143_asm_entry);
    register_interrupt_handler(0x90, interrupt_144_asm_entry);
    register_interrupt_handler(0x91, interrupt_145_asm_entry);
    register_interrupt_handler(0x92, interrupt_146_asm_entry);
    register_interrupt_handler(0x93, interrupt_147_asm_entry);
    register_interrupt_handler(0x94, interrupt_148_asm_entry);
    register_interrupt_handler(0x95, interrupt_149_asm_entry);
    register_interrupt_handler(0x96, interrupt_150_asm_entry);
    register_interrupt_handler(0x97, interrupt_151_asm_entry);
    register_interrupt_handler(0x98, interrupt_152_asm_entry);
    register_interrupt_handler(0x99, interrupt_153_asm_entry);
    register_interrupt_handler(0x9a, interrupt_154_asm_entry);
    register_interrupt_handler(0x9b, interrupt_155_asm_entry);
    register_interrupt_handler(0x9c, interrupt_156_asm_entry);
    register_interrupt_handler(0x9d, interrupt_157_asm_entry);
    register_interrupt_handler(0x9e, interrupt_158_asm_entry);
    register_interrupt_handler(0x9f, interrupt_159_asm_entry);
    register_interrupt_handler(0xa0, interrupt_160_asm_entry);
    register_interrupt_handler(0xa1, interrupt_161_asm_entry);
    register_interrupt_handler(0xa2, interrupt_162_asm_entry);
    register_interrupt_handler(0xa3, interrupt_163_asm_entry);
    register_interrupt_handler(0xa4, interrupt_164_asm_entry);
    register_interrupt_handler(0xa5, interrupt_165_asm_entry);
    register_interrupt_handler(0xa6, interrupt_166_asm_entry);
    register_interrupt_handler(0xa7, interrupt_167_asm_entry);
    register_interrupt_handler(0xa8, interrupt_168_asm_entry);
    register_interrupt_handler(0xa9, interrupt_169_asm_entry);
    register_interrupt_handler(0xaa, interrupt_170_asm_entry);
    register_interrupt_handler(0xab, interrupt_171_asm_entry);
    register_interrupt_handler(0xac, interrupt_172_asm_entry);
    register_interrupt_handler(0xad, interrupt_173_asm_entry);
    register_interrupt_handler(0xae, interrupt_174_asm_entry);
    register_interrupt_handler(0xaf, interrupt_175_asm_entry);
    register_interrupt_handler(0xb0, interrupt_176_asm_entry);
    register_interrupt_handler(0xb1, interrupt_177_asm_entry);
    register_interrupt_handler(0xb2, interrupt_178_asm_entry);
    register_interrupt_handler(0xb3, interrupt_179_asm_entry);
    register_interrupt_handler(0xb4, interrupt_180_asm_entry);
    register_interrupt_handler(0xb5, interrupt_181_asm_entry);
    register_interrupt_handler(0xb6, interrupt_182_asm_entry);
    register_interrupt_handler(0xb7, interrupt_183_asm_entry);
    register_interrupt_handler(0xb8, interrupt_184_asm_entry);
    register_interrupt_handler(0xb9, interrupt_185_asm_entry);
    register_interrupt_handler(0xba, interrupt_186_asm_entry);
    register_interrupt_handler(0xbb, interrupt_187_asm_entry);
    register_interrupt_handler(0xbc, interrupt_188_asm_entry);
    register_interrupt_handler(0xbd, interrupt_189_asm_entry);
    register_interrupt_handler(0xbe, interrupt_190_asm_entry);
    register_interrupt_handler(0xbf, interrupt_191_asm_entry);
    register_interrupt_handler(0xc0, interrupt_192_asm_entry);
    register_interrupt_handler(0xc1, interrupt_193_asm_entry);
    register_interrupt_handler(0xc2, interrupt_194_asm_entry);
    register_interrupt_handler(0xc3, interrupt_195_asm_entry);
    register_interrupt_handler(0xc4, interrupt_196_asm_entry);
    register_interrupt_handler(0xc5, interrupt_197_asm_entry);
    register_interrupt_handler(0xc6, interrupt_198_asm_entry);
    register_interrupt_handler(0xc7, interrupt_199_asm_entry);
    register_interrupt_handler(0xc8, interrupt_200_asm_entry);
    register_interrupt_handler(0xc9, interrupt_201_asm_entry);
    register_interrupt_handler(0xca, interrupt_202_asm_entry);
    register_interrupt_handler(0xcb, interrupt_203_asm_entry);
    register_interrupt_handler(0xcc, interrupt_204_asm_entry);
    register_interrupt_handler(0xcd, interrupt_205_asm_entry);
    register_interrupt_handler(0xce, interrupt_206_asm_entry);
    register_interrupt_handler(0xcf, interrupt_207_asm_entry);
    register_interrupt_handler(0xd0, interrupt_208_asm_entry);
    register_interrupt_handler(0xd1, interrupt_209_asm_entry);
    register_interrupt_handler(0xd2, interrupt_210_asm_entry);
    register_interrupt_handler(0xd3, interrupt_211_asm_entry);
    register_interrupt_handler(0xd4, interrupt_212_asm_entry);
    register_interrupt_handler(0xd5, interrupt_213_asm_entry);
    register_interrupt_handler(0xd6, interrupt_214_asm_entry);
    register_interrupt_handler(0xd7, interrupt_215_asm_entry);
    register_interrupt_handler(0xd8, interrupt_216_asm_entry);
    register_interrupt_handler(0xd9, interrupt_217_asm_entry);
    register_interrupt_handler(0xda, interrupt_218_asm_entry);
    register_interrupt_handler(0xdb, interrupt_219_asm_entry);
    register_interrupt_handler(0xdc, interrupt_220_asm_entry);
    register_interrupt_handler(0xdd, interrupt_221_asm_entry);
    register_interrupt_handler(0xde, interrupt_222_asm_entry);
    register_interrupt_handler(0xdf, interrupt_223_asm_entry);
    register_interrupt_handler(0xe0, interrupt_224_asm_entry);
    register_interrupt_handler(0xe1, interrupt_225_asm_entry);
    register_interrupt_handler(0xe2, interrupt_226_asm_entry);
    register_interrupt_handler(0xe3, interrupt_227_asm_entry);
    register_interrupt_handler(0xe4, interrupt_228_asm_entry);
    register_interrupt_handler(0xe5, interrupt_229_asm_entry);
    register_interrupt_handler(0xe6, interrupt_230_asm_entry);
    register_interrupt_handler(0xe7, interrupt_231_asm_entry);
    register_interrupt_handler(0xe8, interrupt_232_asm_entry);
    register_interrupt_handler(0xe9, interrupt_233_asm_entry);
    register_interrupt_handler(0xea, interrupt_234_asm_entry);
    register_interrupt_handler(0xeb, interrupt_235_asm_entry);
    register_interrupt_handler(0xec, interrupt_236_asm_entry);
    register_interrupt_handler(0xed, interrupt_237_asm_entry);
    register_interrupt_handler(0xee, interrupt_238_asm_entry);
    register_interrupt_handler(0xef, interrupt_239_asm_entry);
    register_interrupt_handler(0xf0, interrupt_240_asm_entry);
    register_interrupt_handler(0xf1, interrupt_241_asm_entry);
    register_interrupt_handler(0xf2, interrupt_242_asm_entry);
    register_interrupt_handler(0xf3, interrupt_243_asm_entry);
    register_interrupt_handler(0xf4, interrupt_244_asm_entry);
    register_interrupt_handler(0xf5, interrupt_245_asm_entry);
    register_interrupt_handler(0xf6, interrupt_246_asm_entry);
    register_interrupt_handler(0xf7, interrupt_247_asm_entry);
    register_interrupt_handler(0xf8, interrupt_248_asm_entry);
    register_interrupt_handler(0xf9, interrupt_249_asm_entry);
    register_interrupt_handler(0xfa, interrupt_250_asm_entry);
    register_interrupt_handler(0xfb, interrupt_251_asm_entry);
    register_interrupt_handler(0xfc, interrupt_252_asm_entry);
    register_interrupt_handler(0xfd, interrupt_253_asm_entry);
    register_interrupt_handler(0xfe, interrupt_254_asm_entry);
    register_interrupt_handler(0xff, interrupt_255_asm_entry);

    dbgln("Installing Unhandled Handlers");
    for (u8 i = 0; i < GENERIC_INTERRUPT_HANDLERS_COUNT; ++i) {
        auto* handler = new UnhandledInterruptHandler(i);
        handler->register_interrupt_handler();
    }

    flush_idt();
}

void load_task_register(u16 selector)
{
    asm("ltr %0" ::"r"(selector));
}
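
// Generic interrupt dispatch: the isr number is translated into an index into
// s_interrupt_handler[] by subtracting IRQ_VECTOR_BASE (0x50), the registered
// handler is invoked, and the interrupt is then acknowledged with an EOI.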
void handle_interrupt(TrapFrame* trap)
{
    clac();
    auto& regs = *trap->regs;
    VERIFY(regs.isr_number >= IRQ_VECTOR_BASE && regs.isr_number <= (IRQ_VECTOR_BASE + GENERIC_INTERRUPT_HANDLERS_COUNT));
    u8 irq = (u8)(regs.isr_number - 0x50);
    s_entropy_source_interrupts.add_random_event(irq);
    auto* handler = s_interrupt_handler[irq];
    VERIFY(handler);
    handler->increment_invoking_counter();
    handler->handle_interrupt(regs);
    handler->eoi();
}

void enter_trap_no_irq(TrapFrame* trap)
{
    InterruptDisabler disable;
    Processor::current().enter_trap(*trap, false);
}

void enter_trap(TrapFrame* trap)
{
    InterruptDisabler disable;
    Processor::current().enter_trap(*trap, true);
}

void exit_trap(TrapFrame* trap)
{
    InterruptDisabler disable;
    return Processor::current().exit_trap(*trap);
}

UNMAP_AFTER_INIT void write_cr0(FlatPtr value)
{
#if ARCH(I386)
    asm volatile("mov %%eax, %%cr0" ::"a"(value));
#else
    asm volatile("mov %%rax, %%cr0" ::"a"(value));
#endif
}

UNMAP_AFTER_INIT void write_cr4(FlatPtr value)
{
#if ARCH(I386)
    asm volatile("mov %%eax, %%cr4" ::"a"(value));
#else
    asm volatile("mov %%rax, %%cr4" ::"a"(value));
#endif
}
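
// Enable SSE: clear CR0.EM (bit 2) and set CR0.MP (bit 1) so FPU/SSE
// instructions execute natively, then set CR4.OSFXSR and CR4.OSXMMEXCPT
// (bits 9 and 10, i.e. 0x600) to enable FXSAVE/FXRSTOR and SSE exceptions.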
UNMAP_AFTER_INIT static void sse_init()
{
    write_cr0((read_cr0() & 0xfffffffbu) | 0x2);
    write_cr4(read_cr4() | 0x600);
}

FlatPtr read_cr0()
{
    FlatPtr cr0;
#if ARCH(I386)
    asm("mov %%cr0, %%eax"
        : "=a"(cr0));
#else
    asm("mov %%cr0, %%rax"
        : "=a"(cr0));
#endif
    return cr0;
}

FlatPtr read_cr2()
{
    FlatPtr cr2;
#if ARCH(I386)
    asm("mov %%cr2, %%eax"
        : "=a"(cr2));
#else
    asm("mov %%cr2, %%rax"
        : "=a"(cr2));
#endif
    return cr2;
}

FlatPtr read_cr3()
{
    FlatPtr cr3;
#if ARCH(I386)
    asm("mov %%cr3, %%eax"
        : "=a"(cr3));
#else
    asm("mov %%cr3, %%rax"
        : "=a"(cr3));
#endif
    return cr3;
}

void write_cr3(FlatPtr cr3)
{
    // NOTE: If you're here from a GPF crash, it's very likely that a PDPT entry is incorrect, not this!
#if ARCH(I386)
    asm volatile("mov %%eax, %%cr3" ::"a"(cr3)
                 : "memory");
#else
    asm volatile("mov %%rax, %%cr3" ::"a"(cr3)
                 : "memory");
#endif
}

FlatPtr read_cr4()
{
    FlatPtr cr4;
#if ARCH(I386)
    asm("mov %%cr4, %%eax"
        : "=a"(cr4));
#else
    asm("mov %%cr4, %%rax"
        : "=a"(cr4));
#endif
    return cr4;
}

FlatPtr read_dr6()
{
    FlatPtr dr6;
#if ARCH(I386)
    asm("mov %%dr6, %%eax"
        : "=a"(dr6));
#else
    asm("mov %%dr6, %%rax"
        : "=a"(dr6));
#endif
    return dr6;
}
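
// XGETBV/XSETBV access the extended control register selected by ECX.
// XCR0 (index 0, XCR_XFEATURE_ENABLED_MASK) controls which state components
// (x87, SSE, AVX, ...) the XSAVE family of instructions manages.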
#define XCR_XFEATURE_ENABLED_MASK 0

UNMAP_AFTER_INIT u64 read_xcr0()
{
    u32 eax, edx;
    asm volatile("xgetbv"
                 : "=a"(eax), "=d"(edx)
                 : "c"(XCR_XFEATURE_ENABLED_MASK));
    return eax + ((u64)edx << 32);
}

UNMAP_AFTER_INIT void write_xcr0(u64 value)
{
    u32 eax = value;
    u32 edx = value >> 32;
    asm volatile("xsetbv" ::"a"(eax), "d"(edx), "c"(XCR_XFEATURE_ENABLED_MASK));
}

READONLY_AFTER_INIT FPUState Processor::s_clean_fpu_state;

READONLY_AFTER_INIT static Vector<Processor*>* s_processors;
static SpinLock s_processor_lock;
READONLY_AFTER_INIT volatile u32 Processor::g_total_processors;
static volatile bool s_smp_enabled;

Vector<Processor*>& Processor::processors()
{
    VERIFY(s_processors);
    return *s_processors;
}

Processor& Processor::by_id(u32 cpu)
{
    // s_processors does not need to be protected by a lock of any kind.
    // It is populated early in the boot process, and the BSP is waiting
    // for all APs to finish, after which this array never gets modified
    // again, so it's safe to not protect access to it here
    auto& procs = processors();
    VERIFY(procs.size() > cpu);
    VERIFY(procs[cpu] != nullptr);
    return *procs[cpu];
}

[[noreturn]] static inline void halt_this()
{
    for (;;) {
        asm volatile("cli; hlt");
    }
}

UNMAP_AFTER_INIT void Processor::cpu_detect()
{
    // NOTE: This is called during Processor::early_initialize, we cannot
    // safely log at this point because we don't have kmalloc
    // initialized yet!
    auto set_feature =
        [&](CPUFeature f) {
            m_features = static_cast<CPUFeature>(static_cast<u32>(m_features) | static_cast<u32>(f));
        };
    m_features = static_cast<CPUFeature>(0);

    CPUID processor_info(0x1);
    if (processor_info.edx() & (1 << 4))
        set_feature(CPUFeature::TSC);
    if (processor_info.edx() & (1 << 6))
        set_feature(CPUFeature::PAE);
    if (processor_info.edx() & (1 << 13))
        set_feature(CPUFeature::PGE);
    if (processor_info.edx() & (1 << 23))
        set_feature(CPUFeature::MMX);
    if (processor_info.edx() & (1 << 25))
        set_feature(CPUFeature::SSE);
    if (processor_info.edx() & (1 << 26))
        set_feature(CPUFeature::SSE2);
    if (processor_info.ecx() & (1 << 0))
        set_feature(CPUFeature::SSE3);
    if (processor_info.ecx() & (1 << 9))
        set_feature(CPUFeature::SSSE3);
    if (processor_info.ecx() & (1 << 19))
        set_feature(CPUFeature::SSE4_1);
    if (processor_info.ecx() & (1 << 20))
        set_feature(CPUFeature::SSE4_2);
    if (processor_info.ecx() & (1 << 26))
        set_feature(CPUFeature::XSAVE);
    if (processor_info.ecx() & (1 << 28))
        set_feature(CPUFeature::AVX);
    if (processor_info.ecx() & (1 << 30))
        set_feature(CPUFeature::RDRAND);
    if (processor_info.edx() & (1 << 11)) {
        u32 stepping = processor_info.eax() & 0xf;
        u32 model = (processor_info.eax() >> 4) & 0xf;
        u32 family = (processor_info.eax() >> 8) & 0xf;
        if (!(family == 6 && model < 3 && stepping < 3))
            set_feature(CPUFeature::SEP);
        if ((family == 6 && model >= 3) || (family == 0xf && model >= 0xe))
            set_feature(CPUFeature::CONSTANT_TSC);
    }

    u32 max_extended_leaf = CPUID(0x80000000).eax();
    VERIFY(max_extended_leaf >= 0x80000001);
    CPUID extended_processor_info(0x80000001);
    if (extended_processor_info.edx() & (1 << 20))
        set_feature(CPUFeature::NX);
    if (extended_processor_info.edx() & (1 << 27))
        set_feature(CPUFeature::RDTSCP);
    if (extended_processor_info.edx() & (1 << 11)) {
        // Only available in 64 bit mode
        set_feature(CPUFeature::SYSCALL);
    }

    if (max_extended_leaf >= 0x80000007) {
        CPUID cpuid(0x80000007);
        if (cpuid.edx() & (1 << 8)) {
            set_feature(CPUFeature::CONSTANT_TSC);
            set_feature(CPUFeature::NONSTOP_TSC);
        }
    }

    if (max_extended_leaf >= 0x80000008) {
        // CPUID.80000008H:EAX[7:0] reports the physical-address width supported by the processor.
        CPUID cpuid(0x80000008);
        m_physical_address_bit_width = cpuid.eax() & 0xff;
    } else {
        // For processors that do not support CPUID function 80000008H, the width is generally 36 if CPUID.01H:EDX.PAE [bit 6] = 1 and 32 otherwise.
        m_physical_address_bit_width = has_feature(CPUFeature::PAE) ? 36 : 32;
    }

    CPUID extended_features(0x7);
    if (extended_features.ebx() & (1 << 20))
        set_feature(CPUFeature::SMAP);
    if (extended_features.ebx() & (1 << 7))
        set_feature(CPUFeature::SMEP);
    if (extended_features.ecx() & (1 << 2))
        set_feature(CPUFeature::UMIP);
    if (extended_features.ebx() & (1 << 18))
        set_feature(CPUFeature::RDSEED);
}

UNMAP_AFTER_INIT void Processor::cpu_setup()
{
    // NOTE: This is called during Processor::early_initialize, we cannot
    // safely log at this point because we don't have kmalloc
    // initialized yet!
    cpu_detect();

    if (has_feature(CPUFeature::SSE))
        sse_init();
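
    // Turn on CR0.WP (bit 16) so that ring 0 also honors read-only page protections.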
    write_cr0(read_cr0() | 0x00010000);

    if (has_feature(CPUFeature::PGE)) {
        // Turn on CR4.PGE so the CPU will respect the G bit in page tables.
        write_cr4(read_cr4() | 0x80);
    }

    if (has_feature(CPUFeature::NX)) {
        // Turn on IA32_EFER.NXE
        asm volatile(
            "movl $0xc0000080, %ecx\n"
            "rdmsr\n"
            "orl $0x800, %eax\n"
            "wrmsr\n");
    }

    if (has_feature(CPUFeature::SMEP)) {
        // Turn on CR4.SMEP
        write_cr4(read_cr4() | 0x100000);
    }

    if (has_feature(CPUFeature::SMAP)) {
        // Turn on CR4.SMAP
        write_cr4(read_cr4() | 0x200000);
    }

    if (has_feature(CPUFeature::UMIP)) {
        write_cr4(read_cr4() | 0x800);
    }
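
    // CR4 bit 2 is TSD (Time Stamp Disable): setting it makes RDTSC a
    // privileged instruction, usable only in ring 0.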
    if (has_feature(CPUFeature::TSC)) {
        write_cr4(read_cr4() | 0x4);
    }

    if (has_feature(CPUFeature::XSAVE)) {
        // Turn on CR4.OSXSAVE
        write_cr4(read_cr4() | 0x40000);

        // According to the Intel manual: "After reset, all bits (except bit 0) in XCR0 are cleared to zero; XCR0[0] is set to 1."
        // Sadly we can't trust this, for example VirtualBox starts with bits 0-4 set, so let's do it ourselves.
        write_xcr0(0x1);

        if (has_feature(CPUFeature::AVX)) {
            // Turn on SSE, AVX and x87 flags
            write_xcr0(read_xcr0() | 0x7);
        }
    }
}

String Processor::features_string() const
{
    StringBuilder builder;
    auto feature_to_str =
        [](CPUFeature f) -> const char* {
            switch (f) {
            case CPUFeature::NX:
                return "nx";
            case CPUFeature::PAE:
                return "pae";
            case CPUFeature::PGE:
                return "pge";
            case CPUFeature::RDRAND:
                return "rdrand";
            case CPUFeature::RDSEED:
                return "rdseed";
            case CPUFeature::SMAP:
                return "smap";
            case CPUFeature::SMEP:
                return "smep";
            case CPUFeature::SSE:
                return "sse";
            case CPUFeature::TSC:
                return "tsc";
            case CPUFeature::RDTSCP:
                return "rdtscp";
            case CPUFeature::CONSTANT_TSC:
                return "constant_tsc";
            case CPUFeature::NONSTOP_TSC:
                return "nonstop_tsc";
            case CPUFeature::UMIP:
                return "umip";
            case CPUFeature::SEP:
                return "sep";
            case CPUFeature::SYSCALL:
                return "syscall";
            case CPUFeature::MMX:
                return "mmx";
            case CPUFeature::SSE2:
                return "sse2";
            case CPUFeature::SSE3:
                return "sse3";
            case CPUFeature::SSSE3:
                return "ssse3";
            case CPUFeature::SSE4_1:
                return "sse4.1";
            case CPUFeature::SSE4_2:
                return "sse4.2";
            case CPUFeature::XSAVE:
                return "xsave";
            case CPUFeature::AVX:
                return "avx";
            // no default statement here intentionally so that we get
            // a compiler warning if we forget to add a new feature here
            }
            // Shouldn't ever happen
            return "???";
        };
    bool first = true;
    for (u32 flag = 1; flag != 0; flag <<= 1) {
        if ((static_cast<u32>(m_features) & flag) != 0) {
            if (first)
                first = false;
            else
                builder.append(' ');
            auto str = feature_to_str(static_cast<CPUFeature>(flag));
            builder.append(str, strlen(str));
        }
    }
    return builder.build();
}

String Processor::platform_string() const
{
    return "i386";
}

UNMAP_AFTER_INIT void Processor::early_initialize(u32 cpu)
{
    m_self = this;
    m_cpu = cpu;
    m_in_irq = 0;
    m_in_critical = 0;
    m_invoke_scheduler_async = false;
    m_scheduler_initialized = false;
    m_message_queue = nullptr;
    m_idle_thread = nullptr;
    m_current_thread = nullptr;
    m_scheduler_data = nullptr;
    m_mm_data = nullptr;
    m_info = nullptr;
    m_halt_requested = false;
    if (cpu == 0) {
        s_smp_enabled = false;
        atomic_store(&g_total_processors, 1u, AK::MemoryOrder::memory_order_release);
    } else {
        atomic_fetch_add(&g_total_processors, 1u, AK::MemoryOrder::memory_order_acq_rel);
    }
    deferred_call_pool_init();
    cpu_setup();
    gdt_init();
    VERIFY(is_initialized());   // sanity check
    VERIFY(&current() == this); // sanity check
}

UNMAP_AFTER_INIT void Processor::initialize(u32 cpu)
{
    VERIFY(m_self == this);
    VERIFY(&current() == this); // sanity check
    dmesgln("CPU[{}]: Supported features: {}", id(), features_string());
    if (!has_feature(CPUFeature::RDRAND))
        dmesgln("CPU[{}]: No RDRAND support detected, randomness will be poor", id());
    dmesgln("CPU[{}]: Physical address bit width: {}", id(), m_physical_address_bit_width);
    if (cpu == 0)
        idt_init();
    else
        flush_idt();
    if (cpu == 0) {
        VERIFY((FlatPtr(&s_clean_fpu_state) & 0xF) == 0);
        asm volatile("fninit");
        asm volatile("fxsave %0"
                     : "=m"(s_clean_fpu_state));
    }
    m_info = new ProcessorInfo(*this);
    {
        ScopedSpinLock lock(s_processor_lock);
        // We need to prevent races between APs starting up at the same time
        if (!s_processors)
            s_processors = new Vector<Processor*>();
        if (cpu >= s_processors->size())
            s_processors->resize(cpu + 1);
        (*s_processors)[cpu] = this;
    }
}

void Processor::write_raw_gdt_entry(u16 selector, u32 low, u32 high)
{
    u16 i = (selector & 0xfffc) >> 3;
    u32 prev_gdt_length = m_gdt_length;
    if (i > m_gdt_length) {
        m_gdt_length = i + 1;
        VERIFY(m_gdt_length <= sizeof(m_gdt) / sizeof(m_gdt[0]));
        m_gdtr.limit = (m_gdt_length + 1) * 8 - 1;
    }
    m_gdt[i].low = low;
    m_gdt[i].high = high;
    // clear selectors we may have skipped
    while (i < prev_gdt_length) {
        m_gdt[i].low = 0;
        m_gdt[i].high = 0;
        i++;
    }
}

void Processor::write_gdt_entry(u16 selector, Descriptor& descriptor)
{
    write_raw_gdt_entry(selector, descriptor.low, descriptor.high);
}

Descriptor& Processor::get_gdt_entry(u16 selector)
{
    u16 i = (selector & 0xfffc) >> 3;
    return *(Descriptor*)(&m_gdt[i]);
}

void Processor::flush_gdt()
{
    m_gdtr.address = m_gdt;
    m_gdtr.limit = (m_gdt_length * 8) - 1;
    asm volatile("lgdt %0" ::"m"(m_gdtr)
                 : "memory");
}

const DescriptorTablePointer& Processor::get_gdtr()
{
    return m_gdtr;
}

Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames)
{
    FlatPtr frame_ptr = 0, eip = 0;
    Vector<FlatPtr, 32> stack_trace;
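
    // Walk the frame-pointer chain: on x86 with frame pointers, [ebp] holds the
    // caller's saved ebp and [ebp + 4] the return address, which is why the code
    // below reads stack_ptr[1] for the return address and stack_ptr[0] for the
    // next frame.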
    auto walk_stack = [&](FlatPtr stack_ptr) {
        static constexpr size_t max_stack_frames = 4096;
        stack_trace.append(eip);
        size_t count = 1;
        while (stack_ptr && stack_trace.size() < max_stack_frames) {
            FlatPtr retaddr;
            count++;
            if (max_frames != 0 && count > max_frames)
                break;
            if (is_user_range(VirtualAddress(stack_ptr), sizeof(FlatPtr) * 2)) {
                if (!copy_from_user(&retaddr, &((FlatPtr*)stack_ptr)[1]) || !retaddr)
                    break;
                stack_trace.append(retaddr);
                if (!copy_from_user(&stack_ptr, (FlatPtr*)stack_ptr))
                    break;
            } else {
                void* fault_at;
                if (!safe_memcpy(&retaddr, &((FlatPtr*)stack_ptr)[1], sizeof(FlatPtr), fault_at) || !retaddr)
                    break;
                stack_trace.append(retaddr);
                if (!safe_memcpy(&stack_ptr, (FlatPtr*)stack_ptr, sizeof(FlatPtr), fault_at))
                    break;
            }
        }
    };
    auto capture_current_thread = [&]() {
        frame_ptr = (FlatPtr)__builtin_frame_address(0);
        eip = (FlatPtr)__builtin_return_address(0);
        walk_stack(frame_ptr);
    };
    // Since the thread may be running on another processor, there
    // is a chance a context switch may happen while we're trying
    // to get it. It also won't be entirely accurate and merely
    // reflect the status at the last context switch.
    ScopedSpinLock lock(g_scheduler_lock);
    if (&thread == Processor::current_thread()) {
        VERIFY(thread.state() == Thread::Running);
        // Leave the scheduler lock. If we trigger page faults we may
        // need to be preempted. Since this is our own thread it won't
        // cause any problems as the stack won't change below this frame.
        lock.unlock();
        capture_current_thread();
    } else if (thread.is_active()) {
        VERIFY(thread.cpu() != Processor::id());
  1164. // If this is the case, the thread is currently running
  1165. // on another processor. We can't trust the kernel stack as
  1166. // it may be changing at any time. We need to probably send
  1167. // an IPI to that processor, have it walk the stack and wait
  1168. // until it returns the data back to us
  1169. auto& proc = Processor::current();
  1170. smp_unicast(
  1171. thread.cpu(),
  1172. [&]() {
  1173. dbgln("CPU[{}] getting stack for cpu #{}", Processor::id(), proc.get_id());
  1174. ProcessPagingScope paging_scope(thread.process());
  1175. VERIFY(&Processor::current() != &proc);
  1176. VERIFY(&thread == Processor::current_thread());
  1177. // NOTE: Because the other processor is still holding the
  1178. // scheduler lock while waiting for this callback to finish,
  1179. // the current thread on the target processor cannot change
  1180. // TODO: What to do about page faults here? We might deadlock
  1181. // because the other processor is still holding the
  1182. // scheduler lock...
  1183. capture_current_thread();
  1184. },
  1185. false);
  1186. } else {
  1187. switch (thread.state()) {
  1188. case Thread::Running:
  1189. VERIFY_NOT_REACHED(); // should have been handled above
  1190. case Thread::Runnable:
  1191. case Thread::Stopped:
  1192. case Thread::Blocked:
  1193. case Thread::Dying:
  1194. case Thread::Dead: {
  1195. // We need to retrieve ebp from what was last pushed to the kernel
  1196. // stack. Before switching out of that thread, it switch_context
  1197. // pushed the callee-saved registers, and the last of them happens
  1198. // to be ebp.
  1199. ProcessPagingScope paging_scope(thread.process());
  1200. auto& tss = thread.tss();
  1201. u32* stack_top = reinterpret_cast<u32*>(tss.esp);
  1202. if (is_user_range(VirtualAddress(stack_top), sizeof(FlatPtr))) {
  1203. if (!copy_from_user(&frame_ptr, &((FlatPtr*)stack_top)[0]))
  1204. frame_ptr = 0;
  1205. } else {
  1206. void* fault_at;
  1207. if (!safe_memcpy(&frame_ptr, &((FlatPtr*)stack_top)[0], sizeof(FlatPtr), fault_at))
  1208. frame_ptr = 0;
  1209. }
  1210. eip = tss.eip;
  1211. // TODO: We need to leave the scheduler lock here, but we also
  1212. // need to prevent the target thread from being run while
  1213. // we walk the stack
  1214. lock.unlock();
  1215. walk_stack(frame_ptr);
  1216. break;
  1217. }
  1218. default:
  1219. dbgln("Cannot capture stack trace for thread {} in state {}", thread, thread.state_string());
  1220. break;
  1221. }
  1222. }
  1223. return stack_trace;
  1224. }
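// enter_thread_context() is the C++ half of a context switch and is jumped to
// from the assembly in Processor::switch_context() with interrupts disabled.
// It handles the parts that are more convenient in C++: saving and restoring
// FPU state via fxsave/fxrstor, swapping fs/gs, pointing the TLS descriptor
// at the incoming thread's thread-specific data, and reloading cr3 when the
// address space changes.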
extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
{
    VERIFY(from_thread == to_thread || from_thread->state() != Thread::Running);
    VERIFY(to_thread->state() == Thread::Running);

    Processor::set_current_thread(*to_thread);

    auto& from_tss = from_thread->tss();
    auto& to_tss = to_thread->tss();
    asm volatile("fxsave %0"
                 : "=m"(from_thread->fpu_state()));

    from_tss.fs = get_fs();
    from_tss.gs = get_gs();
    set_fs(to_tss.fs);
    set_gs(to_tss.gs);

    auto& processor = Processor::current();
    auto& tls_descriptor = processor.get_gdt_entry(GDT_SELECTOR_TLS);
    tls_descriptor.set_base(to_thread->thread_specific_data());
    tls_descriptor.set_limit(to_thread->thread_specific_region_size());

    if (from_tss.cr3 != to_tss.cr3)
        write_cr3(to_tss.cr3);

    to_thread->set_cpu(processor.get_id());
    processor.restore_in_critical(to_thread->saved_critical());

    asm volatile("fxrstor %0" ::"m"(to_thread->fpu_state()));

    // TODO: debug registers
    // TODO: ioperm?
}

#define ENTER_THREAD_CONTEXT_ARGS_SIZE (2 * 4) // to_thread, from_thread

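// switch_context() pushes the outgoing thread's flags and callee-saved
// registers onto its kernel stack, records esp/eip in its TSS copy, and then
// switches esp to the incoming thread's saved kernel stack. That stack was
// either prepared by an earlier run of this same sequence, or freshly laid
// out by init_context() so that the "return" lands in
// thread_context_first_enter().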
void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
{
    VERIFY(!in_irq());
    VERIFY(m_in_critical == 1);
    VERIFY(is_kernel_mode());

    dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context --> switching out of: {} {}", VirtualAddress(from_thread), *from_thread);
    from_thread->save_critical(m_in_critical);

#if ARCH(I386)
    // clang-format off
    // Switch to new thread context, passing from_thread and to_thread
    // through to the new context using registers edx and eax
    asm volatile(
        // NOTE: changing how much we push to the stack affects
        //       SWITCH_CONTEXT_TO_STACK_SIZE and thread_context_first_enter()!
        "pushfl \n"
        "pushl %%ebx \n"
        "pushl %%esi \n"
        "pushl %%edi \n"
        "pushl %%ebp \n"
        "movl %%esp, %[from_esp] \n"
        "movl $1f, %[from_eip] \n"
        "movl %[to_esp0], %%ebx \n"
        "movl %%ebx, %[tss_esp0] \n"
        "movl %[to_esp], %%esp \n"
        "pushl %[to_thread] \n"
        "pushl %[from_thread] \n"
        "pushl %[to_eip] \n"
        "cld \n"
        "jmp enter_thread_context \n"
        "1: \n"
        "popl %%edx \n"
        "popl %%eax \n"
        "popl %%ebp \n"
        "popl %%edi \n"
        "popl %%esi \n"
        "popl %%ebx \n"
        "popfl \n"
        : [from_esp] "=m" (from_thread->tss().esp),
          [from_eip] "=m" (from_thread->tss().eip),
          [tss_esp0] "=m" (m_tss.esp0),
          "=d" (from_thread), // needed so that from_thread retains the correct value
          "=a" (to_thread) // needed so that to_thread retains the correct value
        : [to_esp] "g" (to_thread->tss().esp),
          [to_esp0] "g" (to_thread->tss().esp0),
          [to_eip] "c" (to_thread->tss().eip),
          [from_thread] "d" (from_thread),
          [to_thread] "a" (to_thread)
        : "memory"
    );
    // clang-format on
#else
    PANIC("Context switching not implemented.");
#endif

    dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {}", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);

    Processor::current().restore_in_critical(to_thread->saved_critical());
}

extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe_unused]] Thread* to_thread, [[maybe_unused]] TrapFrame* trap)
{
    VERIFY(!are_interrupts_enabled());
    VERIFY(is_kernel_mode());

    dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {} (context_first_init)", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);

    VERIFY(to_thread == Thread::current());

    Scheduler::enter_current(*from_thread, true);

    // Since we got here and don't have Scheduler::context_switch in the
    // call stack (because this is the first time we switched into this
    // context), we need to notify the scheduler so that it can release
    // the scheduler lock. We don't want to enable interrupts at this point
    // as we're still in the middle of a context switch. Doing so could
    // trigger a context switch within a context switch, leading to a crash.
    Scheduler::leave_on_first_switch(trap->regs->eflags & ~0x200);
}

extern "C" void thread_context_first_enter(void);

// clang-format off
asm(
// enter_thread_context returns to here first time a thread is executing
".globl thread_context_first_enter \n"
"thread_context_first_enter: \n"
// switch_context will have pushed from_thread and to_thread to our new
// stack prior to thread_context_first_enter() being called, and the
// pointer to TrapFrame was the top of the stack before that
"    movl 8(%esp), %ebx \n" // save pointer to TrapFrame
"    cld \n"
"    call context_first_init \n"
"    addl $" __STRINGIFY(ENTER_THREAD_CONTEXT_ARGS_SIZE) ", %esp \n"
"    movl %ebx, 0(%esp) \n" // push pointer to TrapFrame
"    jmp common_trap_exit \n"
);
// clang-format on

void exit_kernel_thread(void)
{
    Thread::current()->exit();
}

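// init_context() builds a fresh kernel stack so that the first
// switch_context() into the thread "returns" into thread_context_first_enter(),
// which then irets through the prepared register state. From higher to lower
// addresses the new stack roughly contains:
//
//   - an alignment word so that %esp + 4 ends up 16-byte aligned
//   - for kernel threads only: the thread function argument (taken from
//     tss.esp) with exit_kernel_thread below it as the return address
//   - a RegisterState ("iretframe") describing where the thread starts
//     (for kernel threads its unused userspace esp/ss slots line up with the
//     two words above, since iret does not pop them on a same-privilege return)
//   - a TrapFrame whose regs pointer refers to that RegisterState
//   - a pointer to the TrapFrame; tss.esp is left pointing here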
u32 Processor::init_context(Thread& thread, bool leave_crit)
{
    VERIFY(is_kernel_mode());
    VERIFY(g_scheduler_lock.is_locked());
    if (leave_crit) {
        // Leave the critical section we set up in Process::exec,
        // but because we still have the scheduler lock we should end up with 1
        m_in_critical--; // leave it without triggering anything or restoring flags
        VERIFY(in_critical() == 1);
    }

    u32 kernel_stack_top = thread.kernel_stack_top();

    // Add a random offset between 0-256 (16-byte aligned)
    kernel_stack_top -= round_up_to_power_of_two(get_fast_random<u8>(), 16);

    u32 stack_top = kernel_stack_top;

    // TODO: handle NT?
    VERIFY((cpu_flags() & 0x24000) == 0); // Assume !(NT | VM)

    auto& tss = thread.tss();
    bool return_to_user = (tss.cs & 3) != 0;

    // make room for an interrupt frame
    if (!return_to_user) {
        // userspace_esp and userspace_ss are not popped off by iret
        // unless we're switching back to user mode
        stack_top -= sizeof(RegisterState) - 2 * sizeof(u32);

        // For kernel threads we'll push the thread function argument
        // which should be in tss.esp and exit_kernel_thread as return
        // address.
        stack_top -= 2 * sizeof(u32);
        *reinterpret_cast<u32*>(kernel_stack_top - 2 * sizeof(u32)) = tss.esp;
        *reinterpret_cast<u32*>(kernel_stack_top - 3 * sizeof(u32)) = FlatPtr(&exit_kernel_thread);
    } else {
        stack_top -= sizeof(RegisterState);
    }

    // we want to end up 16-byte aligned, %esp + 4 should be aligned
    stack_top -= sizeof(u32);
    *reinterpret_cast<u32*>(kernel_stack_top - sizeof(u32)) = 0;

    // set up the stack so that after returning from thread_context_first_enter()
    // we will end up either in kernel mode or user mode, depending on how the thread is set up
    // However, the first step is to always start in kernel mode with thread_context_first_enter
    RegisterState& iretframe = *reinterpret_cast<RegisterState*>(stack_top);
    iretframe.ss = tss.ss;
    iretframe.gs = tss.gs;
    iretframe.fs = tss.fs;
    iretframe.es = tss.es;
    iretframe.ds = tss.ds;
    iretframe.edi = tss.edi;
    iretframe.esi = tss.esi;
    iretframe.ebp = tss.ebp;
    iretframe.esp = 0;
    iretframe.ebx = tss.ebx;
    iretframe.edx = tss.edx;
    iretframe.ecx = tss.ecx;
    iretframe.eax = tss.eax;
    iretframe.eflags = tss.eflags;
    iretframe.eip = tss.eip;
    iretframe.cs = tss.cs;
    if (return_to_user) {
        iretframe.userspace_esp = tss.esp;
        iretframe.userspace_ss = tss.ss;
    }

    // make space for a trap frame
    stack_top -= sizeof(TrapFrame);
    TrapFrame& trap = *reinterpret_cast<TrapFrame*>(stack_top);
    trap.regs = &iretframe;
    trap.prev_irq_level = 0;
    trap.next_trap = nullptr;

    stack_top -= sizeof(u32); // pointer to TrapFrame
    *reinterpret_cast<u32*>(stack_top) = stack_top + 4;

    if constexpr (CONTEXT_SWITCH_DEBUG) {
        if (return_to_user) {
            dbgln("init_context {} ({}) set up to execute at eip={}:{}, esp={}, stack_top={}, user_top={}:{}",
                thread,
                VirtualAddress(&thread),
                iretframe.cs, tss.eip,
                VirtualAddress(tss.esp),
                VirtualAddress(stack_top),
                iretframe.userspace_ss,
                iretframe.userspace_esp);
        } else {
            dbgln("init_context {} ({}) set up to execute at eip={}:{}, esp={}, stack_top={}",
                thread,
                VirtualAddress(&thread),
                iretframe.cs, tss.eip,
                VirtualAddress(tss.esp),
                VirtualAddress(stack_top));
        }
    }

    // make switch_context() always first return to thread_context_first_enter()
    // in kernel mode, so set up these values so that we end up popping iretframe
    // off the stack right after the context switch completed, at which point
    // control is transferred to what iretframe is pointing to.
    tss.eip = FlatPtr(&thread_context_first_enter);
    tss.esp0 = kernel_stack_top;
    tss.esp = stack_top;
    tss.cs = GDT_SELECTOR_CODE0;
    tss.ds = GDT_SELECTOR_DATA0;
    tss.es = GDT_SELECTOR_DATA0;
    tss.gs = GDT_SELECTOR_DATA0;
    tss.ss = GDT_SELECTOR_DATA0;
    tss.fs = GDT_SELECTOR_PROC;
    return stack_top;
}

extern "C" u32 do_init_context(Thread* thread, u32 flags)
{
    VERIFY_INTERRUPTS_DISABLED();
    thread->tss().eflags = flags;
    return Processor::current().init_context(*thread, true);
}

extern "C" void do_assume_context(Thread* thread, u32 flags);

#if ARCH(I386)
// clang-format off
asm(
".global do_assume_context \n"
"do_assume_context: \n"
"    movl 4(%esp), %ebx \n"
"    movl 8(%esp), %esi \n"
// We're going to call Processor::init_context, so just make sure
// we have enough stack space so we don't stomp over it
"    subl $(" __STRINGIFY(4 + REGISTER_STATE_SIZE + TRAP_FRAME_SIZE + 4) "), %esp \n"
"    pushl %esi \n"
"    pushl %ebx \n"
"    cld \n"
"    call do_init_context \n"
"    addl $8, %esp \n"
"    movl %eax, %esp \n" // move stack pointer to what Processor::init_context set up for us
"    pushl %ebx \n" // push to_thread
"    pushl %ebx \n" // push from_thread
"    pushl $thread_context_first_enter \n" // should be same as tss.eip
"    jmp enter_thread_context \n"
);
// clang-format on
#endif

void Processor::assume_context(Thread& thread, FlatPtr flags)
{
    dbgln_if(CONTEXT_SWITCH_DEBUG, "Assume context for thread {} {}", VirtualAddress(&thread), thread);

    VERIFY_INTERRUPTS_DISABLED();
    Scheduler::prepare_after_exec();
    // in_critical() should be 2 here. The critical section in Process::exec
    // and then the scheduler lock
    VERIFY(Processor::current().in_critical() == 2);
#if ARCH(I386)
    do_assume_context(&thread, flags);
#elif ARCH(X86_64)
    (void)flags;
    TODO();
#endif
    VERIFY_NOT_REACHED();
}

extern "C" UNMAP_AFTER_INIT void pre_init_finished(void)
{
    VERIFY(g_scheduler_lock.own_lock());

    // Because init_finished() will wait on the other APs, we need
    // to release the scheduler lock so that the other APs can also get
    // to this point

    // The target flags will get restored upon leaving the trap
    u32 prev_flags = cpu_flags();
    Scheduler::leave_on_first_switch(prev_flags);
}

extern "C" UNMAP_AFTER_INIT void post_init_finished(void)
{
    // We need to re-acquire the scheduler lock before a context switch
    // transfers control into the idle loop, which needs the lock held
    Scheduler::prepare_for_idle_loop();
}

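// initialize_context_switching() never returns. It hand-crafts a stack that
// looks like what switch_context() would have produced for the initial kernel
// thread, runs the pre_init_finished()/init_finished()/post_init_finished()
// handshake with the other processors, and finally lrets into the thread's
// entry point. All later switching happens through switch_context().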
UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_thread)
{
    VERIFY(initial_thread.process().is_kernel_process());

    auto& tss = initial_thread.tss();
    m_tss = tss;
    m_tss.esp0 = tss.esp0;
    m_tss.ss0 = GDT_SELECTOR_DATA0;
    // user mode needs to be able to switch to kernel mode:
    m_tss.cs = m_tss.ds = m_tss.es = m_tss.gs = m_tss.ss = GDT_SELECTOR_CODE0 | 3;
    m_tss.fs = GDT_SELECTOR_PROC | 3;

    m_scheduler_initialized = true;

#if ARCH(I386)
    // clang-format off
    asm volatile(
        "movl %[new_esp], %%esp \n" // switch to new stack
        "pushl %[from_to_thread] \n" // to_thread
        "pushl %[from_to_thread] \n" // from_thread
        "pushl $" __STRINGIFY(GDT_SELECTOR_CODE0) " \n"
        "pushl %[new_eip] \n" // save the entry eip to the stack
        "movl %%esp, %%ebx \n"
        "addl $20, %%ebx \n" // calculate pointer to TrapFrame
        "pushl %%ebx \n"
        "cld \n"
        "pushl %[cpu] \n" // push argument for init_finished before register is clobbered
        "call pre_init_finished \n"
        "call init_finished \n"
        "addl $4, %%esp \n"
        "call post_init_finished \n"
        "call enter_trap_no_irq \n"
        "addl $4, %%esp \n"
        "lret \n"
        :: [new_esp] "g" (tss.esp),
           [new_eip] "a" (tss.eip),
           [from_to_thread] "b" (&initial_thread),
           [cpu] "c" (id())
    );
    // clang-format on
#endif

    VERIFY_NOT_REACHED();
}

void Processor::enter_trap(TrapFrame& trap, bool raise_irq)
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(&Processor::current() == this);
    trap.prev_irq_level = m_in_irq;
    if (raise_irq)
        m_in_irq++;
    auto* current_thread = Processor::current_thread();
    if (current_thread) {
        auto& current_trap = current_thread->current_trap();
        trap.next_trap = current_trap;
        current_trap = &trap;
        // The cs register of this trap tells us where we will return back to
        current_thread->set_previous_mode(((trap.regs->cs & 3) != 0) ? Thread::PreviousMode::UserMode : Thread::PreviousMode::KernelMode);
    } else {
        trap.next_trap = nullptr;
    }
}

void Processor::exit_trap(TrapFrame& trap)
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(&Processor::current() == this);
    VERIFY(m_in_irq >= trap.prev_irq_level);
    m_in_irq = trap.prev_irq_level;

    smp_process_pending_messages();

    if (!m_in_irq && !m_in_critical)
        check_invoke_scheduler();

    auto* current_thread = Processor::current_thread();
    if (current_thread) {
        auto& current_trap = current_thread->current_trap();
        current_trap = trap.next_trap;
        if (current_trap) {
            VERIFY(current_trap->regs);
            // If we have another higher level trap then we probably returned
            // from an interrupt or irq handler. The cs register of the
            // new/higher level trap tells us what the mode prior to it was
            current_thread->set_previous_mode(((current_trap->regs->cs & 3) != 0) ? Thread::PreviousMode::UserMode : Thread::PreviousMode::KernelMode);
        } else {
            // If we don't have a higher level trap then we're back in user mode.
            // Unless we're a kernel process, in which case we're always in kernel mode
            current_thread->set_previous_mode(current_thread->process().is_kernel_process() ? Thread::PreviousMode::KernelMode : Thread::PreviousMode::UserMode);
        }
    }
}

void Processor::check_invoke_scheduler()
{
    VERIFY(!m_in_irq);
    VERIFY(!m_in_critical);
    if (m_invoke_scheduler_async && m_scheduler_initialized) {
        m_invoke_scheduler_async = false;
        Scheduler::invoke_async();
    }
}

void Processor::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
{
    auto ptr = vaddr.as_ptr();
    while (page_count > 0) {
        // clang-format off
        asm volatile("invlpg %0"
             :
             : "m"(*ptr)
             : "memory");
        // clang-format on
        ptr += PAGE_SIZE;
        page_count--;
    }
}

void Processor::flush_tlb(const PageDirectory* page_directory, VirtualAddress vaddr, size_t page_count)
{
    if (s_smp_enabled && (!is_user_address(vaddr) || Process::current()->thread_count() > 1))
        smp_broadcast_flush_tlb(page_directory, vaddr, page_count);
    else
        flush_tlb_local(vaddr, page_count);
}

static volatile ProcessorMessage* s_message_pool;

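// The message pool is a lock-free singly-linked stack: smp_return_to_pool()
// pushes onto the head with a compare-and-swap, and smp_get_from_pool() pops
// the head the same way. Messages are allocated once in smp_enable() and are
// never freed, so reading a stale "next" pointer during a lost race is still
// safe.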
void Processor::smp_return_to_pool(ProcessorMessage& msg)
{
    ProcessorMessage* next = nullptr;
    do {
        msg.next = next;
    } while (!atomic_compare_exchange_strong(&s_message_pool, next, &msg, AK::MemoryOrder::memory_order_acq_rel));
}

ProcessorMessage& Processor::smp_get_from_pool()
{
    ProcessorMessage* msg;

    // The assumption is that messages are never removed from the pool!
    for (;;) {
        msg = atomic_load(&s_message_pool, AK::MemoryOrder::memory_order_consume);
        if (!msg) {
            if (!Processor::current().smp_process_pending_messages()) {
                // TODO: pause for a bit?
            }
            continue;
        }
        // If another processor were to use this message in the meanwhile,
        // "msg" is still valid (because it never gets freed). We'd detect
        // this because the expected value "msg" and pool would
        // no longer match, and the compare_exchange will fail. But accessing
        // "msg->next" is always safe here.
        if (atomic_compare_exchange_strong(&s_message_pool, msg, msg->next, AK::MemoryOrder::memory_order_acq_rel)) {
            // We successfully "popped" this available message
            break;
        }
    }

    VERIFY(msg != nullptr);
    return *msg;
}

Atomic<u32> Processor::s_idle_cpu_mask { 0 };

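// s_idle_cpu_mask has one bit set per processor that is currently idle. To
// wake processors, a set of candidate bits is cleared atomically with
// fetch_and; only bits that were still set in the returned value were
// actually claimed by us, and only those CPUs get a wakeup IPI. This keeps
// concurrent callers from waking the same processor twice.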
u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
{
    VERIFY(Processor::current().in_critical());
    VERIFY(wake_count > 0);
    if (!s_smp_enabled)
        return 0;

    // Wake at most N - 1 processors
    if (wake_count >= Processor::count()) {
        wake_count = Processor::count() - 1;
        VERIFY(wake_count > 0);
    }

    u32 current_id = Processor::current().id();

    u32 did_wake_count = 0;
    auto& apic = APIC::the();
    while (did_wake_count < wake_count) {
        // Try to get a set of idle CPUs and flip them to busy
        u32 idle_mask = s_idle_cpu_mask.load(AK::MemoryOrder::memory_order_relaxed) & ~(1u << current_id);
        u32 idle_count = __builtin_popcountl(idle_mask);
        if (idle_count == 0)
            break; // No (more) idle processor available

        u32 found_mask = 0;
        for (u32 i = 0; i < idle_count; i++) {
            u32 cpu = __builtin_ffsl(idle_mask) - 1;
            idle_mask &= ~(1u << cpu);
            found_mask |= 1u << cpu;
        }

        idle_mask = s_idle_cpu_mask.fetch_and(~found_mask, AK::MemoryOrder::memory_order_acq_rel) & found_mask;
        if (idle_mask == 0)
            continue; // All of them were flipped to busy, try again
        idle_count = __builtin_popcountl(idle_mask);
        for (u32 i = 0; i < idle_count; i++) {
            u32 cpu = __builtin_ffsl(idle_mask) - 1;
            idle_mask &= ~(1u << cpu);

            // Send an IPI to that CPU to wake it up. There is a possibility
            // someone else woke it up as well, or that it woke up due to
            // a timer interrupt. But we tried hard to avoid this...
            apic.send_ipi(cpu);
            did_wake_count++;
        }
    }
    return did_wake_count;
}

UNMAP_AFTER_INIT void Processor::smp_enable()
{
    size_t msg_pool_size = Processor::count() * 100u;
    size_t msg_entries_cnt = Processor::count();

    auto msgs = new ProcessorMessage[msg_pool_size];
    auto msg_entries = new ProcessorMessageEntry[msg_pool_size * msg_entries_cnt];

    size_t msg_entry_i = 0;
    for (size_t i = 0; i < msg_pool_size; i++, msg_entry_i += msg_entries_cnt) {
        auto& msg = msgs[i];
        msg.next = i < msg_pool_size - 1 ? &msgs[i + 1] : nullptr;
        msg.per_proc_entries = &msg_entries[msg_entry_i];
        for (size_t k = 0; k < msg_entries_cnt; k++)
            msg_entries[msg_entry_i + k].msg = &msg;
    }

    atomic_store(&s_message_pool, &msgs[0], AK::MemoryOrder::memory_order_release);

    // Start sending IPI messages
    s_smp_enabled = true;
}

void Processor::smp_cleanup_message(ProcessorMessage& msg)
{
    switch (msg.type) {
    case ProcessorMessage::CallbackWithData:
        if (msg.callback_with_data.free)
            msg.callback_with_data.free(msg.callback_with_data.data);
        break;
    default:
        break;
    }
}

bool Processor::smp_process_pending_messages()
{
    bool did_process = false;
    u32 prev_flags;
    enter_critical(prev_flags);

    if (auto pending_msgs = atomic_exchange(&m_message_queue, nullptr, AK::MemoryOrder::memory_order_acq_rel)) {
        // We pulled the stack of pending messages in LIFO order, so we need to reverse the list first
        auto reverse_list =
            [](ProcessorMessageEntry* list) -> ProcessorMessageEntry* {
                ProcessorMessageEntry* rev_list = nullptr;
                while (list) {
                    auto next = list->next;
                    list->next = rev_list;
                    rev_list = list;
                    list = next;
                }
                return rev_list;
            };

        pending_msgs = reverse_list(pending_msgs);

        // now process in the right order
        ProcessorMessageEntry* next_msg;
        for (auto cur_msg = pending_msgs; cur_msg; cur_msg = next_msg) {
            next_msg = cur_msg->next;
            auto msg = cur_msg->msg;

            dbgln_if(SMP_DEBUG, "SMP[{}]: Processing message {}", id(), VirtualAddress(msg));

            switch (msg->type) {
            case ProcessorMessage::Callback:
                msg->callback.handler();
                break;
            case ProcessorMessage::CallbackWithData:
                msg->callback_with_data.handler(msg->callback_with_data.data);
                break;
            case ProcessorMessage::FlushTlb:
                if (is_user_address(VirtualAddress(msg->flush_tlb.ptr))) {
                    // We assume that we don't cross into kernel land!
                    VERIFY(is_user_range(VirtualAddress(msg->flush_tlb.ptr), msg->flush_tlb.page_count * PAGE_SIZE));
                    if (read_cr3() != msg->flush_tlb.page_directory->cr3()) {
                        // This processor isn't using this page directory right now, we can ignore this request
                        dbgln_if(SMP_DEBUG, "SMP[{}]: No need to flush {} pages at {}", id(), msg->flush_tlb.page_count, VirtualAddress(msg->flush_tlb.ptr));
                        break;
                    }
                }
                flush_tlb_local(VirtualAddress(msg->flush_tlb.ptr), msg->flush_tlb.page_count);
                break;
            }

            bool is_async = msg->async; // Need to cache this value *before* dropping the ref count!
            auto prev_refs = atomic_fetch_sub(&msg->refs, 1u, AK::MemoryOrder::memory_order_acq_rel);
            VERIFY(prev_refs != 0);
            if (prev_refs == 1) {
                // All processors handled this. If this is an async message,
                // we need to clean it up and return it to the pool
                if (is_async) {
                    smp_cleanup_message(*msg);
                    smp_return_to_pool(*msg);
                }
            }

            if (m_halt_requested.load(AK::MemoryOrder::memory_order_relaxed))
                halt_this();
        }
        did_process = true;
    } else if (m_halt_requested.load(AK::MemoryOrder::memory_order_relaxed)) {
        halt_this();
    }

    leave_critical(prev_flags);
    return did_process;
}

bool Processor::smp_queue_message(ProcessorMessage& msg)
{
    // Note that it's quite possible that the other processor may pop
    // the queue at any given time. We rely on the fact that the messages
    // are pooled and never get freed!
    auto& msg_entry = msg.per_proc_entries[id()];
    VERIFY(msg_entry.msg == &msg);
    ProcessorMessageEntry* next = nullptr;
    do {
        msg_entry.next = next;
    } while (!atomic_compare_exchange_strong(&m_message_queue, next, &msg_entry, AK::MemoryOrder::memory_order_acq_rel));
    return next == nullptr;
}

void Processor::smp_broadcast_message(ProcessorMessage& msg)
{
    auto& cur_proc = Processor::current();

    dbgln_if(SMP_DEBUG, "SMP[{}]: Broadcast message {} to cpus: {} proc: {}", cur_proc.get_id(), VirtualAddress(&msg), count(), VirtualAddress(&cur_proc));

    atomic_store(&msg.refs, count() - 1, AK::MemoryOrder::memory_order_release);
    VERIFY(msg.refs > 0);
    bool need_broadcast = false;
    for_each(
        [&](Processor& proc) -> IterationDecision {
            if (&proc != &cur_proc) {
                if (proc.smp_queue_message(msg))
                    need_broadcast = true;
            }
            return IterationDecision::Continue;
        });

    // Now trigger an IPI on all other APs (unless all targets already had messages queued)
    if (need_broadcast)
        APIC::the().broadcast_ipi();
}

void Processor::smp_broadcast_wait_sync(ProcessorMessage& msg)
{
    auto& cur_proc = Processor::current();
    VERIFY(!msg.async);
    // If synchronous then we must cleanup and return the message back
    // to the pool. Otherwise, the last processor to complete it will return it
    while (atomic_load(&msg.refs, AK::MemoryOrder::memory_order_consume) != 0) {
        // TODO: pause for a bit?

        // We need to process any messages that may have been sent to
        // us while we're waiting. This also checks if another processor
        // may have requested us to halt.
        cur_proc.smp_process_pending_messages();
    }

    smp_cleanup_message(msg);
    smp_return_to_pool(msg);
}

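// Illustrative use of the broadcast API (a sketch, not code used elsewhere in
// this file): a capture-less lambda decays to the plain function pointer that
// the overload below expects.
//
//     Processor::smp_broadcast([] {
//         dbgln("Hello from CPU #{}", Processor::id());
//     }, false); // synchronous: returns once every other processor ran it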
void Processor::smp_broadcast(void (*callback)(void*), void* data, void (*free_data)(void*), bool async)
{
    auto& msg = smp_get_from_pool();
    msg.async = async;
    msg.type = ProcessorMessage::CallbackWithData;
    msg.callback_with_data.handler = callback;
    msg.callback_with_data.data = data;
    msg.callback_with_data.free = free_data;
    smp_broadcast_message(msg);
    if (!async)
        smp_broadcast_wait_sync(msg);
}

void Processor::smp_broadcast(void (*callback)(), bool async)
{
    auto& msg = smp_get_from_pool();
    msg.async = async;
    // Plain-callback variant: use the Callback message type, matching the
    // union member that is actually initialized below.
    msg.type = ProcessorMessage::Callback;
    msg.callback.handler = callback;
    smp_broadcast_message(msg);
    if (!async)
        smp_broadcast_wait_sync(msg);
}

void Processor::smp_unicast_message(u32 cpu, ProcessorMessage& msg, bool async)
{
    auto& cur_proc = Processor::current();
    VERIFY(cpu != cur_proc.get_id());
    auto& target_proc = processors()[cpu];
    msg.async = async;

    dbgln_if(SMP_DEBUG, "SMP[{}]: Send message {} to cpu #{} proc: {}", cur_proc.get_id(), VirtualAddress(&msg), cpu, VirtualAddress(&target_proc));

    atomic_store(&msg.refs, 1u, AK::MemoryOrder::memory_order_release);
    if (target_proc->smp_queue_message(msg)) {
        APIC::the().send_ipi(cpu);
    }

    if (!async) {
        // If synchronous then we must cleanup and return the message back
        // to the pool. Otherwise, the last processor to complete it will return it
        while (atomic_load(&msg.refs, AK::MemoryOrder::memory_order_consume) != 0) {
            // TODO: pause for a bit?

            // We need to process any messages that may have been sent to
            // us while we're waiting. This also checks if another processor
            // may have requested us to halt.
            cur_proc.smp_process_pending_messages();
        }

        smp_cleanup_message(msg);
        smp_return_to_pool(msg);
    }
}

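// Illustrative use of the unicast API (a sketch; "target_cpu" and "value" are
// hypothetical): run a callback with a data pointer on one specific processor
// and wait for it to finish.
//
//     static int value = 42;
//     Processor::smp_unicast(
//         target_cpu,
//         [](void* data) { dbgln("got {}", *static_cast<int*>(data)); },
//         &value, nullptr, false);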
void Processor::smp_unicast(u32 cpu, void (*callback)(void*), void* data, void (*free_data)(void*), bool async)
{
    auto& msg = smp_get_from_pool();
    msg.type = ProcessorMessage::CallbackWithData;
    msg.callback_with_data.handler = callback;
    msg.callback_with_data.data = data;
    msg.callback_with_data.free = free_data;
    smp_unicast_message(cpu, msg, async);
}

void Processor::smp_unicast(u32 cpu, void (*callback)(), bool async)
{
    auto& msg = smp_get_from_pool();
    // Plain-callback variant: use the Callback message type, matching the
    // union member that is actually initialized below.
    msg.type = ProcessorMessage::Callback;
    msg.callback.handler = callback;
    smp_unicast_message(cpu, msg, async);
}

void Processor::smp_broadcast_flush_tlb(const PageDirectory* page_directory, VirtualAddress vaddr, size_t page_count)
{
    auto& msg = smp_get_from_pool();
    msg.async = false;
    msg.type = ProcessorMessage::FlushTlb;
    msg.flush_tlb.page_directory = page_directory;
    msg.flush_tlb.ptr = vaddr.as_ptr();
    msg.flush_tlb.page_count = page_count;
    smp_broadcast_message(msg);
    // While the other processors handle this request, we'll flush ours
    flush_tlb_local(vaddr, page_count);
    // Now wait until everybody is done as well
    smp_broadcast_wait_sync(msg);
}

void Processor::smp_broadcast_halt()
{
    // We don't want to use a message, because this could have been triggered
    // by being out of memory and we might not be able to get a message
    for_each(
        [&](Processor& proc) -> IterationDecision {
            proc.m_halt_requested.store(true, AK::MemoryOrder::memory_order_release);
            return IterationDecision::Continue;
        });

    // Now trigger an IPI on all other APs
    APIC::the().broadcast_ipi();
}

void Processor::halt()
{
    if (s_smp_enabled)
        smp_broadcast_halt();

    halt_this();
}

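// Deferred calls are per-processor work items that get executed once the
// processor leaves its critical section (see deferred_call_execute_pending()).
// Each Processor keeps a small fixed pool of DeferredCallEntry objects; if the
// pool runs dry, entries are heap allocated and marked was_allocated so they
// get deleted instead of being returned to the pool.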
UNMAP_AFTER_INIT void Processor::deferred_call_pool_init()
{
    size_t pool_count = sizeof(m_deferred_call_pool) / sizeof(m_deferred_call_pool[0]);
    for (size_t i = 0; i < pool_count; i++) {
        auto& entry = m_deferred_call_pool[i];
        entry.next = i < pool_count - 1 ? &m_deferred_call_pool[i + 1] : nullptr;
        entry.was_allocated = false;
    }
    m_pending_deferred_calls = nullptr;
    m_free_deferred_call_pool_entry = &m_deferred_call_pool[0];
}

void Processor::deferred_call_return_to_pool(DeferredCallEntry* entry)
{
    VERIFY(m_in_critical);
    VERIFY(!entry->was_allocated);

    entry->next = m_free_deferred_call_pool_entry;
    m_free_deferred_call_pool_entry = entry;
}

DeferredCallEntry* Processor::deferred_call_get_free()
{
    VERIFY(m_in_critical);

    if (m_free_deferred_call_pool_entry) {
        // Fast path, we have an entry in our pool
        auto* entry = m_free_deferred_call_pool_entry;
        m_free_deferred_call_pool_entry = entry->next;
        VERIFY(!entry->was_allocated);
        return entry;
    }

    auto* entry = new DeferredCallEntry;
    entry->was_allocated = true;
    return entry;
}

void Processor::deferred_call_execute_pending()
{
    VERIFY(m_in_critical);

    if (!m_pending_deferred_calls)
        return;
    auto* pending_list = m_pending_deferred_calls;
    m_pending_deferred_calls = nullptr;

    // We pulled the stack of pending deferred calls in LIFO order, so we need to reverse the list first
    auto reverse_list =
        [](DeferredCallEntry* list) -> DeferredCallEntry* {
            DeferredCallEntry* rev_list = nullptr;
            while (list) {
                auto next = list->next;
                list->next = rev_list;
                rev_list = list;
                list = next;
            }
            return rev_list;
        };
    pending_list = reverse_list(pending_list);

    do {
        // Call the appropriate callback handler
        if (pending_list->have_data) {
            pending_list->callback_with_data.handler(pending_list->callback_with_data.data);
            if (pending_list->callback_with_data.free)
                pending_list->callback_with_data.free(pending_list->callback_with_data.data);
        } else {
            pending_list->callback.handler();
        }

        // Return the entry back to the pool, or free it
        auto* next = pending_list->next;
        if (pending_list->was_allocated)
            delete pending_list;
        else
            deferred_call_return_to_pool(pending_list);

        pending_list = next;
    } while (pending_list);
}

void Processor::deferred_call_queue_entry(DeferredCallEntry* entry)
{
    VERIFY(m_in_critical);
    entry->next = m_pending_deferred_calls;
    m_pending_deferred_calls = entry;
}

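// Illustrative use (a sketch, not code used elsewhere in this file): queue a
// callback to run on the current processor once it is safe to do so. As the
// NOTE below points out, outside of a critical section or IRQ handler the
// callback runs before deferred_call_queue() returns.
//
//     Processor::deferred_call_queue([] {
//         dbgln("deferred work executed");
//     });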
void Processor::deferred_call_queue(void (*callback)())
{
    // NOTE: If we are called outside of a critical section and outside
    // of an irq handler, the function will be executed before we return!
    ScopedCritical critical;
    auto& cur_proc = Processor::current();

    auto* entry = cur_proc.deferred_call_get_free();
    entry->have_data = false;
    entry->callback.handler = callback;

    cur_proc.deferred_call_queue_entry(entry);
}

void Processor::deferred_call_queue(void (*callback)(void*), void* data, void (*free_data)(void*))
{
    // NOTE: If we are called outside of a critical section and outside
    // of an irq handler, the function will be executed before we return!
    ScopedCritical critical;
    auto& cur_proc = Processor::current();

    auto* entry = cur_proc.deferred_call_get_free();
    entry->have_data = true;
    entry->callback_with_data.handler = callback;
    entry->callback_with_data.data = data;
    entry->callback_with_data.free = free_data;

    cur_proc.deferred_call_queue_entry(entry);
}

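// gdt_init() rebuilds this processor's GDT from scratch: the mandatory null
// descriptor, kernel and user code/data segments, the user TLS segment (whose
// base is rewritten on every context switch in enter_thread_context()), an fs
// segment whose base is this Processor object (which is how per-processor
// data is found), and finally the TSS descriptor.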
UNMAP_AFTER_INIT void Processor::gdt_init()
{
    m_gdt_length = 0;
    m_gdtr.address = nullptr;
    m_gdtr.limit = 0;

    write_raw_gdt_entry(0x0000, 0x00000000, 0x00000000);
    write_raw_gdt_entry(GDT_SELECTOR_CODE0, 0x0000ffff, 0x00cf9a00); // code0
    write_raw_gdt_entry(GDT_SELECTOR_DATA0, 0x0000ffff, 0x00cf9200); // data0
    write_raw_gdt_entry(GDT_SELECTOR_CODE3, 0x0000ffff, 0x00cffa00); // code3
    write_raw_gdt_entry(GDT_SELECTOR_DATA3, 0x0000ffff, 0x00cff200); // data3

    Descriptor tls_descriptor {};
    tls_descriptor.low = tls_descriptor.high = 0;
    tls_descriptor.dpl = 3;
    tls_descriptor.segment_present = 1;
    tls_descriptor.granularity = 0;
    tls_descriptor.operation_size64 = 0;
    tls_descriptor.operation_size32 = 1;
    tls_descriptor.descriptor_type = 1;
    tls_descriptor.type = 2;
    write_gdt_entry(GDT_SELECTOR_TLS, tls_descriptor); // tls3

    Descriptor fs_descriptor {};
    fs_descriptor.set_base(VirtualAddress { this });
    fs_descriptor.set_limit(sizeof(Processor));
    fs_descriptor.dpl = 0;
    fs_descriptor.segment_present = 1;
    fs_descriptor.granularity = 0;
    fs_descriptor.operation_size64 = 0;
    fs_descriptor.operation_size32 = 1;
    fs_descriptor.descriptor_type = 1;
    fs_descriptor.type = 2;
    write_gdt_entry(GDT_SELECTOR_PROC, fs_descriptor); // fs0

    Descriptor tss_descriptor {};
    tss_descriptor.set_base(VirtualAddress { &m_tss });
    tss_descriptor.set_limit(sizeof(TSS32));
    tss_descriptor.dpl = 0;
    tss_descriptor.segment_present = 1;
    tss_descriptor.granularity = 0;
    tss_descriptor.operation_size64 = 0;
    tss_descriptor.operation_size32 = 1;
    tss_descriptor.descriptor_type = 0;
    tss_descriptor.type = 9;
    write_gdt_entry(GDT_SELECTOR_TSS, tss_descriptor); // tss

    flush_gdt();
    load_task_register(GDT_SELECTOR_TSS);

    asm volatile(
        "mov %%ax, %%ds\n"
        "mov %%ax, %%es\n"
        "mov %%ax, %%gs\n"
        "mov %%ax, %%ss\n" ::"a"(GDT_SELECTOR_DATA0)
        : "memory");
    set_fs(GDT_SELECTOR_PROC);

#if ARCH(I386)
    // Make sure CS points to the kernel code descriptor.
    // clang-format off
    asm volatile(
        "ljmpl $" __STRINGIFY(GDT_SELECTOR_CODE0) ", $sanity\n"
        "sanity:\n");
    // clang-format on
#endif
}

void copy_kernel_registers_into_ptrace_registers(PtraceRegisters& ptrace_regs, const RegisterState& kernel_regs)
{
    ptrace_regs.eax = kernel_regs.eax;
    ptrace_regs.ecx = kernel_regs.ecx;
    ptrace_regs.edx = kernel_regs.edx;
    ptrace_regs.ebx = kernel_regs.ebx;
    ptrace_regs.esp = kernel_regs.userspace_esp;
    ptrace_regs.ebp = kernel_regs.ebp;
    ptrace_regs.esi = kernel_regs.esi;
    ptrace_regs.edi = kernel_regs.edi;
    ptrace_regs.eip = kernel_regs.eip;
    ptrace_regs.eflags = kernel_regs.eflags;
    ptrace_regs.cs = 0;
    ptrace_regs.ss = 0;
    ptrace_regs.ds = 0;
    ptrace_regs.es = 0;
    ptrace_regs.fs = 0;
    ptrace_regs.gs = 0;
}

void copy_ptrace_registers_into_kernel_registers(RegisterState& kernel_regs, const PtraceRegisters& ptrace_regs)
{
    kernel_regs.eax = ptrace_regs.eax;
    kernel_regs.ecx = ptrace_regs.ecx;
    kernel_regs.edx = ptrace_regs.edx;
    kernel_regs.ebx = ptrace_regs.ebx;
    kernel_regs.esp = ptrace_regs.esp;
    kernel_regs.ebp = ptrace_regs.ebp;
    kernel_regs.esi = ptrace_regs.esi;
    kernel_regs.edi = ptrace_regs.edi;
    kernel_regs.eip = ptrace_regs.eip;
    kernel_regs.eflags = (kernel_regs.eflags & ~safe_eflags_mask) | (ptrace_regs.eflags & safe_eflags_mask);
}

}

#ifdef DEBUG
void __assertion_failed(const char* msg, const char* file, unsigned line, const char* func)
{
    asm volatile("cli");
    dmesgln("ASSERTION FAILED: {}", msg);
    dmesgln("{}:{} in {}", file, line, func);

    // Switch back to the current process's page tables if there are any.
    // Otherwise stack walking will be a disaster.
    auto process = Process::current();
    if (process)
        MM.enter_process_paging_scope(*process);

    Kernel::dump_backtrace();
    Processor::halt();
}
#endif

NonMaskableInterruptDisabler::NonMaskableInterruptDisabler()
{
    IO::out8(0x70, IO::in8(0x70) | 0x80);
}

NonMaskableInterruptDisabler::~NonMaskableInterruptDisabler()
{
    IO::out8(0x70, IO::in8(0x70) & 0x7F);
}