/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <AK/Assertions.h>
#include <AK/ScopeGuard.h>
#include <AK/String.h>
#include <AK/StringBuilder.h>
#include <AK/Types.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Arch/i386/ISRStubs.h>
#include <Kernel/Arch/i386/ProcessorInfo.h>
#include <Kernel/Arch/i386/SafeMem.h>
#include <Kernel/Debug.h>
#include <Kernel/IO.h>
#include <Kernel/Interrupts/APIC.h>
#include <Kernel/Interrupts/GenericInterruptHandler.h>
#include <Kernel/Interrupts/IRQHandler.h>
#include <Kernel/Interrupts/InterruptManagement.h>
#include <Kernel/Interrupts/SharedIRQHandler.h>
#include <Kernel/Interrupts/SpuriousInterruptHandler.h>
#include <Kernel/Interrupts/UnhandledInterruptHandler.h>
#include <Kernel/KSyms.h>
#include <Kernel/Panic.h>
#include <Kernel/Process.h>
#include <Kernel/Random.h>
#include <Kernel/SpinLock.h>
#include <Kernel/Thread.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PageDirectory.h>
#include <Kernel/VM/ProcessPagingScope.h>
#include <LibC/mallocdefs.h>

extern FlatPtr start_of_unmap_after_init;
extern FlatPtr end_of_unmap_after_init;
extern FlatPtr start_of_ro_after_init;
extern FlatPtr end_of_ro_after_init;

namespace Kernel {

READONLY_AFTER_INIT static DescriptorTablePointer s_idtr;
READONLY_AFTER_INIT static Descriptor s_idt[256];

static GenericInterruptHandler* s_interrupt_handler[GENERIC_INTERRUPT_HANDLERS_COUNT];

static EntropySource s_entropy_source_interrupts { EntropySource::Static::Interrupts };

// The compiler can't see the calls to these functions inside assembly.
// Declare them, to avoid dead code warnings.
extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread);
extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, TrapFrame* trap);
extern "C" u32 do_init_context(Thread* thread, u32 flags);
extern "C" void exit_kernel_thread(void);
extern "C" void pre_init_finished(void);
extern "C" void post_init_finished(void);
extern "C" void handle_interrupt(TrapFrame*);

// clang-format off
#define EH_ENTRY(ec, title) \
    extern "C" void title##_asm_entry(); \
    extern "C" void title##_handler(TrapFrame*); \
    asm( \
        ".globl " #title "_asm_entry\n" \
        "" #title "_asm_entry: \n" \
        " pusha\n" \
        " pushl %ds\n" \
        " pushl %es\n" \
        " pushl %fs\n" \
        " pushl %gs\n" \
        " pushl %ss\n" \
        " mov $" __STRINGIFY(GDT_SELECTOR_DATA0) ", %ax\n" \
        " mov %ax, %ds\n" \
        " mov %ax, %es\n" \
        " mov $" __STRINGIFY(GDT_SELECTOR_PROC) ", %ax\n" \
        " mov %ax, %fs\n" \
        " pushl %esp \n" /* set TrapFrame::regs */ \
        " subl $" __STRINGIFY(TRAP_FRAME_SIZE - 4) ", %esp \n" \
        " pushl %esp \n" \
        " cld\n" \
        " call enter_trap_no_irq \n" \
        " call " #title "_handler\n" \
        " jmp common_trap_exit \n");

#define EH_ENTRY_NO_CODE(ec, title) \
    extern "C" void title##_handler(TrapFrame*); \
    extern "C" void title##_asm_entry(); \
    asm( \
        ".globl " #title "_asm_entry\n" \
        "" #title "_asm_entry: \n" \
        " pushl $0x0\n" \
        " pusha\n" \
        " pushl %ds\n" \
        " pushl %es\n" \
        " pushl %fs\n" \
        " pushl %gs\n" \
        " pushl %ss\n" \
        " mov $" __STRINGIFY(GDT_SELECTOR_DATA0) ", %ax\n" \
        " mov %ax, %ds\n" \
        " mov %ax, %es\n" \
        " mov $" __STRINGIFY(GDT_SELECTOR_PROC) ", %ax\n" \
        " mov %ax, %fs\n" \
        " pushl %esp \n" /* set TrapFrame::regs */ \
        " subl $" __STRINGIFY(TRAP_FRAME_SIZE - 4) ", %esp \n" \
        " pushl %esp \n" \
        " cld\n" \
        " call enter_trap_no_irq \n" \
        " call " #title "_handler\n" \
        " jmp common_trap_exit \n");
// clang-format on
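
// A note on the stubs above: each *_asm_entry pushes an error code (a dummy 0
// in the EH_ENTRY_NO_CODE case; the CPU itself pushes a real one for EH_ENTRY
// vectors), then the general-purpose and segment registers, which together
// form the RegisterState the C++ handlers receive. The first "pushl %esp"
// stores the RegisterState pointer, which becomes TrapFrame::regs; the
// "subl $TRAP_FRAME_SIZE - 4" then reserves the remaining TrapFrame fields
// (minus the 4 bytes already pushed), and the second "pushl %esp" passes the
// TrapFrame* to enter_trap_no_irq() and the handler. This reading is inferred
// from the stubs themselves and from enter_trap_no_irq() below.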

static void dump(const RegisterState& regs)
{
    u16 ss;
    u32 esp;
    if (!(regs.cs & 3)) {
        ss = regs.ss;
        esp = regs.esp;
    } else {
        ss = regs.userspace_ss;
        esp = regs.userspace_esp;
    }

    dbgln("Exception code: {:04x} (isr: {:04x})", regs.exception_code, regs.isr_number);
    dbgln(" pc={:04x}:{:08x} eflags={:08x}", (u16)regs.cs, regs.eip, regs.eflags);
    dbgln(" stack={:04x}:{:08x}", ss, esp);
    dbgln(" ds={:04x} es={:04x} fs={:04x} gs={:04x}", (u16)regs.ds, (u16)regs.es, (u16)regs.fs, (u16)regs.gs);
    dbgln(" eax={:08x} ebx={:08x} ecx={:08x} edx={:08x}", regs.eax, regs.ebx, regs.ecx, regs.edx);
    dbgln(" ebp={:08x} esp={:08x} esi={:08x} edi={:08x}", regs.ebp, regs.esp, regs.esi, regs.edi);
    dbgln(" cr0={:08x} cr2={:08x} cr3={:08x} cr4={:08x}", read_cr0(), read_cr2(), read_cr3(), read_cr4());
}

void handle_crash(RegisterState& regs, const char* description, int signal, bool out_of_memory)
{
    auto process = Process::current();
    if (!process) {
        PANIC("{} with !current", description);
    }

    // If a process crashed while inspecting another process,
    // make sure we switch back to the right page tables.
    MM.enter_process_paging_scope(*process);

    dmesgln("CRASH: CPU #{} {} in ring {}", Processor::id(), description, (regs.cs & 3));
    dump(regs);

    if (!(regs.cs & 3)) {
        PANIC("Crash in ring 0");
    }

    cli();
    process->crash(signal, regs.eip, out_of_memory);
}

EH_ENTRY_NO_CODE(6, illegal_instruction);
void illegal_instruction_handler(TrapFrame* trap)
{
    clac();
    handle_crash(*trap->regs, "Illegal instruction", SIGILL);
}

EH_ENTRY_NO_CODE(0, divide_error);
void divide_error_handler(TrapFrame* trap)
{
    clac();
    handle_crash(*trap->regs, "Divide error", SIGFPE);
}

EH_ENTRY(13, general_protection_fault);
void general_protection_fault_handler(TrapFrame* trap)
{
    clac();
    handle_crash(*trap->regs, "General protection fault", SIGSEGV);
}

// 7: FPU not available exception
EH_ENTRY_NO_CODE(7, fpu_exception);
void fpu_exception_handler(TrapFrame*)
{
    // Just clear the TS flag. We've already restored the FPU state eagerly.
    // FIXME: It would be nice if we didn't have to do this at all.
    asm volatile("clts");
}

// 14: Page Fault
EH_ENTRY(14, page_fault);
void page_fault_handler(TrapFrame* trap)
{
    clac();

    auto& regs = *trap->regs;
    u32 fault_address;
    asm("movl %%cr2, %%eax"
        : "=a"(fault_address));

    if constexpr (PAGE_FAULT_DEBUG) {
        u32 fault_page_directory = read_cr3();
        dbgln("CPU #{} ring {} {} page fault in PD={:#x}, {}{} {}",
            Processor::is_initialized() ? Processor::id() : 0,
            regs.cs & 3,
            regs.exception_code & 1 ? "PV" : "NP",
            fault_page_directory,
            regs.exception_code & 8 ? "reserved-bit " : "",
            regs.exception_code & 2 ? "write" : "read",
            VirtualAddress(fault_address));
        dump(regs);
    }

    bool faulted_in_kernel = !(regs.cs & 3);

    if (faulted_in_kernel && Processor::current().in_irq()) {
        // If we're faulting in an IRQ handler, first check if we failed
        // due to safe_memcpy, safe_strnlen, or safe_memset. If we did,
        // gracefully continue immediately. Because we're in an IRQ handler
        // we can't really try to resolve the page fault in a meaningful
        // way, so we need to do this before calling into
        // MemoryManager::handle_page_fault, which would just bail and
        // request a crash
        if (handle_safe_access_fault(regs, fault_address))
            return;
    }

    auto current_thread = Thread::current();
    if (current_thread)
        current_thread->set_handling_page_fault(true);
    ScopeGuard guard = [current_thread] {
        if (current_thread)
            current_thread->set_handling_page_fault(false);
    };

    if (!faulted_in_kernel && !MM.validate_user_stack(current_thread->process(), VirtualAddress(regs.userspace_esp))) {
        dbgln("Invalid stack pointer: {}", VirtualAddress(regs.userspace_esp));
        handle_crash(regs, "Bad stack on page fault", SIGSTKFLT);
    }

    if (fault_address >= (FlatPtr)&start_of_ro_after_init && fault_address < (FlatPtr)&end_of_ro_after_init) {
        dump(regs);
        PANIC("Attempt to write into READONLY_AFTER_INIT section");
    }

    if (fault_address >= (FlatPtr)&start_of_unmap_after_init && fault_address < (FlatPtr)&end_of_unmap_after_init) {
        dump(regs);
        PANIC("Attempt to access UNMAP_AFTER_INIT section");
    }

    auto response = MM.handle_page_fault(PageFault(regs.exception_code, VirtualAddress(fault_address)));
    if (response == PageFaultResponse::ShouldCrash || response == PageFaultResponse::OutOfMemory) {
        if (faulted_in_kernel && handle_safe_access_fault(regs, fault_address)) {
            // If this would be a ring 0 (kernel) fault and the fault was triggered by
            // safe_memcpy, safe_strnlen, or safe_memset, then we resume execution at
            // the appropriate _fault label rather than crashing.
            return;
        }

        if (response != PageFaultResponse::OutOfMemory) {
            if (current_thread->has_signal_handler(SIGSEGV)) {
                current_thread->send_urgent_signal_to_self(SIGSEGV);
                return;
            }
        }

        dbgln("Unrecoverable page fault, {}{}{} address {}",
            regs.exception_code & PageFaultFlags::ReservedBitViolation ? "reserved bit violation / " : "",
            regs.exception_code & PageFaultFlags::InstructionFetch ? "instruction fetch / " : "",
            regs.exception_code & PageFaultFlags::Write ? "write to" : "read from",
            VirtualAddress(fault_address));
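
        // explode_byte() replicates one byte across all four bytes of a u32,
        // e.g. explode_byte(0xab) == 0xabababab. Comparing only the top 16 bits
        // of the fault address against these scrub patterns is a heuristic for
        // spotting pointers that were loaded from scrubbed (uninitialized or
        // recently freed) heap memory.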
        u32 malloc_scrub_pattern = explode_byte(MALLOC_SCRUB_BYTE);
        u32 free_scrub_pattern = explode_byte(FREE_SCRUB_BYTE);
        u32 kmalloc_scrub_pattern = explode_byte(KMALLOC_SCRUB_BYTE);
        u32 kfree_scrub_pattern = explode_byte(KFREE_SCRUB_BYTE);
        u32 slab_alloc_scrub_pattern = explode_byte(SLAB_ALLOC_SCRUB_BYTE);
        u32 slab_dealloc_scrub_pattern = explode_byte(SLAB_DEALLOC_SCRUB_BYTE);
        if ((fault_address & 0xffff0000) == (malloc_scrub_pattern & 0xffff0000)) {
            dbgln("Note: Address {} looks like it may be uninitialized malloc() memory", VirtualAddress(fault_address));
        } else if ((fault_address & 0xffff0000) == (free_scrub_pattern & 0xffff0000)) {
            dbgln("Note: Address {} looks like it may be recently free()'d memory", VirtualAddress(fault_address));
        } else if ((fault_address & 0xffff0000) == (kmalloc_scrub_pattern & 0xffff0000)) {
            dbgln("Note: Address {} looks like it may be uninitialized kmalloc() memory", VirtualAddress(fault_address));
        } else if ((fault_address & 0xffff0000) == (kfree_scrub_pattern & 0xffff0000)) {
            dbgln("Note: Address {} looks like it may be recently kfree()'d memory", VirtualAddress(fault_address));
        } else if ((fault_address & 0xffff0000) == (slab_alloc_scrub_pattern & 0xffff0000)) {
            dbgln("Note: Address {} looks like it may be uninitialized slab_alloc() memory", VirtualAddress(fault_address));
        } else if ((fault_address & 0xffff0000) == (slab_dealloc_scrub_pattern & 0xffff0000)) {
            dbgln("Note: Address {} looks like it may be recently slab_dealloc()'d memory", VirtualAddress(fault_address));
        } else if (fault_address < 4096) {
            dbgln("Note: Address {} looks like a possible nullptr dereference", VirtualAddress(fault_address));
        }

        handle_crash(regs, "Page Fault", SIGSEGV, response == PageFaultResponse::OutOfMemory);
    } else if (response == PageFaultResponse::Continue) {
#if PAGE_FAULT_DEBUG
        dbgln("Continuing after resolved page fault");
#endif
    } else {
        VERIFY_NOT_REACHED();
    }
}

EH_ENTRY_NO_CODE(1, debug);
void debug_handler(TrapFrame* trap)
{
    clac();
    auto& regs = *trap->regs;
    auto current_thread = Thread::current();
    auto& process = current_thread->process();
    if ((regs.cs & 3) == 0) {
        PANIC("Debug exception in ring 0");
    }
    constexpr u8 REASON_SINGLESTEP = 14; // DR6 bit 14 (BS) is set after a single-step trap
    bool is_reason_singlestep = (read_dr6() & (1 << REASON_SINGLESTEP));
    if (!is_reason_singlestep)
        return;

    if (auto tracer = process.tracer()) {
        tracer->set_regs(regs);
    }
    current_thread->send_urgent_signal_to_self(SIGTRAP);
}

EH_ENTRY_NO_CODE(3, breakpoint);
void breakpoint_handler(TrapFrame* trap)
{
    clac();
    auto& regs = *trap->regs;
    auto current_thread = Thread::current();
    auto& process = current_thread->process();
    if ((regs.cs & 3) == 0) {
        PANIC("Breakpoint trap in ring 0");
    }
    if (auto tracer = process.tracer()) {
        tracer->set_regs(regs);
    }
    current_thread->send_urgent_signal_to_self(SIGTRAP);
}

#define EH(i, msg) \
    static void _exception##i() \
    { \
        dbgln("{}", msg); \
        PANIC("cr0={:08x} cr2={:08x} cr3={:08x} cr4={:08x}", read_cr0(), read_cr2(), read_cr3(), read_cr4()); \
    }

EH(2, "Unknown error")
EH(4, "Overflow")
EH(5, "Bounds check")
EH(8, "Double fault")
EH(9, "Coprocessor segment overrun")
EH(10, "Invalid TSS")
EH(11, "Segment not present")
EH(12, "Stack exception")
EH(15, "Unknown error")
EH(16, "Coprocessor error")
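
// The exceptions above have no recovery path yet: each EH(i, msg) expansion
// produces a minimal _exception<i> handler that logs the message and panics.
// idt_init() below wires each one to its vector.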

const DescriptorTablePointer& get_idtr()
{
    return s_idtr;
}

static void unimp_trap()
{
    PANIC("Unhandled IRQ");
}

GenericInterruptHandler& get_interrupt_handler(u8 interrupt_number)
{
    VERIFY(s_interrupt_handler[interrupt_number] != nullptr);
    return *s_interrupt_handler[interrupt_number];
}

static void revert_to_unused_handler(u8 interrupt_number)
{
    new UnhandledInterruptHandler(interrupt_number);
}
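
// A handler slot can be in one of a few states: UnhandledInterruptHandler
// (the default installed by idt_init()), a single IRQHandler, a
// SharedIRQHandler once multiple devices claim the same interrupt number, or
// a SpuriousInterruptHandler that wraps a real handler. The registration
// logic below upgrades a slot in place when a second registration arrives.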
void register_generic_interrupt_handler(u8 interrupt_number, GenericInterruptHandler& handler)
{
    VERIFY(interrupt_number < GENERIC_INTERRUPT_HANDLERS_COUNT);
    if (s_interrupt_handler[interrupt_number] != nullptr) {
        if (s_interrupt_handler[interrupt_number]->type() == HandlerType::UnhandledInterruptHandler) {
            s_interrupt_handler[interrupt_number] = &handler;
            return;
        }
        if (s_interrupt_handler[interrupt_number]->is_shared_handler() && !s_interrupt_handler[interrupt_number]->is_sharing_with_others()) {
            VERIFY(s_interrupt_handler[interrupt_number]->type() == HandlerType::SharedIRQHandler);
            static_cast<SharedIRQHandler*>(s_interrupt_handler[interrupt_number])->register_handler(handler);
            return;
        }
        if (!s_interrupt_handler[interrupt_number]->is_shared_handler()) {
            if (s_interrupt_handler[interrupt_number]->type() == HandlerType::SpuriousInterruptHandler) {
                static_cast<SpuriousInterruptHandler*>(s_interrupt_handler[interrupt_number])->register_handler(handler);
                return;
            }
            VERIFY(s_interrupt_handler[interrupt_number]->type() == HandlerType::IRQHandler);
            auto& previous_handler = *s_interrupt_handler[interrupt_number];
            s_interrupt_handler[interrupt_number] = nullptr;
            SharedIRQHandler::initialize(interrupt_number);
            static_cast<SharedIRQHandler*>(s_interrupt_handler[interrupt_number])->register_handler(previous_handler);
            static_cast<SharedIRQHandler*>(s_interrupt_handler[interrupt_number])->register_handler(handler);
            return;
        }
        VERIFY_NOT_REACHED();
    } else {
        s_interrupt_handler[interrupt_number] = &handler;
    }
}

void unregister_generic_interrupt_handler(u8 interrupt_number, GenericInterruptHandler& handler)
{
    VERIFY(s_interrupt_handler[interrupt_number] != nullptr);
    if (s_interrupt_handler[interrupt_number]->type() == HandlerType::UnhandledInterruptHandler) {
        dbgln("Trying to unregister unused handler (?)");
        return;
    }
    if (s_interrupt_handler[interrupt_number]->is_shared_handler() && !s_interrupt_handler[interrupt_number]->is_sharing_with_others()) {
        VERIFY(s_interrupt_handler[interrupt_number]->type() == HandlerType::SharedIRQHandler);
        static_cast<SharedIRQHandler*>(s_interrupt_handler[interrupt_number])->unregister_handler(handler);
        if (!static_cast<SharedIRQHandler*>(s_interrupt_handler[interrupt_number])->sharing_devices_count()) {
            revert_to_unused_handler(interrupt_number);
        }
        return;
    }
    if (!s_interrupt_handler[interrupt_number]->is_shared_handler()) {
        VERIFY(s_interrupt_handler[interrupt_number]->type() == HandlerType::IRQHandler);
        revert_to_unused_handler(interrupt_number);
        return;
    }
    VERIFY_NOT_REACHED();
}
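
// Each IDT gate is two 32-bit words: the low word combines the ring 0 code
// segment selector (0x0008, in the upper half) with the low 16 bits of the
// handler address, and the high word combines the high 16 bits of the handler
// address with the gate attributes. 0x8e00 encodes a present, DPL 0, 32-bit
// interrupt gate; 0xef00 encodes a present, DPL 3, 32-bit trap gate, which is
// what makes the debug and breakpoint vectors callable from ring 3.
// For example, a handler at 0xc0123456 encodes as low = 0x00083456 and
// high = 0xc0128e00.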
UNMAP_AFTER_INIT void register_interrupt_handler(u8 index, void (*f)())
{
    s_idt[index].low = 0x00080000 | LSW((f));
    s_idt[index].high = ((u32)(f)&0xffff0000) | 0x8e00;
}

UNMAP_AFTER_INIT void register_user_callable_interrupt_handler(u8 index, void (*f)())
{
    s_idt[index].low = 0x00080000 | LSW((f));
    s_idt[index].high = ((u32)(f)&0xffff0000) | 0xef00;
}

UNMAP_AFTER_INIT void flush_idt()
{
    asm("lidt %0" ::"m"(s_idtr));
}

UNMAP_AFTER_INIT static void idt_init()
{
    s_idtr.address = s_idt;
    s_idtr.limit = 256 * 8 - 1;

    register_interrupt_handler(0x00, divide_error_asm_entry);
    register_user_callable_interrupt_handler(0x01, debug_asm_entry);
    register_interrupt_handler(0x02, _exception2);
    register_user_callable_interrupt_handler(0x03, breakpoint_asm_entry);
    register_interrupt_handler(0x04, _exception4);
    register_interrupt_handler(0x05, _exception5);
    register_interrupt_handler(0x06, illegal_instruction_asm_entry);
    register_interrupt_handler(0x07, fpu_exception_asm_entry);
    register_interrupt_handler(0x08, _exception8);
    register_interrupt_handler(0x09, _exception9);
    register_interrupt_handler(0x0a, _exception10);
    register_interrupt_handler(0x0b, _exception11);
    register_interrupt_handler(0x0c, _exception12);
    register_interrupt_handler(0x0d, general_protection_fault_asm_entry);
    register_interrupt_handler(0x0e, page_fault_asm_entry);
    register_interrupt_handler(0x0f, _exception15);
    register_interrupt_handler(0x10, _exception16);

    for (u8 i = 0x11; i < 0x50; i++)
        register_interrupt_handler(i, unimp_trap);
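
    // Vectors 0x50 through 0xff make up the generic interrupt range. Each
    // vector's stub is named after its decimal value (vector 0x50 is
    // interrupt_80_asm_entry), and handle_interrupt() maps a vector back to an
    // index into s_interrupt_handler[] by subtracting IRQ_VECTOR_BASE (0x50).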
    register_interrupt_handler(0x50, interrupt_80_asm_entry);
    register_interrupt_handler(0x51, interrupt_81_asm_entry);
    register_interrupt_handler(0x52, interrupt_82_asm_entry);
    register_interrupt_handler(0x53, interrupt_83_asm_entry);
    register_interrupt_handler(0x54, interrupt_84_asm_entry);
    register_interrupt_handler(0x55, interrupt_85_asm_entry);
    register_interrupt_handler(0x56, interrupt_86_asm_entry);
    register_interrupt_handler(0x57, interrupt_87_asm_entry);
    register_interrupt_handler(0x58, interrupt_88_asm_entry);
    register_interrupt_handler(0x59, interrupt_89_asm_entry);
    register_interrupt_handler(0x5a, interrupt_90_asm_entry);
    register_interrupt_handler(0x5b, interrupt_91_asm_entry);
    register_interrupt_handler(0x5c, interrupt_92_asm_entry);
    register_interrupt_handler(0x5d, interrupt_93_asm_entry);
    register_interrupt_handler(0x5e, interrupt_94_asm_entry);
    register_interrupt_handler(0x5f, interrupt_95_asm_entry);
    register_interrupt_handler(0x60, interrupt_96_asm_entry);
    register_interrupt_handler(0x61, interrupt_97_asm_entry);
    register_interrupt_handler(0x62, interrupt_98_asm_entry);
    register_interrupt_handler(0x63, interrupt_99_asm_entry);
    register_interrupt_handler(0x64, interrupt_100_asm_entry);
    register_interrupt_handler(0x65, interrupt_101_asm_entry);
    register_interrupt_handler(0x66, interrupt_102_asm_entry);
    register_interrupt_handler(0x67, interrupt_103_asm_entry);
    register_interrupt_handler(0x68, interrupt_104_asm_entry);
    register_interrupt_handler(0x69, interrupt_105_asm_entry);
    register_interrupt_handler(0x6a, interrupt_106_asm_entry);
    register_interrupt_handler(0x6b, interrupt_107_asm_entry);
    register_interrupt_handler(0x6c, interrupt_108_asm_entry);
    register_interrupt_handler(0x6d, interrupt_109_asm_entry);
    register_interrupt_handler(0x6e, interrupt_110_asm_entry);
    register_interrupt_handler(0x6f, interrupt_111_asm_entry);
    register_interrupt_handler(0x70, interrupt_112_asm_entry);
    register_interrupt_handler(0x71, interrupt_113_asm_entry);
    register_interrupt_handler(0x72, interrupt_114_asm_entry);
    register_interrupt_handler(0x73, interrupt_115_asm_entry);
    register_interrupt_handler(0x74, interrupt_116_asm_entry);
    register_interrupt_handler(0x75, interrupt_117_asm_entry);
    register_interrupt_handler(0x76, interrupt_118_asm_entry);
    register_interrupt_handler(0x77, interrupt_119_asm_entry);
    register_interrupt_handler(0x78, interrupt_120_asm_entry);
    register_interrupt_handler(0x79, interrupt_121_asm_entry);
    register_interrupt_handler(0x7a, interrupt_122_asm_entry);
    register_interrupt_handler(0x7b, interrupt_123_asm_entry);
    register_interrupt_handler(0x7c, interrupt_124_asm_entry);
    register_interrupt_handler(0x7d, interrupt_125_asm_entry);
    register_interrupt_handler(0x7e, interrupt_126_asm_entry);
    register_interrupt_handler(0x7f, interrupt_127_asm_entry);
    register_interrupt_handler(0x80, interrupt_128_asm_entry);
    register_interrupt_handler(0x81, interrupt_129_asm_entry);
    register_interrupt_handler(0x82, interrupt_130_asm_entry);
    register_interrupt_handler(0x83, interrupt_131_asm_entry);
    register_interrupt_handler(0x84, interrupt_132_asm_entry);
    register_interrupt_handler(0x85, interrupt_133_asm_entry);
    register_interrupt_handler(0x86, interrupt_134_asm_entry);
    register_interrupt_handler(0x87, interrupt_135_asm_entry);
    register_interrupt_handler(0x88, interrupt_136_asm_entry);
    register_interrupt_handler(0x89, interrupt_137_asm_entry);
    register_interrupt_handler(0x8a, interrupt_138_asm_entry);
    register_interrupt_handler(0x8b, interrupt_139_asm_entry);
    register_interrupt_handler(0x8c, interrupt_140_asm_entry);
    register_interrupt_handler(0x8d, interrupt_141_asm_entry);
    register_interrupt_handler(0x8e, interrupt_142_asm_entry);
    register_interrupt_handler(0x8f, interrupt_143_asm_entry);
    register_interrupt_handler(0x90, interrupt_144_asm_entry);
    register_interrupt_handler(0x91, interrupt_145_asm_entry);
    register_interrupt_handler(0x92, interrupt_146_asm_entry);
    register_interrupt_handler(0x93, interrupt_147_asm_entry);
    register_interrupt_handler(0x94, interrupt_148_asm_entry);
    register_interrupt_handler(0x95, interrupt_149_asm_entry);
    register_interrupt_handler(0x96, interrupt_150_asm_entry);
    register_interrupt_handler(0x97, interrupt_151_asm_entry);
    register_interrupt_handler(0x98, interrupt_152_asm_entry);
    register_interrupt_handler(0x99, interrupt_153_asm_entry);
    register_interrupt_handler(0x9a, interrupt_154_asm_entry);
    register_interrupt_handler(0x9b, interrupt_155_asm_entry);
    register_interrupt_handler(0x9c, interrupt_156_asm_entry);
    register_interrupt_handler(0x9d, interrupt_157_asm_entry);
    register_interrupt_handler(0x9e, interrupt_158_asm_entry);
    register_interrupt_handler(0x9f, interrupt_159_asm_entry);
    register_interrupt_handler(0xa0, interrupt_160_asm_entry);
    register_interrupt_handler(0xa1, interrupt_161_asm_entry);
    register_interrupt_handler(0xa2, interrupt_162_asm_entry);
    register_interrupt_handler(0xa3, interrupt_163_asm_entry);
    register_interrupt_handler(0xa4, interrupt_164_asm_entry);
    register_interrupt_handler(0xa5, interrupt_165_asm_entry);
    register_interrupt_handler(0xa6, interrupt_166_asm_entry);
    register_interrupt_handler(0xa7, interrupt_167_asm_entry);
    register_interrupt_handler(0xa8, interrupt_168_asm_entry);
    register_interrupt_handler(0xa9, interrupt_169_asm_entry);
    register_interrupt_handler(0xaa, interrupt_170_asm_entry);
    register_interrupt_handler(0xab, interrupt_171_asm_entry);
    register_interrupt_handler(0xac, interrupt_172_asm_entry);
    register_interrupt_handler(0xad, interrupt_173_asm_entry);
    register_interrupt_handler(0xae, interrupt_174_asm_entry);
    register_interrupt_handler(0xaf, interrupt_175_asm_entry);
    register_interrupt_handler(0xb0, interrupt_176_asm_entry);
    register_interrupt_handler(0xb1, interrupt_177_asm_entry);
    register_interrupt_handler(0xb2, interrupt_178_asm_entry);
    register_interrupt_handler(0xb3, interrupt_179_asm_entry);
    register_interrupt_handler(0xb4, interrupt_180_asm_entry);
    register_interrupt_handler(0xb5, interrupt_181_asm_entry);
    register_interrupt_handler(0xb6, interrupt_182_asm_entry);
    register_interrupt_handler(0xb7, interrupt_183_asm_entry);
    register_interrupt_handler(0xb8, interrupt_184_asm_entry);
    register_interrupt_handler(0xb9, interrupt_185_asm_entry);
    register_interrupt_handler(0xba, interrupt_186_asm_entry);
    register_interrupt_handler(0xbb, interrupt_187_asm_entry);
    register_interrupt_handler(0xbc, interrupt_188_asm_entry);
    register_interrupt_handler(0xbd, interrupt_189_asm_entry);
    register_interrupt_handler(0xbe, interrupt_190_asm_entry);
    register_interrupt_handler(0xbf, interrupt_191_asm_entry);
    register_interrupt_handler(0xc0, interrupt_192_asm_entry);
    register_interrupt_handler(0xc1, interrupt_193_asm_entry);
    register_interrupt_handler(0xc2, interrupt_194_asm_entry);
    register_interrupt_handler(0xc3, interrupt_195_asm_entry);
    register_interrupt_handler(0xc4, interrupt_196_asm_entry);
    register_interrupt_handler(0xc5, interrupt_197_asm_entry);
    register_interrupt_handler(0xc6, interrupt_198_asm_entry);
    register_interrupt_handler(0xc7, interrupt_199_asm_entry);
    register_interrupt_handler(0xc8, interrupt_200_asm_entry);
    register_interrupt_handler(0xc9, interrupt_201_asm_entry);
    register_interrupt_handler(0xca, interrupt_202_asm_entry);
    register_interrupt_handler(0xcb, interrupt_203_asm_entry);
    register_interrupt_handler(0xcc, interrupt_204_asm_entry);
    register_interrupt_handler(0xcd, interrupt_205_asm_entry);
    register_interrupt_handler(0xce, interrupt_206_asm_entry);
    register_interrupt_handler(0xcf, interrupt_207_asm_entry);
    register_interrupt_handler(0xd0, interrupt_208_asm_entry);
    register_interrupt_handler(0xd1, interrupt_209_asm_entry);
    register_interrupt_handler(0xd2, interrupt_210_asm_entry);
    register_interrupt_handler(0xd3, interrupt_211_asm_entry);
    register_interrupt_handler(0xd4, interrupt_212_asm_entry);
    register_interrupt_handler(0xd5, interrupt_213_asm_entry);
    register_interrupt_handler(0xd6, interrupt_214_asm_entry);
    register_interrupt_handler(0xd7, interrupt_215_asm_entry);
    register_interrupt_handler(0xd8, interrupt_216_asm_entry);
    register_interrupt_handler(0xd9, interrupt_217_asm_entry);
    register_interrupt_handler(0xda, interrupt_218_asm_entry);
    register_interrupt_handler(0xdb, interrupt_219_asm_entry);
    register_interrupt_handler(0xdc, interrupt_220_asm_entry);
    register_interrupt_handler(0xdd, interrupt_221_asm_entry);
    register_interrupt_handler(0xde, interrupt_222_asm_entry);
    register_interrupt_handler(0xdf, interrupt_223_asm_entry);
    register_interrupt_handler(0xe0, interrupt_224_asm_entry);
    register_interrupt_handler(0xe1, interrupt_225_asm_entry);
    register_interrupt_handler(0xe2, interrupt_226_asm_entry);
    register_interrupt_handler(0xe3, interrupt_227_asm_entry);
    register_interrupt_handler(0xe4, interrupt_228_asm_entry);
    register_interrupt_handler(0xe5, interrupt_229_asm_entry);
    register_interrupt_handler(0xe6, interrupt_230_asm_entry);
    register_interrupt_handler(0xe7, interrupt_231_asm_entry);
    register_interrupt_handler(0xe8, interrupt_232_asm_entry);
    register_interrupt_handler(0xe9, interrupt_233_asm_entry);
    register_interrupt_handler(0xea, interrupt_234_asm_entry);
    register_interrupt_handler(0xeb, interrupt_235_asm_entry);
    register_interrupt_handler(0xec, interrupt_236_asm_entry);
    register_interrupt_handler(0xed, interrupt_237_asm_entry);
    register_interrupt_handler(0xee, interrupt_238_asm_entry);
    register_interrupt_handler(0xef, interrupt_239_asm_entry);
    register_interrupt_handler(0xf0, interrupt_240_asm_entry);
    register_interrupt_handler(0xf1, interrupt_241_asm_entry);
    register_interrupt_handler(0xf2, interrupt_242_asm_entry);
    register_interrupt_handler(0xf3, interrupt_243_asm_entry);
    register_interrupt_handler(0xf4, interrupt_244_asm_entry);
    register_interrupt_handler(0xf5, interrupt_245_asm_entry);
    register_interrupt_handler(0xf6, interrupt_246_asm_entry);
    register_interrupt_handler(0xf7, interrupt_247_asm_entry);
    register_interrupt_handler(0xf8, interrupt_248_asm_entry);
    register_interrupt_handler(0xf9, interrupt_249_asm_entry);
    register_interrupt_handler(0xfa, interrupt_250_asm_entry);
    register_interrupt_handler(0xfb, interrupt_251_asm_entry);
    register_interrupt_handler(0xfc, interrupt_252_asm_entry);
    register_interrupt_handler(0xfd, interrupt_253_asm_entry);
    register_interrupt_handler(0xfe, interrupt_254_asm_entry);
    register_interrupt_handler(0xff, interrupt_255_asm_entry);

    dbgln("Installing Unhandled Handlers");
    for (u8 i = 0; i < GENERIC_INTERRUPT_HANDLERS_COUNT; ++i) {
        new UnhandledInterruptHandler(i);
    }

    flush_idt();
}

void load_task_register(u16 selector)
{
    asm("ltr %0" ::"r"(selector));
}

void handle_interrupt(TrapFrame* trap)
{
    clac();
    auto& regs = *trap->regs;
    VERIFY(regs.isr_number >= IRQ_VECTOR_BASE && regs.isr_number <= (IRQ_VECTOR_BASE + GENERIC_INTERRUPT_HANDLERS_COUNT));
    u8 irq = (u8)(regs.isr_number - 0x50);
    s_entropy_source_interrupts.add_random_event(irq);
    auto* handler = s_interrupt_handler[irq];
    VERIFY(handler);
    handler->increment_invoking_counter();
    handler->handle_interrupt(regs);
    handler->eoi();
}

void enter_trap_no_irq(TrapFrame* trap)
{
    InterruptDisabler disable;
    Processor::current().enter_trap(*trap, false);
}

void enter_trap(TrapFrame* trap)
{
    InterruptDisabler disable;
    Processor::current().enter_trap(*trap, true);
}

void exit_trap(TrapFrame* trap)
{
    InterruptDisabler disable;
    return Processor::current().exit_trap(*trap);
}

UNMAP_AFTER_INIT void write_cr0(u32 value)
{
    asm volatile("movl %%eax, %%cr0" ::"a"(value));
}

UNMAP_AFTER_INIT void write_cr4(u32 value)
{
    asm volatile("movl %%eax, %%cr4" ::"a"(value));
}
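
// sse_init() clears CR0.EM (bit 2) and sets CR0.MP (bit 1) so SSE instructions
// execute natively rather than trapping as coprocessor emulation, then sets
// CR4.OSFXSR and CR4.OSXMMEXCPT (bits 9 and 10, i.e. 0x600) to enable
// fxsave/fxrstor state handling and SIMD floating-point exceptions.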
UNMAP_AFTER_INIT static void sse_init()
{
    write_cr0((read_cr0() & 0xfffffffbu) | 0x2);
    write_cr4(read_cr4() | 0x600);
}

u32 read_cr0()
{
    u32 cr0;
    asm("movl %%cr0, %%eax"
        : "=a"(cr0));
    return cr0;
}

u32 read_cr2()
{
    u32 cr2;
    asm("movl %%cr2, %%eax"
        : "=a"(cr2));
    return cr2;
}

u32 read_cr3()
{
    u32 cr3;
    asm("movl %%cr3, %%eax"
        : "=a"(cr3));
    return cr3;
}

void write_cr3(u32 cr3)
{
    // NOTE: If you're here from a GPF crash, it's very likely that a PDPT entry is incorrect, not this!
    asm volatile("movl %%eax, %%cr3" ::"a"(cr3)
                 : "memory");
}

u32 read_cr4()
{
    u32 cr4;
    asm("movl %%cr4, %%eax"
        : "=a"(cr4));
    return cr4;
}

u32 read_dr6()
{
    u32 dr6;
    asm("movl %%dr6, %%eax"
        : "=a"(dr6));
    return dr6;
}

READONLY_AFTER_INIT FPUState Processor::s_clean_fpu_state;
READONLY_AFTER_INIT static Vector<Processor*>* s_processors;
static SpinLock s_processor_lock;
READONLY_AFTER_INIT volatile u32 Processor::g_total_processors;
static volatile bool s_smp_enabled;

Vector<Processor*>& Processor::processors()
{
    VERIFY(s_processors);
    return *s_processors;
}

Processor& Processor::by_id(u32 cpu)
{
    // s_processors does not need to be protected by a lock of any kind.
    // It is populated early in the boot process, and the BSP is waiting
    // for all APs to finish, after which this array never gets modified
    // again, so it's safe not to protect access to it here.
    auto& procs = processors();
    VERIFY(procs.size() > cpu);
    VERIFY(procs[cpu] != nullptr);
    return *procs[cpu];
}

[[noreturn]] static inline void halt_this()
{
    for (;;) {
        asm volatile("cli; hlt");
    }
}

UNMAP_AFTER_INIT void Processor::cpu_detect()
{
    // NOTE: This is called during Processor::early_initialize, so we cannot
    //       safely log at this point because we don't have kmalloc
    //       initialized yet!
    auto set_feature =
        [&](CPUFeature f) {
            m_features = static_cast<CPUFeature>(static_cast<u32>(m_features) | static_cast<u32>(f));
        };
    m_features = static_cast<CPUFeature>(0);

    CPUID processor_info(0x1);
    if (processor_info.edx() & (1 << 4))
        set_feature(CPUFeature::TSC);
    if (processor_info.edx() & (1 << 6))
        set_feature(CPUFeature::PAE);
    if (processor_info.edx() & (1 << 13))
        set_feature(CPUFeature::PGE);
    if (processor_info.edx() & (1 << 23))
        set_feature(CPUFeature::MMX);
    if (processor_info.edx() & (1 << 25))
        set_feature(CPUFeature::SSE);
    if (processor_info.edx() & (1 << 26))
        set_feature(CPUFeature::SSE2);
    if (processor_info.ecx() & (1 << 0))
        set_feature(CPUFeature::SSE3);
    if (processor_info.ecx() & (1 << 9))
        set_feature(CPUFeature::SSSE3);
    if (processor_info.ecx() & (1 << 19))
        set_feature(CPUFeature::SSE4_1);
    if (processor_info.ecx() & (1 << 20))
        set_feature(CPUFeature::SSE4_2);
    if (processor_info.ecx() & (1 << 30))
        set_feature(CPUFeature::RDRAND);
    if (processor_info.edx() & (1 << 11)) {
        u32 stepping = processor_info.eax() & 0xf;
        u32 model = (processor_info.eax() >> 4) & 0xf;
        u32 family = (processor_info.eax() >> 8) & 0xf;
        // The SEP bit is known to be set but unusable on early Pentium Pro
        // parts (family 6, model < 3, stepping < 3), so exclude those.
        if (!(family == 6 && model < 3 && stepping < 3))
            set_feature(CPUFeature::SEP);
        if ((family == 6 && model >= 3) || (family == 0xf && model >= 0xe))
            set_feature(CPUFeature::CONSTANT_TSC);
    }

    u32 max_extended_leaf = CPUID(0x80000000).eax();

    VERIFY(max_extended_leaf >= 0x80000001);
    CPUID extended_processor_info(0x80000001);
    if (extended_processor_info.edx() & (1 << 20))
        set_feature(CPUFeature::NX);
    if (extended_processor_info.edx() & (1 << 27))
        set_feature(CPUFeature::RDTSCP);
    if (extended_processor_info.edx() & (1 << 11)) {
        // Only available in 64 bit mode
        set_feature(CPUFeature::SYSCALL);
    }

    if (max_extended_leaf >= 0x80000007) {
        CPUID cpuid(0x80000007);
        if (cpuid.edx() & (1 << 8)) {
            set_feature(CPUFeature::CONSTANT_TSC);
            set_feature(CPUFeature::NONSTOP_TSC);
        }
    }

    if (max_extended_leaf >= 0x80000008) {
        // CPUID.80000008H:EAX[7:0] reports the physical-address width supported by the processor.
        CPUID cpuid(0x80000008);
        m_physical_address_bit_width = cpuid.eax() & 0xff;
    } else {
        // For processors that do not support CPUID function 80000008H,
        // the width is generally 36 if CPUID.01H:EDX.PAE [bit 6] = 1 and 32 otherwise.
        m_physical_address_bit_width = has_feature(CPUFeature::PAE) ? 36 : 32;
    }

    CPUID extended_features(0x7);
    if (extended_features.ebx() & (1 << 20))
        set_feature(CPUFeature::SMAP);
    if (extended_features.ebx() & (1 << 7))
        set_feature(CPUFeature::SMEP);
    if (extended_features.ecx() & (1 << 2))
        set_feature(CPUFeature::UMIP);
    if (extended_features.ebx() & (1 << 18))
        set_feature(CPUFeature::RDSEED);
}

UNMAP_AFTER_INIT void Processor::cpu_setup()
{
    // NOTE: This is called during Processor::early_initialize, so we cannot
    //       safely log at this point because we don't have kmalloc
    //       initialized yet!
    cpu_detect();

    if (has_feature(CPUFeature::SSE))
        sse_init();

    write_cr0(read_cr0() | 0x00010000); // CR0.WP: honor read-only pages even in ring 0

    if (has_feature(CPUFeature::PGE)) {
        // Turn on CR4.PGE so the CPU will respect the G bit in page tables.
        write_cr4(read_cr4() | 0x80);
    }

    if (has_feature(CPUFeature::NX)) {
        // Turn on IA32_EFER.NXE (bit 11 of MSR 0xc0000080)
        asm volatile(
            "movl $0xc0000080, %ecx\n"
            "rdmsr\n"
            "orl $0x800, %eax\n"
            "wrmsr\n");
    }

    if (has_feature(CPUFeature::SMEP)) {
        // Turn on CR4.SMEP (bit 20)
        write_cr4(read_cr4() | 0x100000);
    }

    if (has_feature(CPUFeature::SMAP)) {
        // Turn on CR4.SMAP (bit 21)
        write_cr4(read_cr4() | 0x200000);
    }

    if (has_feature(CPUFeature::UMIP)) {
        write_cr4(read_cr4() | 0x800); // CR4.UMIP: forbid sgdt/sidt/sldt/smsw/str in ring 3
    }

    if (has_feature(CPUFeature::TSC)) {
        write_cr4(read_cr4() | 0x4); // CR4.TSD: restrict rdtsc to ring 0
    }
}

String Processor::features_string() const
{
    StringBuilder builder;
    auto feature_to_str =
        [](CPUFeature f) -> const char* {
            switch (f) {
            case CPUFeature::NX:
                return "nx";
            case CPUFeature::PAE:
                return "pae";
            case CPUFeature::PGE:
                return "pge";
            case CPUFeature::RDRAND:
                return "rdrand";
            case CPUFeature::RDSEED:
                return "rdseed";
            case CPUFeature::SMAP:
                return "smap";
            case CPUFeature::SMEP:
                return "smep";
            case CPUFeature::SSE:
                return "sse";
            case CPUFeature::TSC:
                return "tsc";
            case CPUFeature::RDTSCP:
                return "rdtscp";
            case CPUFeature::CONSTANT_TSC:
                return "constant_tsc";
            case CPUFeature::NONSTOP_TSC:
                return "nonstop_tsc";
            case CPUFeature::UMIP:
                return "umip";
            case CPUFeature::SEP:
                return "sep";
            case CPUFeature::SYSCALL:
                return "syscall";
            case CPUFeature::MMX:
                return "mmx";
            case CPUFeature::SSE2:
                return "sse2";
            case CPUFeature::SSE3:
                return "sse3";
            case CPUFeature::SSSE3:
                return "ssse3";
            case CPUFeature::SSE4_1:
                return "sse4.1";
            case CPUFeature::SSE4_2:
                return "sse4.2";
                // no default statement here intentionally so that we get
                // a compiler warning if a new feature is added but not handled here
            }
            // Shouldn't ever happen
            return "???";
        };
    bool first = true;
    for (u32 flag = 1; flag != 0; flag <<= 1) {
        if ((static_cast<u32>(m_features) & flag) != 0) {
            if (first)
                first = false;
            else
                builder.append(' ');
            auto str = feature_to_str(static_cast<CPUFeature>(flag));
            builder.append(str, strlen(str));
        }
    }
    return builder.build();
}

String Processor::platform_string() const
{
    return "i386";
}

UNMAP_AFTER_INIT void Processor::early_initialize(u32 cpu)
{
    m_self = this;

    m_cpu = cpu;
    m_in_irq = 0;
    m_in_critical = 0;

    m_invoke_scheduler_async = false;
    m_scheduler_initialized = false;

    m_message_queue = nullptr;
    m_idle_thread = nullptr;
    m_current_thread = nullptr;
    m_scheduler_data = nullptr;
    m_mm_data = nullptr;
    m_info = nullptr;

    m_halt_requested = false;
    if (cpu == 0) {
        s_smp_enabled = false;
        atomic_store(&g_total_processors, 1u, AK::MemoryOrder::memory_order_release);
    } else {
        atomic_fetch_add(&g_total_processors, 1u, AK::MemoryOrder::memory_order_acq_rel);
    }

    deferred_call_pool_init();

    cpu_setup();
    gdt_init();

    VERIFY(is_initialized());   // sanity check
    VERIFY(&current() == this); // sanity check
}

UNMAP_AFTER_INIT void Processor::initialize(u32 cpu)
{
    VERIFY(m_self == this);
    VERIFY(&current() == this); // sanity check

    dmesgln("CPU[{}]: Supported features: {}", id(), features_string());
    if (!has_feature(CPUFeature::RDRAND))
        dmesgln("CPU[{}]: No RDRAND support detected, randomness will be poor", id());
    dmesgln("CPU[{}]: Physical address bit width: {}", id(), m_physical_address_bit_width);

    if (cpu == 0)
        idt_init();
    else
        flush_idt();

    if (cpu == 0) {
        // fxsave requires a 16-byte aligned buffer.
        VERIFY((FlatPtr(&s_clean_fpu_state) & 0xF) == 0);
        asm volatile("fninit");
        asm volatile("fxsave %0"
            : "=m"(s_clean_fpu_state));
    }

    m_info = new ProcessorInfo(*this);

    {
        ScopedSpinLock lock(s_processor_lock);
        // We need to prevent races between APs starting up at the same time.
        if (!s_processors)
            s_processors = new Vector<Processor*>();
        if (cpu >= s_processors->size())
            s_processors->resize(cpu + 1);
        (*s_processors)[cpu] = this;
    }
}
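
// A segment selector encodes (index << 3) | table_indicator << 2 | RPL, so
// masking off the low bits and shifting right by 3 recovers the GDT index.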
void Processor::write_raw_gdt_entry(u16 selector, u32 low, u32 high)
{
    u16 i = (selector & 0xfffc) >> 3;
    u32 prev_gdt_length = m_gdt_length;

    if (i >= m_gdt_length) {
        m_gdt_length = i + 1;
        VERIFY(m_gdt_length <= sizeof(m_gdt) / sizeof(m_gdt[0]));
        m_gdtr.limit = (m_gdt_length + 1) * 8 - 1;
    }
    m_gdt[i].low = low;
    m_gdt[i].high = high;

    // Clear any entries between the old table length and the newly
    // written index that we may have skipped over.
    for (u32 j = prev_gdt_length; j < i; ++j) {
        m_gdt[j].low = 0;
        m_gdt[j].high = 0;
    }
}

void Processor::write_gdt_entry(u16 selector, Descriptor& descriptor)
{
    write_raw_gdt_entry(selector, descriptor.low, descriptor.high);
}

Descriptor& Processor::get_gdt_entry(u16 selector)
{
    u16 i = (selector & 0xfffc) >> 3;
    return *(Descriptor*)(&m_gdt[i]);
}

void Processor::flush_gdt()
{
    m_gdtr.address = m_gdt;
    m_gdtr.limit = (m_gdt_length * 8) - 1;
    asm volatile("lgdt %0" ::"m"(m_gdtr)
                 : "memory");
}

const DescriptorTablePointer& Processor::get_gdtr()
{
    return m_gdtr;
}
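
// Stack traces are captured by walking the chain of saved frame pointers: in
// the usual i386 frame layout, *ebp holds the caller's ebp and *(ebp + 4) the
// return address. User frames are read via copy_from_user() and kernel frames
// via safe_memcpy(), so a corrupt frame pointer can't bring down the kernel.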
  1017. Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames)
  1018. {
  1019. FlatPtr frame_ptr = 0, eip = 0;
  1020. Vector<FlatPtr, 32> stack_trace;
  1021. auto walk_stack = [&](FlatPtr stack_ptr) {
  1022. static constexpr size_t max_stack_frames = 4096;
  1023. stack_trace.append(eip);
  1024. size_t count = 1;
  1025. while (stack_ptr && stack_trace.size() < max_stack_frames) {
  1026. FlatPtr retaddr;
  1027. count++;
  1028. if (max_frames != 0 && count > max_frames)
  1029. break;
  1030. if (is_user_range(VirtualAddress(stack_ptr), sizeof(FlatPtr) * 2)) {
  1031. if (!copy_from_user(&retaddr, &((FlatPtr*)stack_ptr)[1]) || !retaddr)
  1032. break;
  1033. stack_trace.append(retaddr);
  1034. if (!copy_from_user(&stack_ptr, (FlatPtr*)stack_ptr))
  1035. break;
  1036. } else {
  1037. void* fault_at;
  1038. if (!safe_memcpy(&retaddr, &((FlatPtr*)stack_ptr)[1], sizeof(FlatPtr), fault_at) || !retaddr)
  1039. break;
  1040. stack_trace.append(retaddr);
  1041. if (!safe_memcpy(&stack_ptr, (FlatPtr*)stack_ptr, sizeof(FlatPtr), fault_at))
  1042. break;
  1043. }
  1044. }
  1045. };
  1046. auto capture_current_thread = [&]() {
  1047. frame_ptr = (FlatPtr)__builtin_frame_address(0);
  1048. eip = (FlatPtr)__builtin_return_address(0);
  1049. walk_stack(frame_ptr);
  1050. };
  1051. // Since the thread may be running on another processor, there
  1052. // is a chance a context switch may happen while we're trying
  1053. // to get it. It also won't be entirely accurate and merely
  1054. // reflect the status at the last context switch.
  1055. ScopedSpinLock lock(g_scheduler_lock);
  1056. if (&thread == Processor::current_thread()) {
  1057. VERIFY(thread.state() == Thread::Running);
  1058. // Leave the scheduler lock. If we trigger page faults we may
  1059. // need to be preempted. Since this is our own thread it won't
  1060. // cause any problems as the stack won't change below this frame.
  1061. lock.unlock();
  1062. capture_current_thread();
  1063. } else if (thread.is_active()) {
  1064. VERIFY(thread.cpu() != Processor::id());
  1065. // If this is the case, the thread is currently running
  1066. // on another processor. We can't trust the kernel stack as
  1067. // it may be changing at any time. We need to probably send
  1068. // an IPI to that processor, have it walk the stack and wait
  1069. // until it returns the data back to us
        auto& proc = Processor::current();
        smp_unicast(
            thread.cpu(),
            [&]() {
                dbgln("CPU[{}] getting stack for cpu #{}", Processor::id(), proc.get_id());
                ProcessPagingScope paging_scope(thread.process());
                VERIFY(&Processor::current() != &proc);
                VERIFY(&thread == Processor::current_thread());
                // NOTE: Because the other processor is still holding the
                //       scheduler lock while waiting for this callback to finish,
                //       the current thread on the target processor cannot change
                // TODO: What to do about page faults here? We might deadlock
                //       because the other processor is still holding the
                //       scheduler lock...
                capture_current_thread();
            },
            false);
    } else {
        switch (thread.state()) {
        case Thread::Running:
            VERIFY_NOT_REACHED(); // should have been handled above
        case Thread::Runnable:
        case Thread::Stopped:
        case Thread::Blocked:
        case Thread::Dying:
        case Thread::Dead: {
            // We need to retrieve ebp from what was last pushed to the kernel
            // stack. Before switching out of that thread, switch_context
            // pushed the callee-saved registers, and the last of them happens
            // to be ebp.
            ProcessPagingScope paging_scope(thread.process());
            auto& tss = thread.tss();
            u32* stack_top = reinterpret_cast<u32*>(tss.esp);
            if (is_user_range(VirtualAddress(stack_top), sizeof(FlatPtr))) {
                if (!copy_from_user(&frame_ptr, &((FlatPtr*)stack_top)[0]))
                    frame_ptr = 0;
            } else {
                void* fault_at;
                if (!safe_memcpy(&frame_ptr, &((FlatPtr*)stack_top)[0], sizeof(FlatPtr), fault_at))
                    frame_ptr = 0;
            }
            eip = tss.eip;
            // TODO: We need to leave the scheduler lock here, but we also
            //       need to prevent the target thread from being run while
            //       we walk the stack
            lock.unlock();
            walk_stack(frame_ptr);
            break;
        }
        default:
            dbgln("Cannot capture stack trace for thread {} in state {}", thread, thread.state_string());
            break;
        }
    }
    return stack_trace;
}
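
// enter_thread_context() runs on to_thread's stack once switch_context()
// below has swapped stacks. It completes the switch: it saves the outgoing
// thread's FPU state, swaps fs/gs and the TLS descriptor, and only writes
// cr3 when the address space actually changes, avoiding an unnecessary TLB
// flush for switches between threads of the same process.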
  1126. extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
  1127. {
  1128. VERIFY(from_thread == to_thread || from_thread->state() != Thread::Running);
  1129. VERIFY(to_thread->state() == Thread::Running);
  1130. Processor::set_current_thread(*to_thread);
  1131. auto& from_tss = from_thread->tss();
  1132. auto& to_tss = to_thread->tss();
  1133. asm volatile("fxsave %0"
  1134. : "=m"(from_thread->fpu_state()));
  1135. from_tss.fs = get_fs();
  1136. from_tss.gs = get_gs();
  1137. set_fs(to_tss.fs);
  1138. set_gs(to_tss.gs);
  1139. auto& processor = Processor::current();
  1140. auto& tls_descriptor = processor.get_gdt_entry(GDT_SELECTOR_TLS);
  1141. tls_descriptor.set_base(to_thread->thread_specific_data().as_ptr());
  1142. tls_descriptor.set_limit(to_thread->thread_specific_region_size());
  1143. if (from_tss.cr3 != to_tss.cr3)
  1144. write_cr3(to_tss.cr3);
  1145. to_thread->set_cpu(processor.get_id());
  1146. processor.restore_in_critical(to_thread->saved_critical());
  1147. asm volatile("fxrstor %0" ::"m"(to_thread->fpu_state()));
  1148. // TODO: debug registers
  1149. // TODO: ioperm?
  1150. }

#define ENTER_THREAD_CONTEXT_ARGS_SIZE (2 * 4) // to_thread, from_thread
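
// Stack handed to enter_thread_context by the asm below (i386 cdecl):
//
//     [esp + 8]  to_thread      (second argument)
//     [esp + 4]  from_thread    (first argument)
//     [esp + 0]  to_eip         (acts as the return address)
//
// When enter_thread_context returns, it "returns" to to_eip: either the
// "1:" label below for a thread that has run before, or
// thread_context_first_enter for a brand-new context.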
void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
{
    VERIFY(!in_irq());
    VERIFY(m_in_critical == 1);
    VERIFY(is_kernel_mode());

    dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context --> switching out of: {} {}", VirtualAddress(from_thread), *from_thread);
    from_thread->save_critical(m_in_critical);

    // clang-format off
    // Switch to new thread context, passing from_thread and to_thread
    // through to the new context using registers edx and eax
    asm volatile(
        // NOTE: changing how much we push to the stack affects
        //       SWITCH_CONTEXT_TO_STACK_SIZE and thread_context_first_enter()!
        "pushfl \n"
        "pushl %%ebx \n"
        "pushl %%esi \n"
        "pushl %%edi \n"
        "pushl %%ebp \n"
        "movl %%esp, %[from_esp] \n"
        "movl $1f, %[from_eip] \n"
        "movl %[to_esp0], %%ebx \n"
        "movl %%ebx, %[tss_esp0] \n"
        "movl %[to_esp], %%esp \n"
        "pushl %[to_thread] \n"
        "pushl %[from_thread] \n"
        "pushl %[to_eip] \n"
        "cld \n"
        "jmp enter_thread_context \n"
        "1: \n"
        "popl %%edx \n"
        "popl %%eax \n"
        "popl %%ebp \n"
        "popl %%edi \n"
        "popl %%esi \n"
        "popl %%ebx \n"
        "popfl \n"
        : [from_esp] "=m" (from_thread->tss().esp),
          [from_eip] "=m" (from_thread->tss().eip),
          [tss_esp0] "=m" (m_tss.esp0),
          "=d" (from_thread), // needed so that from_thread retains the correct value
          "=a" (to_thread) // needed so that to_thread retains the correct value
        : [to_esp] "g" (to_thread->tss().esp),
          [to_esp0] "g" (to_thread->tss().esp0),
          [to_eip] "c" (to_thread->tss().eip),
          [from_thread] "d" (from_thread),
          [to_thread] "a" (to_thread)
        : "memory"
    );
    // clang-format on

    dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {}", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);

    Processor::current().restore_in_critical(to_thread->saved_critical());
}
  1204. extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe_unused]] Thread* to_thread, [[maybe_unused]] TrapFrame* trap)
  1205. {
  1206. VERIFY(!are_interrupts_enabled());
  1207. VERIFY(is_kernel_mode());
  1208. dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {} (context_first_init)", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
  1209. VERIFY(to_thread == Thread::current());
  1210. Scheduler::enter_current(*from_thread, true);
  1211. // Since we got here and don't have Scheduler::context_switch in the
  1212. // call stack (because this is the first time we switched into this
  1213. // context), we need to notify the scheduler so that it can release
  1214. // the scheduler lock. We don't want to enable interrupts at this point
  1215. // as we're still in the middle of a context switch. Doing so could
  1216. // trigger a context switch within a context switch, leading to a crash.
  1217. Scheduler::leave_on_first_switch(trap->regs->eflags & ~0x200);
  1218. }
  1219. extern "C" void thread_context_first_enter(void);
// clang-format off
asm(
// enter_thread_context returns to here first time a thread is executing
".globl thread_context_first_enter \n"
"thread_context_first_enter: \n"
// switch_context will have pushed from_thread and to_thread to our new
// stack prior to thread_context_first_enter() being called, and the
// pointer to TrapFrame was the top of the stack before that
"    movl 8(%esp), %ebx \n" // save pointer to TrapFrame
"    cld \n"
"    call context_first_init \n"
"    addl $" __STRINGIFY(ENTER_THREAD_CONTEXT_ARGS_SIZE) ", %esp \n"
"    movl %ebx, 0(%esp) \n" // push pointer to TrapFrame
"    jmp common_trap_exit \n"
);
// clang-format on

void exit_kernel_thread(void)
{
    Thread::current()->exit();
}

u32 Processor::init_context(Thread& thread, bool leave_crit)
{
    VERIFY(is_kernel_mode());
    VERIFY(g_scheduler_lock.is_locked());
    if (leave_crit) {
        // Leave the critical section we set up in Process::exec,
        // but because we still hold the scheduler lock we should end up with 1
        m_in_critical--; // leave it without triggering anything or restoring flags
        VERIFY(in_critical() == 1);
    }

    u32 kernel_stack_top = thread.kernel_stack_top();

    // Add a random offset between 0-256 (16-byte aligned)
    kernel_stack_top -= round_up_to_power_of_two(get_fast_random<u8>(), 16);

    u32 stack_top = kernel_stack_top;

    // TODO: handle NT?
    VERIFY((cpu_flags() & 0x24000) == 0); // Assume !(NT | VM)

    auto& tss = thread.tss();
    bool return_to_user = (tss.cs & 3) != 0;

    // make room for an interrupt frame
    if (!return_to_user) {
        // userspace_esp and userspace_ss are not popped off by iret
        // unless we're switching back to user mode
        stack_top -= sizeof(RegisterState) - 2 * sizeof(u32);

        // For kernel threads we'll push the thread function argument
        // which should be in tss.esp and exit_kernel_thread as return
        // address.
        stack_top -= 2 * sizeof(u32);
        *reinterpret_cast<u32*>(kernel_stack_top - 2 * sizeof(u32)) = tss.esp;
        *reinterpret_cast<u32*>(kernel_stack_top - 3 * sizeof(u32)) = FlatPtr(&exit_kernel_thread);
    } else {
        stack_top -= sizeof(RegisterState);
    }

    // we want to end up 16-byte aligned, %esp + 4 should be aligned
    stack_top -= sizeof(u32);
    *reinterpret_cast<u32*>(kernel_stack_top - sizeof(u32)) = 0;

    // set up the stack so that after returning from thread_context_first_enter()
    // we will end up either in kernel mode or user mode, depending on how the thread is set up
    // However, the first step is to always start in kernel mode with thread_context_first_enter
    RegisterState& iretframe = *reinterpret_cast<RegisterState*>(stack_top);
    iretframe.ss = tss.ss;
    iretframe.gs = tss.gs;
    iretframe.fs = tss.fs;
    iretframe.es = tss.es;
    iretframe.ds = tss.ds;
    iretframe.edi = tss.edi;
    iretframe.esi = tss.esi;
    iretframe.ebp = tss.ebp;
    iretframe.esp = 0;
    iretframe.ebx = tss.ebx;
    iretframe.edx = tss.edx;
    iretframe.ecx = tss.ecx;
    iretframe.eax = tss.eax;
    iretframe.eflags = tss.eflags;
    iretframe.eip = tss.eip;
    iretframe.cs = tss.cs;
    if (return_to_user) {
        iretframe.userspace_esp = tss.esp;
        iretframe.userspace_ss = tss.ss;
    }

    // make space for a trap frame
    stack_top -= sizeof(TrapFrame);
    TrapFrame& trap = *reinterpret_cast<TrapFrame*>(stack_top);
    trap.regs = &iretframe;
    trap.prev_irq_level = 0;
    trap.next_trap = nullptr;

    stack_top -= sizeof(u32); // pointer to TrapFrame
    *reinterpret_cast<u32*>(stack_top) = stack_top + 4;
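
    // For a kernel thread, the stack we just built looks roughly like this,
    // from kernel_stack_top down to stack_top:
    //
    //     0                      (sentinel word at kernel_stack_top - 4)
    //     thread argument        (from tss.esp)
    //     exit_kernel_thread     (return address for the entry function)
    //     RegisterState          (the iretframe popped by common_trap_exit)
    //     TrapFrame              (trap.regs points at the iretframe)
    //     pointer to TrapFrame   <- stack_top, the first thing
    //                               thread_context_first_enter sees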

    if constexpr (CONTEXT_SWITCH_DEBUG) {
        if (return_to_user) {
            dbgln("init_context {} ({}) set up to execute at eip={}:{}, esp={}, stack_top={}, user_top={}:{}",
                thread,
                VirtualAddress(&thread),
                iretframe.cs, tss.eip,
                VirtualAddress(tss.esp),
                VirtualAddress(stack_top),
                iretframe.userspace_ss,
                iretframe.userspace_esp);
        } else {
            dbgln("init_context {} ({}) set up to execute at eip={}:{}, esp={}, stack_top={}",
                thread,
                VirtualAddress(&thread),
                iretframe.cs, tss.eip,
                VirtualAddress(tss.esp),
                VirtualAddress(stack_top));
        }
    }

    // make switch_context() always first return to thread_context_first_enter()
    // in kernel mode, so set up these values so that we end up popping iretframe
    // off the stack right after the context switch completed, at which point
    // control is transferred to what iretframe is pointing to.
    tss.eip = FlatPtr(&thread_context_first_enter);
    tss.esp0 = kernel_stack_top;
    tss.esp = stack_top;
    tss.cs = GDT_SELECTOR_CODE0;
    tss.ds = GDT_SELECTOR_DATA0;
    tss.es = GDT_SELECTOR_DATA0;
    tss.gs = GDT_SELECTOR_DATA0;
    tss.ss = GDT_SELECTOR_DATA0;
    tss.fs = GDT_SELECTOR_PROC;
    return stack_top;
}
  1341. extern "C" u32 do_init_context(Thread* thread, u32 flags)
  1342. {
  1343. VERIFY_INTERRUPTS_DISABLED();
  1344. thread->tss().eflags = flags;
  1345. return Processor::current().init_context(*thread, true);
  1346. }
  1347. extern "C" void do_assume_context(Thread* thread, u32 flags);
  1348. // clang-format off
  1349. asm(
  1350. ".global do_assume_context \n"
  1351. "do_assume_context: \n"
  1352. " movl 4(%esp), %ebx \n"
  1353. " movl 8(%esp), %esi \n"
  1354. // We're going to call Processor::init_context, so just make sure
  1355. // we have enough stack space so we don't stomp over it
  1356. " subl $(" __STRINGIFY(4 + REGISTER_STATE_SIZE + TRAP_FRAME_SIZE + 4) "), %esp \n"
  1357. " pushl %esi \n"
  1358. " pushl %ebx \n"
  1359. " cld \n"
  1360. " call do_init_context \n"
  1361. " addl $8, %esp \n"
  1362. " movl %eax, %esp \n" // move stack pointer to what Processor::init_context set up for us
  1363. " pushl %ebx \n" // push to_thread
  1364. " pushl %ebx \n" // push from_thread
  1365. " pushl $thread_context_first_enter \n" // should be same as tss.eip
  1366. " jmp enter_thread_context \n"
  1367. );
  1368. // clang-format on

void Processor::assume_context(Thread& thread, u32 flags)
{
    dbgln_if(CONTEXT_SWITCH_DEBUG, "Assume context for thread {} {}", VirtualAddress(&thread), thread);

    VERIFY_INTERRUPTS_DISABLED();
    Scheduler::prepare_after_exec();
    // in_critical() should be 2 here: one for the critical section set up
    // in Process::exec, and one for holding the scheduler lock.
    VERIFY(Processor::current().in_critical() == 2);

    do_assume_context(&thread, flags);

    VERIFY_NOT_REACHED();
}
  1380. extern "C" UNMAP_AFTER_INIT void pre_init_finished(void)
  1381. {
  1382. VERIFY(g_scheduler_lock.own_lock());
  1383. // Because init_finished() will wait on the other APs, we need
  1384. // to release the scheduler lock so that the other APs can also get
  1385. // to this point
  1386. // The target flags will get restored upon leaving the trap
  1387. u32 prev_flags = cpu_flags();
  1388. Scheduler::leave_on_first_switch(prev_flags);
  1389. }
  1390. extern "C" UNMAP_AFTER_INIT void post_init_finished(void)
  1391. {
  1392. // We need to re-acquire the scheduler lock before a context switch
  1393. // transfers control into the idle loop, which needs the lock held
  1394. Scheduler::prepare_for_idle_loop();
  1395. }

UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_thread)
{
    VERIFY(initial_thread.process().is_kernel_process());

    auto& tss = initial_thread.tss();
    m_tss = tss;
    m_tss.esp0 = tss.esp0;
    m_tss.ss0 = GDT_SELECTOR_DATA0;
    // user mode needs to be able to switch to kernel mode:
    m_tss.cs = m_tss.ds = m_tss.es = m_tss.gs = m_tss.ss = GDT_SELECTOR_CODE0 | 3;
    m_tss.fs = GDT_SELECTOR_PROC | 3;

    m_scheduler_initialized = true;

    // clang-format off
    asm volatile(
        "movl %[new_esp], %%esp \n" // switch to new stack
        "pushl %[from_to_thread] \n" // to_thread
        "pushl %[from_to_thread] \n" // from_thread
        "pushl $" __STRINGIFY(GDT_SELECTOR_CODE0) " \n"
        "pushl %[new_eip] \n" // save the entry eip to the stack
        "movl %%esp, %%ebx \n"
        "addl $20, %%ebx \n" // calculate pointer to TrapFrame
        "pushl %%ebx \n"
        "cld \n"
        "pushl %[cpu] \n" // push argument for init_finished before register is clobbered
        "call pre_init_finished \n"
        "call init_finished \n"
        "addl $4, %%esp \n"
        "call post_init_finished \n"
        "call enter_trap_no_irq \n"
        "addl $4, %%esp \n"
        "lret \n"
        :: [new_esp] "g" (tss.esp),
           [new_eip] "a" (tss.eip),
           [from_to_thread] "b" (&initial_thread),
           [cpu] "c" (id())
    );
    // clang-format on

    VERIFY_NOT_REACHED();
}

void Processor::enter_trap(TrapFrame& trap, bool raise_irq)
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(&Processor::current() == this);
    trap.prev_irq_level = m_in_irq;
    if (raise_irq)
        m_in_irq++;
    auto* current_thread = Processor::current_thread();
    if (current_thread) {
        auto& current_trap = current_thread->current_trap();
        trap.next_trap = current_trap;
        current_trap = &trap;
        // The cs register of this trap tells us where we will return back to
        current_thread->set_previous_mode(((trap.regs->cs & 3) != 0) ? Thread::PreviousMode::UserMode : Thread::PreviousMode::KernelMode);
    } else {
        trap.next_trap = nullptr;
    }
}

void Processor::exit_trap(TrapFrame& trap)
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(&Processor::current() == this);
    VERIFY(m_in_irq >= trap.prev_irq_level);
    m_in_irq = trap.prev_irq_level;

    smp_process_pending_messages();

    if (!m_in_irq && !m_in_critical)
        check_invoke_scheduler();

    auto* current_thread = Processor::current_thread();
    if (current_thread) {
        auto& current_trap = current_thread->current_trap();
        current_trap = trap.next_trap;
        if (current_trap) {
            VERIFY(current_trap->regs);
            // If we have another higher level trap then we probably returned
            // from an interrupt or irq handler. The cs register of the
            // new/higher level trap tells us what the mode prior to it was
            current_thread->set_previous_mode(((current_trap->regs->cs & 3) != 0) ? Thread::PreviousMode::UserMode : Thread::PreviousMode::KernelMode);
        } else {
            // If we don't have a higher level trap then we're back in user mode.
            // Unless we're a kernel process, in which case we're always in kernel mode
            current_thread->set_previous_mode(current_thread->process().is_kernel_process() ? Thread::PreviousMode::KernelMode : Thread::PreviousMode::UserMode);
        }
    }
}

void Processor::check_invoke_scheduler()
{
    VERIFY(!m_in_irq);
    VERIFY(!m_in_critical);
    if (m_invoke_scheduler_async && m_scheduler_initialized) {
        m_invoke_scheduler_async = false;
        Scheduler::invoke_async();
    }
}

void Processor::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
{
    auto ptr = vaddr.as_ptr();
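    // invlpg invalidates the TLB entry for the single page containing its
    // operand address, so we step through the range one page at a time.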
    while (page_count > 0) {
        // clang-format off
        asm volatile("invlpg %0"
             :
             : "m"(*ptr)
             : "memory");
        // clang-format on
        ptr += PAGE_SIZE;
        page_count--;
    }
}

void Processor::flush_tlb(const PageDirectory* page_directory, VirtualAddress vaddr, size_t page_count)
{
    if (s_smp_enabled && (!is_user_address(vaddr) || Process::current()->thread_count() > 1))
        smp_broadcast_flush_tlb(page_directory, vaddr, page_count);
    else
        flush_tlb_local(vaddr, page_count);
}

static volatile ProcessorMessage* s_message_pool;

void Processor::smp_return_to_pool(ProcessorMessage& msg)
{
    ProcessorMessage* next = nullptr;
    do {
        msg.next = next;
    } while (!atomic_compare_exchange_strong(&s_message_pool, next, &msg, AK::MemoryOrder::memory_order_acq_rel));
}
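
// The message pool is a lock-free intrusive stack (a Treiber stack):
// smp_return_to_pool pushes by CAS-ing the head, and smp_get_from_pool pops
// the head the same way. The usual ABA hazard is avoided because messages
// are pooled and never freed, so a stale head pointer still refers to a
// valid (if re-linked) message and the CAS simply fails and retries.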
ProcessorMessage& Processor::smp_get_from_pool()
{
    ProcessorMessage* msg;

    // The assumption is that messages are never removed from the pool!
    for (;;) {
        msg = atomic_load(&s_message_pool, AK::MemoryOrder::memory_order_consume);
        if (!msg) {
            if (!Processor::current().smp_process_pending_messages()) {
                // TODO: pause for a bit?
            }
            continue;
        }

        // If another processor were to grab this message in the meantime,
        // "msg" is still valid (because it never gets freed). We'd detect
        // this because the expected value "msg" and the pool head would
        // no longer match, and the compare_exchange would fail. But accessing
        // "msg->next" is always safe here.
        if (atomic_compare_exchange_strong(&s_message_pool, msg, msg->next, AK::MemoryOrder::memory_order_acq_rel)) {
            // We successfully "popped" this available message
            break;
        }
    }

    VERIFY(msg != nullptr);
    return *msg;
}

Atomic<u32> Processor::s_idle_cpu_mask { 0 };

u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
{
    VERIFY(Processor::current().in_critical());
    VERIFY(wake_count > 0);
    if (!s_smp_enabled)
        return 0;

    // Wake at most N - 1 processors
    if (wake_count >= Processor::count()) {
        wake_count = Processor::count() - 1;
        VERIFY(wake_count > 0);
    }

    u32 current_id = Processor::current().id();

    u32 did_wake_count = 0;
    auto& apic = APIC::the();
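    // Claiming idle CPUs is a two-step protocol: first read the idle mask
    // and pick a set of candidate bits, then atomically clear those bits
    // with fetch_and. Only the bits that were still set when we cleared
    // them are ours to wake; anything else was claimed (or woke up on its
    // own) in the meantime, in which case we retry with a fresh snapshot.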
    while (did_wake_count < wake_count) {
        // Try to get a set of idle CPUs and flip them to busy
        u32 idle_mask = s_idle_cpu_mask.load(AK::MemoryOrder::memory_order_relaxed) & ~(1u << current_id);
        u32 idle_count = __builtin_popcountl(idle_mask);
        if (idle_count == 0)
            break; // No (more) idle processor available

        u32 found_mask = 0;
        for (u32 i = 0; i < idle_count; i++) {
            u32 cpu = __builtin_ffsl(idle_mask) - 1;
            idle_mask &= ~(1u << cpu);
            found_mask |= 1u << cpu;
        }

        idle_mask = s_idle_cpu_mask.fetch_and(~found_mask, AK::MemoryOrder::memory_order_acq_rel) & found_mask;
        if (idle_mask == 0)
            continue; // All of them were flipped to busy, try again
        idle_count = __builtin_popcountl(idle_mask);
        for (u32 i = 0; i < idle_count; i++) {
            u32 cpu = __builtin_ffsl(idle_mask) - 1;
            idle_mask &= ~(1u << cpu);

            // Send an IPI to that CPU to wake it up. There is a possibility
            // someone else woke it up as well, or that it woke up due to
            // a timer interrupt. But we tried hard to avoid this...
            apic.send_ipi(cpu);
            did_wake_count++;
        }
    }
    return did_wake_count;
}

UNMAP_AFTER_INIT void Processor::smp_enable()
{
    size_t msg_pool_size = Processor::count() * 100u;
    size_t msg_entries_cnt = Processor::count();

    auto msgs = new ProcessorMessage[msg_pool_size];
    auto msg_entries = new ProcessorMessageEntry[msg_pool_size * msg_entries_cnt];

    size_t msg_entry_i = 0;
    for (size_t i = 0; i < msg_pool_size; i++, msg_entry_i += msg_entries_cnt) {
        auto& msg = msgs[i];
        msg.next = i < msg_pool_size - 1 ? &msgs[i + 1] : nullptr;
        msg.per_proc_entries = &msg_entries[msg_entry_i];
        for (size_t k = 0; k < msg_entries_cnt; k++)
            msg_entries[msg_entry_i + k].msg = &msg;
    }

    atomic_store(&s_message_pool, &msgs[0], AK::MemoryOrder::memory_order_release);

    // Start sending IPI messages
    s_smp_enabled = true;
}

void Processor::smp_cleanup_message(ProcessorMessage& msg)
{
    switch (msg.type) {
    case ProcessorMessage::CallbackWithData:
        if (msg.callback_with_data.free)
            msg.callback_with_data.free(msg.callback_with_data.data);
        break;
    default:
        break;
    }
}

bool Processor::smp_process_pending_messages()
{
    bool did_process = false;
    u32 prev_flags;
    enter_critical(prev_flags);

    if (auto pending_msgs = atomic_exchange(&m_message_queue, nullptr, AK::MemoryOrder::memory_order_acq_rel)) {
        // We pulled the stack of pending messages in LIFO order, so we need to reverse the list first
        auto reverse_list =
            [](ProcessorMessageEntry* list) -> ProcessorMessageEntry* {
                ProcessorMessageEntry* rev_list = nullptr;
                while (list) {
                    auto next = list->next;
                    list->next = rev_list;
                    rev_list = list;
                    list = next;
                }
                return rev_list;
            };
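
        // For example, if messages were queued in the order A, B, C, the
        // intrusive list head we just exchanged is C -> B -> A; reversing
        // it restores the original queueing order A -> B -> C.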
        pending_msgs = reverse_list(pending_msgs);

        // now process in the right order
        ProcessorMessageEntry* next_msg;
        for (auto cur_msg = pending_msgs; cur_msg; cur_msg = next_msg) {
            next_msg = cur_msg->next;
            auto msg = cur_msg->msg;

            dbgln_if(SMP_DEBUG, "SMP[{}]: Processing message {}", id(), VirtualAddress(msg));

            switch (msg->type) {
            case ProcessorMessage::Callback:
                msg->callback.handler();
                break;
            case ProcessorMessage::CallbackWithData:
                msg->callback_with_data.handler(msg->callback_with_data.data);
                break;
            case ProcessorMessage::FlushTlb:
                if (is_user_address(VirtualAddress(msg->flush_tlb.ptr))) {
                    // We assume that we don't cross into kernel land!
                    VERIFY(is_user_range(VirtualAddress(msg->flush_tlb.ptr), msg->flush_tlb.page_count * PAGE_SIZE));
                    if (read_cr3() != msg->flush_tlb.page_directory->cr3()) {
                        // This processor isn't using this page directory right now, we can ignore this request
                        dbgln_if(SMP_DEBUG, "SMP[{}]: No need to flush {} pages at {}", id(), msg->flush_tlb.page_count, VirtualAddress(msg->flush_tlb.ptr));
                        break;
                    }
                }
                flush_tlb_local(VirtualAddress(msg->flush_tlb.ptr), msg->flush_tlb.page_count);
                break;
            }

            bool is_async = msg->async; // Need to cache this value *before* dropping the ref count!
            auto prev_refs = atomic_fetch_sub(&msg->refs, 1u, AK::MemoryOrder::memory_order_acq_rel);
            VERIFY(prev_refs != 0);
            if (prev_refs == 1) {
                // All processors handled this. If this is an async message,
                // we need to clean it up and return it to the pool
                if (is_async) {
                    smp_cleanup_message(*msg);
                    smp_return_to_pool(*msg);
                }
            }

            if (m_halt_requested.load(AK::MemoryOrder::memory_order_relaxed))
                halt_this();
        }
        did_process = true;
    } else if (m_halt_requested.load(AK::MemoryOrder::memory_order_relaxed)) {
        halt_this();
    }

    leave_critical(prev_flags);
    return did_process;
}

bool Processor::smp_queue_message(ProcessorMessage& msg)
{
    // Note that it's quite possible that the other processor may pop
    // the queue at any given time. We rely on the fact that the messages
    // are pooled and never get freed!
    auto& msg_entry = msg.per_proc_entries[id()];
    VERIFY(msg_entry.msg == &msg);
    ProcessorMessageEntry* next = nullptr;
    do {
        msg_entry.next = next;
    } while (!atomic_compare_exchange_strong(&m_message_queue, next, &msg_entry, AK::MemoryOrder::memory_order_acq_rel));
    return next == nullptr;
}

void Processor::smp_broadcast_message(ProcessorMessage& msg)
{
    auto& cur_proc = Processor::current();

    dbgln_if(SMP_DEBUG, "SMP[{}]: Broadcast message {} to cpus: {} proc: {}", cur_proc.get_id(), VirtualAddress(&msg), count(), VirtualAddress(&cur_proc));

    atomic_store(&msg.refs, count() - 1, AK::MemoryOrder::memory_order_release);
    VERIFY(msg.refs > 0);
    bool need_broadcast = false;
    for_each(
        [&](Processor& proc) -> IterationDecision {
            if (&proc != &cur_proc) {
                if (proc.smp_queue_message(msg))
                    need_broadcast = true;
            }
            return IterationDecision::Continue;
        });

    // Now trigger an IPI on all other APs (unless all targets already had messages queued)
    if (need_broadcast)
        APIC::the().broadcast_ipi();
}

void Processor::smp_broadcast_wait_sync(ProcessorMessage& msg)
{
    auto& cur_proc = Processor::current();
    VERIFY(!msg.async);
    // If synchronous then we must cleanup and return the message back
    // to the pool. Otherwise, the last processor to complete it will return it
    while (atomic_load(&msg.refs, AK::MemoryOrder::memory_order_consume) != 0) {
        // TODO: pause for a bit?

        // We need to process any messages that may have been sent to
        // us while we're waiting. This also checks if another processor
        // may have requested us to halt.
        cur_proc.smp_process_pending_messages();
    }

    smp_cleanup_message(msg);
    smp_return_to_pool(msg);
}

void Processor::smp_broadcast(void (*callback)(void*), void* data, void (*free_data)(void*), bool async)
{
    auto& msg = smp_get_from_pool();
    msg.async = async;
    msg.type = ProcessorMessage::CallbackWithData;
    msg.callback_with_data.handler = callback;
    msg.callback_with_data.data = data;
    msg.callback_with_data.free = free_data;
    smp_broadcast_message(msg);
    if (!async)
        smp_broadcast_wait_sync(msg);
}

void Processor::smp_broadcast(void (*callback)(), bool async)
{
    auto& msg = smp_get_from_pool();
    msg.async = async;
    msg.type = ProcessorMessage::Callback; // plain callback, not CallbackWithData
    msg.callback.handler = callback;
    smp_broadcast_message(msg);
    if (!async)
        smp_broadcast_wait_sync(msg);
}

void Processor::smp_unicast_message(u32 cpu, ProcessorMessage& msg, bool async)
{
    auto& cur_proc = Processor::current();
    VERIFY(cpu != cur_proc.get_id());
    auto& target_proc = processors()[cpu];
    msg.async = async;

    dbgln_if(SMP_DEBUG, "SMP[{}]: Send message {} to cpu #{} proc: {}", cur_proc.get_id(), VirtualAddress(&msg), cpu, VirtualAddress(&target_proc));

    atomic_store(&msg.refs, 1u, AK::MemoryOrder::memory_order_release);
    if (target_proc->smp_queue_message(msg)) {
        APIC::the().send_ipi(cpu);
    }

    if (!async) {
        // If synchronous then we must cleanup and return the message back
        // to the pool. Otherwise, the last processor to complete it will return it
        while (atomic_load(&msg.refs, AK::MemoryOrder::memory_order_consume) != 0) {
            // TODO: pause for a bit?

            // We need to process any messages that may have been sent to
            // us while we're waiting. This also checks if another processor
            // may have requested us to halt.
            cur_proc.smp_process_pending_messages();
        }

        smp_cleanup_message(msg);
        smp_return_to_pool(msg);
    }
}

void Processor::smp_unicast(u32 cpu, void (*callback)(void*), void* data, void (*free_data)(void*), bool async)
{
    auto& msg = smp_get_from_pool();
    msg.type = ProcessorMessage::CallbackWithData;
    msg.callback_with_data.handler = callback;
    msg.callback_with_data.data = data;
    msg.callback_with_data.free = free_data;
    smp_unicast_message(cpu, msg, async);
}

void Processor::smp_unicast(u32 cpu, void (*callback)(), bool async)
{
    auto& msg = smp_get_from_pool();
    msg.type = ProcessorMessage::Callback; // plain callback, not CallbackWithData
    msg.callback.handler = callback;
    smp_unicast_message(cpu, msg, async);
}

void Processor::smp_broadcast_flush_tlb(const PageDirectory* page_directory, VirtualAddress vaddr, size_t page_count)
{
    auto& msg = smp_get_from_pool();
    msg.async = false;
    msg.type = ProcessorMessage::FlushTlb;
    msg.flush_tlb.page_directory = page_directory;
    msg.flush_tlb.ptr = vaddr.as_ptr();
    msg.flush_tlb.page_count = page_count;
    smp_broadcast_message(msg);
    // While the other processors handle this request, we'll flush ours
    flush_tlb_local(vaddr, page_count);
    // Now wait until everybody is done as well
    smp_broadcast_wait_sync(msg);
}

void Processor::smp_broadcast_halt()
{
    // We don't want to use a message, because this could have been triggered
    // by being out of memory and we might not be able to get a message
    for_each(
        [&](Processor& proc) -> IterationDecision {
            proc.m_halt_requested.store(true, AK::MemoryOrder::memory_order_release);
            return IterationDecision::Continue;
        });

    // Now trigger an IPI on all other APs
    APIC::the().broadcast_ipi();
}

void Processor::halt()
{
    if (s_smp_enabled)
        smp_broadcast_halt();

    halt_this();
}

UNMAP_AFTER_INIT void Processor::deferred_call_pool_init()
{
    size_t pool_count = sizeof(m_deferred_call_pool) / sizeof(m_deferred_call_pool[0]);
    for (size_t i = 0; i < pool_count; i++) {
        auto& entry = m_deferred_call_pool[i];
        entry.next = i < pool_count - 1 ? &m_deferred_call_pool[i + 1] : nullptr;
        entry.was_allocated = false;
    }
    m_pending_deferred_calls = nullptr;
    m_free_deferred_call_pool_entry = &m_deferred_call_pool[0];
}

void Processor::deferred_call_return_to_pool(DeferredCallEntry* entry)
{
    VERIFY(m_in_critical);
    VERIFY(!entry->was_allocated);

    entry->next = m_free_deferred_call_pool_entry;
    m_free_deferred_call_pool_entry = entry;
}

DeferredCallEntry* Processor::deferred_call_get_free()
{
    VERIFY(m_in_critical);

    if (m_free_deferred_call_pool_entry) {
        // Fast path, we have an entry in our pool
        auto* entry = m_free_deferred_call_pool_entry;
        m_free_deferred_call_pool_entry = entry->next;
        VERIFY(!entry->was_allocated);
        return entry;
    }

    auto* entry = new DeferredCallEntry;
    entry->was_allocated = true;
    return entry;
}

void Processor::deferred_call_execute_pending()
{
    VERIFY(m_in_critical);

    if (!m_pending_deferred_calls)
        return;
    auto* pending_list = m_pending_deferred_calls;
    m_pending_deferred_calls = nullptr;

    // We pulled the stack of pending deferred calls in LIFO order, so we need to reverse the list first
    auto reverse_list =
        [](DeferredCallEntry* list) -> DeferredCallEntry* {
            DeferredCallEntry* rev_list = nullptr;
            while (list) {
                auto next = list->next;
                list->next = rev_list;
                rev_list = list;
                list = next;
            }
            return rev_list;
        };
    pending_list = reverse_list(pending_list);

    do {
        // Call the appropriate callback handler
        if (pending_list->have_data) {
            pending_list->callback_with_data.handler(pending_list->callback_with_data.data);
            if (pending_list->callback_with_data.free)
                pending_list->callback_with_data.free(pending_list->callback_with_data.data);
        } else {
            pending_list->callback.handler();
        }

        // Return the entry back to the pool, or free it
        auto* next = pending_list->next;
        if (pending_list->was_allocated)
            delete pending_list;
        else
            deferred_call_return_to_pool(pending_list);

        pending_list = next;
    } while (pending_list);
}

void Processor::deferred_call_queue_entry(DeferredCallEntry* entry)
{
    VERIFY(m_in_critical);
    entry->next = m_pending_deferred_calls;
    m_pending_deferred_calls = entry;
}

void Processor::deferred_call_queue(void (*callback)())
{
    // NOTE: If we are called outside of a critical section and outside
    //       of an irq handler, the function will be executed before we return!
    ScopedCritical critical;
    auto& cur_proc = Processor::current();

    auto* entry = cur_proc.deferred_call_get_free();
    entry->have_data = false;
    entry->callback.handler = callback;

    cur_proc.deferred_call_queue_entry(entry);
}
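
// A typical (hypothetical) use is deferring work out of an IRQ handler:
//
//     Processor::deferred_call_queue([] {
//         dbgln("Runs once the processor leaves its critical section");
//     });
//
// Note that a capturing lambda would not convert to the plain function
// pointer the overload above takes; callbacks that need state must use the
// data/free_data overload below instead.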

void Processor::deferred_call_queue(void (*callback)(void*), void* data, void (*free_data)(void*))
{
    // NOTE: If we are called outside of a critical section and outside
    //       of an irq handler, the function will be executed before we return!
    ScopedCritical critical;
    auto& cur_proc = Processor::current();

    auto* entry = cur_proc.deferred_call_get_free();
    entry->have_data = true;
    entry->callback_with_data.handler = callback;
    entry->callback_with_data.data = data;
    entry->callback_with_data.free = free_data;

    cur_proc.deferred_call_queue_entry(entry);
}

UNMAP_AFTER_INIT void Processor::gdt_init()
{
    m_gdt_length = 0;
    m_gdtr.address = nullptr;
    m_gdtr.limit = 0;

    write_raw_gdt_entry(0x0000, 0x00000000, 0x00000000);
    write_raw_gdt_entry(GDT_SELECTOR_CODE0, 0x0000ffff, 0x00cf9a00); // code0
    write_raw_gdt_entry(GDT_SELECTOR_DATA0, 0x0000ffff, 0x00cf9200); // data0
    write_raw_gdt_entry(GDT_SELECTOR_CODE3, 0x0000ffff, 0x00cffa00); // code3
    write_raw_gdt_entry(GDT_SELECTOR_DATA3, 0x0000ffff, 0x00cff200); // data3
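
    // These raw entries all describe flat 4 GiB segments (base 0, limit
    // 0xfffff with 4K granularity, 32-bit). Only the access bytes differ:
    // 0x9a = present, ring 0, code (execute/read); 0x92 = present, ring 0,
    // data (read/write); 0xfa and 0xf2 are the same but with DPL 3.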

    Descriptor tls_descriptor;
    tls_descriptor.low = tls_descriptor.high = 0;
    tls_descriptor.dpl = 3;
    tls_descriptor.segment_present = 1;
    tls_descriptor.granularity = 0;
    tls_descriptor.zero = 0;
    tls_descriptor.operation_size = 1;
    tls_descriptor.descriptor_type = 1;
    tls_descriptor.type = 2;
    write_gdt_entry(GDT_SELECTOR_TLS, tls_descriptor); // tls3

    Descriptor fs_descriptor;
    fs_descriptor.set_base(this);
    fs_descriptor.set_limit(sizeof(Processor));
    fs_descriptor.dpl = 0;
    fs_descriptor.segment_present = 1;
    fs_descriptor.granularity = 0;
    fs_descriptor.zero = 0;
    fs_descriptor.operation_size = 1;
    fs_descriptor.descriptor_type = 1;
    fs_descriptor.type = 2;
    write_gdt_entry(GDT_SELECTOR_PROC, fs_descriptor); // fs0

    Descriptor tss_descriptor;
    tss_descriptor.set_base(&m_tss);
    tss_descriptor.set_limit(sizeof(TSS32));
    tss_descriptor.dpl = 0;
    tss_descriptor.segment_present = 1;
    tss_descriptor.granularity = 0;
    tss_descriptor.zero = 0;
    tss_descriptor.operation_size = 1;
    tss_descriptor.descriptor_type = 0;
    tss_descriptor.type = 9;
    write_gdt_entry(GDT_SELECTOR_TSS, tss_descriptor); // tss

    flush_gdt();
    load_task_register(GDT_SELECTOR_TSS);

    asm volatile(
        "mov %%ax, %%ds\n"
        "mov %%ax, %%es\n"
        "mov %%ax, %%gs\n"
        "mov %%ax, %%ss\n" ::"a"(GDT_SELECTOR_DATA0)
        : "memory");
    set_fs(GDT_SELECTOR_PROC);

    // Make sure CS points to the kernel code descriptor.
    // clang-format off
    asm volatile(
        "ljmpl $" __STRINGIFY(GDT_SELECTOR_CODE0) ", $sanity\n"
        "sanity:\n");
    // clang-format on
}

void Processor::set_thread_specific(u8* data, size_t len)
{
    auto& descriptor = get_gdt_entry(GDT_SELECTOR_TLS);
    descriptor.set_base(data);
    descriptor.set_limit(len);
}

void copy_kernel_registers_into_ptrace_registers(PtraceRegisters& ptrace_regs, const RegisterState& kernel_regs)
{
    ptrace_regs.eax = kernel_regs.eax;
    ptrace_regs.ecx = kernel_regs.ecx;
    ptrace_regs.edx = kernel_regs.edx;
    ptrace_regs.ebx = kernel_regs.ebx;
    ptrace_regs.esp = kernel_regs.userspace_esp;
    ptrace_regs.ebp = kernel_regs.ebp;
    ptrace_regs.esi = kernel_regs.esi;
    ptrace_regs.edi = kernel_regs.edi;
    ptrace_regs.eip = kernel_regs.eip;
    ptrace_regs.eflags = kernel_regs.eflags;
    ptrace_regs.cs = 0;
    ptrace_regs.ss = 0;
    ptrace_regs.ds = 0;
    ptrace_regs.es = 0;
    ptrace_regs.fs = 0;
    ptrace_regs.gs = 0;
}

void copy_ptrace_registers_into_kernel_registers(RegisterState& kernel_regs, const PtraceRegisters& ptrace_regs)
{
    kernel_regs.eax = ptrace_regs.eax;
    kernel_regs.ecx = ptrace_regs.ecx;
    kernel_regs.edx = ptrace_regs.edx;
    kernel_regs.ebx = ptrace_regs.ebx;
    kernel_regs.esp = ptrace_regs.esp;
    kernel_regs.ebp = ptrace_regs.ebp;
    kernel_regs.esi = ptrace_regs.esi;
    kernel_regs.edi = ptrace_regs.edi;
    kernel_regs.eip = ptrace_regs.eip;
    kernel_regs.eflags = (kernel_regs.eflags & ~safe_eflags_mask) | (ptrace_regs.eflags & safe_eflags_mask);
}

}

#ifdef DEBUG
void __assertion_failed(const char* msg, const char* file, unsigned line, const char* func)
{
    asm volatile("cli");
    dmesgln("ASSERTION FAILED: {}", msg);
    dmesgln("{}:{} in {}", file, line, func);

    // Switch back to the current process's page tables if there are any.
    // Otherwise stack walking will be a disaster.
    auto process = Process::current();
    if (process)
        MM.enter_process_paging_scope(*process);

    Kernel::dump_backtrace();
    Processor::halt();
}
#endif
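
// Bit 7 of the CMOS index port (0x70) gates NMI delivery: setting it masks
// NMIs, clearing it re-enables them. The RAII pair below toggles just that
// bit while preserving the selected CMOS index in the low 7 bits.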
NonMaskableInterruptDisabler::NonMaskableInterruptDisabler()
{
    IO::out8(0x70, IO::in8(0x70) | 0x80);
}

NonMaskableInterruptDisabler::~NonMaskableInterruptDisabler()
{
    IO::out8(0x70, IO::in8(0x70) & 0x7F);
}