Processor.cpp

/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/BuiltinWrappers.h>
#include <AK/Format.h>
#include <AK/StdLibExtras.h>
#include <AK/String.h>
#include <AK/Types.h>
#include <Kernel/Interrupts/APIC.h>
#include <Kernel/Process.h>
#include <Kernel/Sections.h>
#include <Kernel/StdLib.h>
#include <Kernel/Thread.h>
#include <Kernel/Arch/Processor.h>
#include <Kernel/Arch/ScopedCritical.h>
#include <Kernel/Arch/x86/CPUID.h>
#include <Kernel/Arch/x86/InterruptDisabler.h>
#include <Kernel/Arch/x86/Interrupts.h>
#include <Kernel/Arch/x86/MSR.h>
#include <Kernel/Arch/x86/ProcessorInfo.h>
#include <Kernel/Arch/x86/SafeMem.h>
#include <Kernel/Arch/x86/TrapFrame.h>
#include <Kernel/Memory/PageDirectory.h>
#include <Kernel/Memory/ScopedAddressSpaceSwitcher.h>

namespace Kernel {

READONLY_AFTER_INIT FPUState Processor::s_clean_fpu_state;
READONLY_AFTER_INIT static ProcessorContainer s_processors {};
READONLY_AFTER_INIT Atomic<u32> Processor::g_total_processors;
READONLY_AFTER_INIT static volatile bool s_smp_enabled;

static Atomic<ProcessorMessage*> s_message_pool;
Atomic<u32> Processor::s_idle_cpu_mask { 0 };

// The compiler can't see the calls to these functions inside assembly.
// Declare them here to avoid dead-code warnings.
extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, TrapFrame* trap) __attribute__((used));
extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread) __attribute__((used));
extern "C" FlatPtr do_init_context(Thread* thread, u32 flags) __attribute__((used));
extern "C" void syscall_entry();

bool Processor::is_smp_enabled()
{
    return s_smp_enabled;
}

UNMAP_AFTER_INIT static void sse_init()
{
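    // Clear CR0.EM (bit 2) and set CR0.MP (bit 1) so SSE instructions don't #UD, then set
    // CR4.OSFXSR and CR4.OSXMMEXCPT (bits 9 and 10, i.e. 0x600) to enable FXSAVE/FXRSTOR
    // and SSE exception handling.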
    write_cr0((read_cr0() & 0xfffffffbu) | 0x2);
    write_cr4(read_cr4() | 0x600);
}

void exit_kernel_thread(void)
{
    Thread::current()->exit();
}

UNMAP_AFTER_INIT void Processor::cpu_detect()
{
    // NOTE: This is called during Processor::early_initialize; we cannot
    //       safely log at this point because kmalloc is not initialized yet!
    auto set_feature =
        [&](CPUFeature f) {
            m_features = static_cast<CPUFeature>(static_cast<u32>(m_features) | static_cast<u32>(f));
        };
    m_features = static_cast<CPUFeature>(0);

    CPUID processor_info(0x1);
    if (processor_info.edx() & (1 << 4))
        set_feature(CPUFeature::TSC);
    if (processor_info.edx() & (1 << 6))
        set_feature(CPUFeature::PAE);
    if (processor_info.edx() & (1 << 13))
        set_feature(CPUFeature::PGE);
    if (processor_info.edx() & (1 << 23))
        set_feature(CPUFeature::MMX);
    if (processor_info.edx() & (1 << 24))
        set_feature(CPUFeature::FXSR);
    if (processor_info.edx() & (1 << 25))
        set_feature(CPUFeature::SSE);
    if (processor_info.edx() & (1 << 26))
        set_feature(CPUFeature::SSE2);
    if (processor_info.ecx() & (1 << 0))
        set_feature(CPUFeature::SSE3);
    if (processor_info.ecx() & (1 << 9))
        set_feature(CPUFeature::SSSE3);
    if (processor_info.ecx() & (1 << 19))
        set_feature(CPUFeature::SSE4_1);
    if (processor_info.ecx() & (1 << 20))
        set_feature(CPUFeature::SSE4_2);
    if (processor_info.ecx() & (1 << 26))
        set_feature(CPUFeature::XSAVE);
    if (processor_info.ecx() & (1 << 28))
        set_feature(CPUFeature::AVX);
    if (processor_info.ecx() & (1 << 30))
        set_feature(CPUFeature::RDRAND);
    if (processor_info.ecx() & (1u << 31))
        set_feature(CPUFeature::HYPERVISOR);

    if (processor_info.edx() & (1 << 11)) {
        u32 stepping = processor_info.eax() & 0xf;
        u32 model = (processor_info.eax() >> 4) & 0xf;
        u32 family = (processor_info.eax() >> 8) & 0xf;
        if (!(family == 6 && model < 3 && stepping < 3))
            set_feature(CPUFeature::SEP);
        if ((family == 6 && model >= 3) || (family == 0xf && model >= 0xe))
            set_feature(CPUFeature::CONSTANT_TSC);
    }

    u32 max_extended_leaf = CPUID(0x80000000).eax();

    if (max_extended_leaf >= 0x80000001) {
        CPUID extended_processor_info(0x80000001);
        if (extended_processor_info.edx() & (1 << 20))
            set_feature(CPUFeature::NX);
        if (extended_processor_info.edx() & (1 << 27))
            set_feature(CPUFeature::RDTSCP);
        if (extended_processor_info.edx() & (1 << 29))
            set_feature(CPUFeature::LM);
        if (extended_processor_info.edx() & (1 << 11)) {
            // Only available in 64-bit mode
            set_feature(CPUFeature::SYSCALL);
        }
    }

    if (max_extended_leaf >= 0x80000007) {
        CPUID cpuid(0x80000007);
        if (cpuid.edx() & (1 << 8)) {
            set_feature(CPUFeature::CONSTANT_TSC);
            set_feature(CPUFeature::NONSTOP_TSC);
        }
    }

    if (max_extended_leaf >= 0x80000008) {
        // CPUID.80000008H:EAX[7:0] reports the physical-address width supported by the processor.
        CPUID cpuid(0x80000008);
        m_physical_address_bit_width = cpuid.eax() & 0xff;
        // CPUID.80000008H:EAX[15:8] reports the linear-address width supported by the processor.
        m_virtual_address_bit_width = (cpuid.eax() >> 8) & 0xff;
    } else {
        // For processors that do not support CPUID function 80000008H, the physical-address width
        // is generally 36 bits if CPUID.01H:EDX.PAE [bit 6] = 1 and 32 bits otherwise.
        m_physical_address_bit_width = has_feature(CPUFeature::PAE) ? 36 : 32;
        // Processors that do not support CPUID function 80000008H support a linear-address width of 32 bits.
        m_virtual_address_bit_width = 32;
    }

    CPUID extended_features(0x7);
    if (extended_features.ebx() & (1 << 20))
        set_feature(CPUFeature::SMAP);
    if (extended_features.ebx() & (1 << 7))
        set_feature(CPUFeature::SMEP);
    if (extended_features.ecx() & (1 << 2))
        set_feature(CPUFeature::UMIP);
    if (extended_features.ebx() & (1 << 18))
        set_feature(CPUFeature::RDSEED);
}

UNMAP_AFTER_INIT void Processor::cpu_setup()
{
    // NOTE: This is called during Processor::early_initialize; we cannot
    //       safely log at this point because kmalloc is not initialized yet!
    cpu_detect();

    if (has_feature(CPUFeature::SSE)) {
        // enter_thread_context() assumes that if an x86 CPU supports SSE, then it also supports FXSR.
        // SSE support without FXSR is an extremely unlikely scenario, so let's be pragmatic about it.
        VERIFY(has_feature(CPUFeature::FXSR));
        sse_init();
    }
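
    // Set CR0.WP (bit 16) so that the kernel also honors read-only page protection in ring 0.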
    write_cr0(read_cr0() | 0x00010000);

    if (has_feature(CPUFeature::PGE)) {
        // Turn on CR4.PGE so the CPU will respect the G bit in page tables.
        write_cr4(read_cr4() | 0x80);
    }

    if (has_feature(CPUFeature::NX)) {
        // Turn on IA32_EFER.NXE
        MSR ia32_efer(MSR_IA32_EFER);
        ia32_efer.set(ia32_efer.get() | 0x800);
    }

    if (has_feature(CPUFeature::SMEP)) {
        // Turn on CR4.SMEP
        write_cr4(read_cr4() | 0x100000);
    }

    if (has_feature(CPUFeature::SMAP)) {
        // Turn on CR4.SMAP
        write_cr4(read_cr4() | 0x200000);
    }

    if (has_feature(CPUFeature::UMIP)) {
        write_cr4(read_cr4() | 0x800);
    }

    if (has_feature(CPUFeature::TSC)) {
        write_cr4(read_cr4() | 0x4);
    }

    if (has_feature(CPUFeature::XSAVE)) {
        // Turn on CR4.OSXSAVE
        write_cr4(read_cr4() | 0x40000);

        // According to the Intel manual: "After reset, all bits (except bit 0) in XCR0 are cleared to zero; XCR0[0] is set to 1."
        // Sadly we can't trust this, for example VirtualBox starts with bits 0-4 set, so let's do it ourselves.
        write_xcr0(0x1);

        if (has_feature(CPUFeature::AVX)) {
            // Turn on SSE, AVX and x87 flags
            write_xcr0(read_xcr0() | 0x7);
        }
    }

#if ARCH(X86_64)
    // x86_64 processors must have the syscall feature.
    VERIFY(has_feature(CPUFeature::SYSCALL));
    MSR efer_msr(MSR_EFER);
    efer_msr.set(efer_msr.get() | 1u);

    // Write code and stack selectors to the STAR MSR. The first value stored in bits 63:48 controls the sysret CS (value + 0x10) and SS (value + 0x8),
    // and the value stored in bits 47:32 controls the syscall CS (value) and SS (value + 0x8).
    u64 star = 0;
    star |= 0x13ul << 48u;
    star |= 0x08ul << 32u;
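    // Assuming the selector layout written by gdt_init() below (kernel code 0x08, kernel data 0x10,
    // user data 0x18, user code 0x20), this means syscall loads CS=0x08/SS=0x10, and sysret loads
    // CS=0x23/SS=0x1b (the user selectors with RPL 3).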
    MSR star_msr(MSR_STAR);
    star_msr.set(star);

    // Write the syscall entry point to the LSTAR MSR, and write the SFMASK MSR to clear rflags upon entry.
    // The userspace rflags will be preserved in r11.
    MSR lstar_msr(MSR_LSTAR);
    MSR sfmask_msr(MSR_SFMASK);
    lstar_msr.set(reinterpret_cast<u64>(&syscall_entry));
    sfmask_msr.set(~0x2);
#endif
}

String Processor::features_string() const
{
    StringBuilder builder;
    auto feature_to_str =
        [](CPUFeature f) -> const char* {
            switch (f) {
            case CPUFeature::NX:
                return "nx";
            case CPUFeature::PAE:
                return "pae";
            case CPUFeature::PGE:
                return "pge";
            case CPUFeature::RDRAND:
                return "rdrand";
            case CPUFeature::RDSEED:
                return "rdseed";
            case CPUFeature::SMAP:
                return "smap";
            case CPUFeature::SMEP:
                return "smep";
            case CPUFeature::SSE:
                return "sse";
            case CPUFeature::TSC:
                return "tsc";
            case CPUFeature::RDTSCP:
                return "rdtscp";
            case CPUFeature::CONSTANT_TSC:
                return "constant_tsc";
            case CPUFeature::NONSTOP_TSC:
                return "nonstop_tsc";
            case CPUFeature::UMIP:
                return "umip";
            case CPUFeature::SEP:
                return "sep";
            case CPUFeature::SYSCALL:
                return "syscall";
            case CPUFeature::MMX:
                return "mmx";
            case CPUFeature::FXSR:
                return "fxsr";
            case CPUFeature::SSE2:
                return "sse2";
            case CPUFeature::SSE3:
                return "sse3";
            case CPUFeature::SSSE3:
                return "ssse3";
            case CPUFeature::SSE4_1:
                return "sse4.1";
            case CPUFeature::SSE4_2:
                return "sse4.2";
            case CPUFeature::XSAVE:
                return "xsave";
            case CPUFeature::AVX:
                return "avx";
            case CPUFeature::LM:
                return "lm";
            case CPUFeature::HYPERVISOR:
                return "hypervisor";
                // Intentionally no default case here, so that we get a compiler
                // warning if a new feature is added to the enum but not handled here.
            }
            // Shouldn't ever happen
            return "???";
        };
    bool first = true;
    for (u32 flag = 1; flag != 0; flag <<= 1) {
        if ((static_cast<u32>(m_features) & flag) != 0) {
            if (first)
                first = false;
            else
                builder.append(' ');
            auto str = feature_to_str(static_cast<CPUFeature>(flag));
            builder.append(str, strlen(str));
        }
    }
    return builder.build();
}

UNMAP_AFTER_INIT void Processor::early_initialize(u32 cpu)
{
    m_self = this;
    m_cpu = cpu;
    m_in_irq = 0;
    m_in_critical = 0;

    m_invoke_scheduler_async = false;
    m_scheduler_initialized = false;
    m_in_scheduler = true;

    m_message_queue = nullptr;
    m_idle_thread = nullptr;
    m_current_thread = nullptr;
    m_info = nullptr;

    m_halt_requested = false;
    if (cpu == 0) {
        s_smp_enabled = false;
        g_total_processors.store(1u, AK::MemoryOrder::memory_order_release);
    } else {
        g_total_processors.fetch_add(1u, AK::MemoryOrder::memory_order_acq_rel);
    }

    deferred_call_pool_init();

    cpu_setup();
    gdt_init();

    VERIFY(is_initialized());   // sanity check
    VERIFY(&current() == this); // sanity check
}

UNMAP_AFTER_INIT void Processor::initialize(u32 cpu)
{
    VERIFY(m_self == this);
    VERIFY(&current() == this); // sanity check

    dmesgln("CPU[{}]: Supported features: {}", current_id(), features_string());
    if (!has_feature(CPUFeature::RDRAND))
        dmesgln("CPU[{}]: No RDRAND support detected, randomness will be poor", current_id());
    dmesgln("CPU[{}]: Physical address bit width: {}", current_id(), m_physical_address_bit_width);
    dmesgln("CPU[{}]: Virtual address bit width: {}", current_id(), m_virtual_address_bit_width);

    if (cpu == 0)
        idt_init();
    else
        flush_idt();

    if (cpu == 0) {
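        // Note: fxsave requires a 16-byte-aligned destination, which is why we assert the
        // alignment of s_clean_fpu_state before capturing the pristine FPU state below.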
        VERIFY((FlatPtr(&s_clean_fpu_state) & 0xF) == 0);
        asm volatile("fninit");
        if (has_feature(CPUFeature::FXSR))
            asm volatile("fxsave %0"
                         : "=m"(s_clean_fpu_state));
        else
            asm volatile("fnsave %0"
                         : "=m"(s_clean_fpu_state));

        if (has_feature(CPUFeature::HYPERVISOR))
            detect_hypervisor();
    }

    m_info = new ProcessorInfo(*this);

    {
        // We need to prevent races between APs starting up at the same time
        VERIFY(cpu < s_processors.size());
        s_processors[cpu] = this;
    }
}

UNMAP_AFTER_INIT void Processor::detect_hypervisor()
{
    CPUID hypervisor_leaf_range(0x40000000);

    // Get signature of hypervisor.
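    // CPUID leaf 0x40000000 returns the hypervisor vendor signature split across EBX, ECX and EDX
    // (for example "Microsoft Hv" on Hyper-V, as checked below, or "KVMKVMKVM" on KVM).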
    alignas(sizeof(u32)) char hypervisor_signature_buffer[13];
    *reinterpret_cast<u32*>(hypervisor_signature_buffer) = hypervisor_leaf_range.ebx();
    *reinterpret_cast<u32*>(hypervisor_signature_buffer + 4) = hypervisor_leaf_range.ecx();
    *reinterpret_cast<u32*>(hypervisor_signature_buffer + 8) = hypervisor_leaf_range.edx();
    hypervisor_signature_buffer[12] = '\0';
    StringView hypervisor_signature(hypervisor_signature_buffer);

    dmesgln("CPU[{}]: CPUID hypervisor signature '{}' ({:#x} {:#x} {:#x}), max leaf {:#x}", current_id(), hypervisor_signature, hypervisor_leaf_range.ebx(), hypervisor_leaf_range.ecx(), hypervisor_leaf_range.edx(), hypervisor_leaf_range.eax());

    if (hypervisor_signature == "Microsoft Hv"sv)
        detect_hypervisor_hyperv(hypervisor_leaf_range);
}

UNMAP_AFTER_INIT void Processor::detect_hypervisor_hyperv(CPUID const& hypervisor_leaf_range)
{
    if (hypervisor_leaf_range.eax() < 0x40000001)
        return;

    CPUID hypervisor_interface(0x40000001);

    // Get signature of hypervisor interface.
    alignas(sizeof(u32)) char interface_signature_buffer[5];
    *reinterpret_cast<u32*>(interface_signature_buffer) = hypervisor_interface.eax();
    interface_signature_buffer[4] = '\0';
    StringView hyperv_interface_signature(interface_signature_buffer);

    dmesgln("CPU[{}]: Hyper-V interface signature '{}' ({:#x})", current_id(), hyperv_interface_signature, hypervisor_interface.eax());

    if (hypervisor_leaf_range.eax() < 0x40000002)
        return;

    CPUID hypervisor_sysid(0x40000002);
    dmesgln("CPU[{}]: Hyper-V system identity {}.{}, build number {}", current_id(), hypervisor_sysid.ebx() >> 16, hypervisor_sysid.ebx() & 0xFFFF, hypervisor_sysid.eax());

    if (hypervisor_leaf_range.eax() < 0x40000005 || hyperv_interface_signature != "Hv#1"sv)
        return;

    dmesgln("CPU[{}]: Hyper-V hypervisor detected", current_id());

    // TODO: Actually do something with Hyper-V.
}

void Processor::write_raw_gdt_entry(u16 selector, u32 low, u32 high)
{
    u16 i = (selector & 0xfffc) >> 3;
    u32 prev_gdt_length = m_gdt_length;

    if (i >= m_gdt_length) {
        m_gdt_length = i + 1;
        VERIFY(m_gdt_length <= sizeof(m_gdt) / sizeof(m_gdt[0]));
        m_gdtr.limit = (m_gdt_length + 1) * 8 - 1;
    }
    m_gdt[i].low = low;
    m_gdt[i].high = high;

    // clear selectors we may have skipped
    while (i < prev_gdt_length) {
        m_gdt[i].low = 0;
        m_gdt[i].high = 0;
        i++;
    }
}

void Processor::write_gdt_entry(u16 selector, Descriptor& descriptor)
{
    write_raw_gdt_entry(selector, descriptor.low, descriptor.high);
}

Descriptor& Processor::get_gdt_entry(u16 selector)
{
    u16 i = (selector & 0xfffc) >> 3;
    return *(Descriptor*)(&m_gdt[i]);
}

void Processor::flush_gdt()
{
    m_gdtr.address = m_gdt;
    m_gdtr.limit = (m_gdt_length * 8) - 1;
    asm volatile("lgdt %0" ::"m"(m_gdtr)
                 : "memory");
}

const DescriptorTablePointer& Processor::get_gdtr()
{
    return m_gdtr;
}

Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames)
{
    FlatPtr frame_ptr = 0, ip = 0;
    Vector<FlatPtr, 32> stack_trace;

    auto walk_stack = [&](FlatPtr stack_ptr) {
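        // With frame pointers enabled, each stack frame starts with the caller's saved frame
        // pointer followed by the return address, i.e. ((FlatPtr*)stack_ptr)[0] is the next
        // frame pointer and ((FlatPtr*)stack_ptr)[1] is the return address.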
        static constexpr size_t max_stack_frames = 4096;
        bool is_walking_userspace_stack = false;
        stack_trace.append(ip);
        size_t count = 1;
        while (stack_ptr && stack_trace.size() < max_stack_frames) {
            FlatPtr retaddr;

            count++;
            if (max_frames != 0 && count > max_frames)
                break;

            if (!Memory::is_user_address(VirtualAddress { stack_ptr })) {
                if (is_walking_userspace_stack) {
                    dbgln("SHENANIGANS! Userspace stack points back into kernel memory");
                    break;
                }
            } else {
                is_walking_userspace_stack = true;
            }

            if (Memory::is_user_range(VirtualAddress(stack_ptr), sizeof(FlatPtr) * 2)) {
                if (copy_from_user(&retaddr, &((FlatPtr*)stack_ptr)[1]).is_error() || !retaddr)
                    break;
                stack_trace.append(retaddr);
                if (copy_from_user(&stack_ptr, (FlatPtr*)stack_ptr).is_error())
                    break;
            } else {
                void* fault_at;
                if (!safe_memcpy(&retaddr, &((FlatPtr*)stack_ptr)[1], sizeof(FlatPtr), fault_at) || !retaddr)
                    break;
                stack_trace.append(retaddr);
                if (!safe_memcpy(&stack_ptr, (FlatPtr*)stack_ptr, sizeof(FlatPtr), fault_at))
                    break;
            }
        }
    };

    auto capture_current_thread = [&]() {
        frame_ptr = (FlatPtr)__builtin_frame_address(0);
        ip = (FlatPtr)__builtin_return_address(0);

        walk_stack(frame_ptr);
    };

    // Since the thread may be running on another processor, there
    // is a chance a context switch may happen while we're trying
    // to get it. It also won't be entirely accurate and merely
    // reflect the status at the last context switch.
    SpinlockLocker lock(g_scheduler_lock);
    if (&thread == Processor::current_thread()) {
        VERIFY(thread.state() == Thread::Running);
        // Leave the scheduler lock. If we trigger page faults we may
        // need to be preempted. Since this is our own thread it won't
        // cause any problems as the stack won't change below this frame.
        lock.unlock();
        capture_current_thread();
    } else if (thread.is_active()) {
        VERIFY(thread.cpu() != Processor::current_id());
        // If this is the case, the thread is currently running
        // on another processor. We can't trust the kernel stack as
        // it may be changing at any time. So we probably need to send
        // an IPI to that processor, have it walk the stack, and wait
        // until it returns the data back to us.
        auto& proc = Processor::current();
        smp_unicast(
            thread.cpu(),
            [&]() {
                dbgln("CPU[{}] getting stack for cpu #{}", Processor::current_id(), proc.id());
                ScopedAddressSpaceSwitcher switcher(thread.process());
                VERIFY(&Processor::current() != &proc);
                VERIFY(&thread == Processor::current_thread());
                // NOTE: Because the other processor is still holding the
                //       scheduler lock while waiting for this callback to finish,
                //       the current thread on the target processor cannot change.
                // TODO: What to do about page faults here? We might deadlock
                //       because the other processor is still holding the
                //       scheduler lock...
                capture_current_thread();
            },
            false);
    } else {
        switch (thread.state()) {
        case Thread::Running:
            VERIFY_NOT_REACHED(); // should have been handled above
        case Thread::Runnable:
        case Thread::Stopped:
        case Thread::Blocked:
        case Thread::Dying:
        case Thread::Dead: {
            // We need to retrieve ebp from what was last pushed to the kernel
            // stack. Before switching out of that thread, switch_context
            // pushed the callee-saved registers, and the last of them happens
            // to be ebp.
            ScopedAddressSpaceSwitcher switcher(thread.process());
            auto& regs = thread.regs();
            auto* stack_top = reinterpret_cast<FlatPtr*>(regs.sp());
            if (Memory::is_user_range(VirtualAddress(stack_top), sizeof(FlatPtr))) {
                if (copy_from_user(&frame_ptr, &((FlatPtr*)stack_top)[0]).is_error())
                    frame_ptr = 0;
            } else {
                void* fault_at;
                if (!safe_memcpy(&frame_ptr, &((FlatPtr*)stack_top)[0], sizeof(FlatPtr), fault_at))
                    frame_ptr = 0;
            }
            ip = regs.ip();
            // TODO: We need to leave the scheduler lock here, but we also
            //       need to prevent the target thread from being run while
            //       we walk the stack.
            lock.unlock();
            walk_stack(frame_ptr);
            break;
        }
        default:
            dbgln("Cannot capture stack trace for thread {} in state {}", thread, thread.state_string());
            break;
        }
    }
    return stack_trace;
}

ProcessorContainer& Processor::processors()
{
    return s_processors;
}

Processor& Processor::by_id(u32 id)
{
    return *s_processors[id];
}

void Processor::enter_trap(TrapFrame& trap, bool raise_irq)
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(&Processor::current() == this);
    trap.prev_irq_level = m_in_irq;
    if (raise_irq)
        m_in_irq++;
    auto* current_thread = Processor::current_thread();
    if (current_thread) {
        auto& current_trap = current_thread->current_trap();
        trap.next_trap = current_trap;
        current_trap = &trap;
        // The cs register of this trap tells us where we will return back to
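        // (the low two bits of cs are the privilege level: 0 means kernel mode, 3 means user mode).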
        auto new_previous_mode = ((trap.regs->cs & 3) != 0) ? Thread::PreviousMode::UserMode : Thread::PreviousMode::KernelMode;
        if (current_thread->set_previous_mode(new_previous_mode) && trap.prev_irq_level == 0) {
            current_thread->update_time_scheduled(Scheduler::current_time(), new_previous_mode == Thread::PreviousMode::KernelMode, false);
        }
    } else {
        trap.next_trap = nullptr;
    }
}

void Processor::exit_trap(TrapFrame& trap)
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(&Processor::current() == this);

    // Temporarily enter a critical section. This is to prevent critical
    // sections entered and left within e.g. smp_process_pending_messages
    // from triggering a context switch while we're executing this function.
    // See the comment at the end of the function for why we don't use
    // ScopedCritical here.
    m_in_critical = m_in_critical + 1;

    VERIFY(m_in_irq >= trap.prev_irq_level);
    m_in_irq = trap.prev_irq_level;

    if (s_smp_enabled)
        smp_process_pending_messages();

    // Process the deferred call queue. Among other things, this ensures
    // that any pending thread unblocks happen before we enter the scheduler.
    deferred_call_execute_pending();

    auto* current_thread = Processor::current_thread();
    if (current_thread) {
        auto& current_trap = current_thread->current_trap();
        current_trap = trap.next_trap;
        Thread::PreviousMode new_previous_mode;
        if (current_trap) {
            VERIFY(current_trap->regs);
            // If we have another higher level trap then we probably returned
            // from an interrupt or irq handler. The cs register of the
            // new/higher level trap tells us what the mode prior to it was.
            new_previous_mode = ((current_trap->regs->cs & 3) != 0) ? Thread::PreviousMode::UserMode : Thread::PreviousMode::KernelMode;
        } else {
            // If we don't have a higher level trap then we're back in user mode,
            // which means that the previous mode prior to being back in user mode was kernel mode.
            new_previous_mode = Thread::PreviousMode::KernelMode;
        }

        if (current_thread->set_previous_mode(new_previous_mode))
            current_thread->update_time_scheduled(Scheduler::current_time(), true, false);
    }

    VERIFY_INTERRUPTS_DISABLED();

    // Leave the critical section without actually enabling interrupts.
    // We don't want context switches to happen until we're explicitly
    // triggering a switch in check_invoke_scheduler.
    m_in_critical = m_in_critical - 1;
    if (!m_in_irq && !m_in_critical)
        check_invoke_scheduler();
}

void Processor::check_invoke_scheduler()
{
    InterruptDisabler disabler;
    VERIFY(!m_in_irq);
    VERIFY(!m_in_critical);
    VERIFY(&Processor::current() == this);
    if (m_invoke_scheduler_async && m_scheduler_initialized) {
        m_invoke_scheduler_async = false;
        Scheduler::invoke_async();
    }
}

void Processor::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
{
    auto ptr = vaddr.as_ptr();
    while (page_count > 0) {
        // clang-format off
        asm volatile("invlpg %0"
             :
             : "m"(*ptr)
             : "memory");
        // clang-format on
        ptr += PAGE_SIZE;
        page_count--;
    }
}

void Processor::flush_tlb(Memory::PageDirectory const* page_directory, VirtualAddress vaddr, size_t page_count)
{
    if (s_smp_enabled && (!Memory::is_user_address(vaddr) || Process::current().thread_count() > 1))
        smp_broadcast_flush_tlb(page_directory, vaddr, page_count);
    else
        flush_tlb_local(vaddr, page_count);
}

void Processor::smp_return_to_pool(ProcessorMessage& msg)
{
    ProcessorMessage* next = nullptr;
    for (;;) {
        msg.next = next;
        if (s_message_pool.compare_exchange_strong(next, &msg, AK::MemoryOrder::memory_order_acq_rel))
            break;
        Processor::pause();
    }
}

ProcessorMessage& Processor::smp_get_from_pool()
{
    ProcessorMessage* msg;

    // The assumption is that messages are never removed from the pool!
    for (;;) {
        msg = s_message_pool.load(AK::MemoryOrder::memory_order_consume);
        if (!msg) {
            if (!Processor::current().smp_process_pending_messages()) {
                Processor::pause();
            }
            continue;
        }
        // If another processor were to use this message in the meantime,
        // "msg" is still valid (because it never gets freed). We'd detect
        // this because the expected value "msg" and the pool head would
        // no longer match, and the compare_exchange will fail. But accessing
        // "msg->next" is always safe here.
        if (s_message_pool.compare_exchange_strong(msg, msg->next, AK::MemoryOrder::memory_order_acq_rel)) {
            // We successfully "popped" this available message
            break;
        }
    }

    VERIFY(msg != nullptr);
    return *msg;
}

u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(wake_count > 0);
    if (!s_smp_enabled)
        return 0;

    // Wake at most N - 1 processors
    if (wake_count >= Processor::count()) {
        wake_count = Processor::count() - 1;
        VERIFY(wake_count > 0);
    }

    u32 current_id = Processor::current_id();

    u32 did_wake_count = 0;
    auto& apic = APIC::the();
    while (did_wake_count < wake_count) {
        // Try to get a set of idle CPUs and flip them to busy
        u32 idle_mask = s_idle_cpu_mask.load(AK::MemoryOrder::memory_order_relaxed) & ~(1u << current_id);
        u32 idle_count = popcount(idle_mask);
        if (idle_count == 0)
            break; // No (more) idle processor available

        u32 found_mask = 0;
        for (u32 i = 0; i < idle_count; i++) {
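            // Note: bit_scan_forward() returns a 1-based bit index (like ffs(3)), hence the - 1
            // below to convert it into a 0-based CPU number.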
            u32 cpu = bit_scan_forward(idle_mask) - 1;
            idle_mask &= ~(1u << cpu);
            found_mask |= 1u << cpu;
        }

        idle_mask = s_idle_cpu_mask.fetch_and(~found_mask, AK::MemoryOrder::memory_order_acq_rel) & found_mask;
        if (idle_mask == 0)
            continue; // All of them were flipped to busy, try again
        idle_count = popcount(idle_mask);
        for (u32 i = 0; i < idle_count; i++) {
            u32 cpu = bit_scan_forward(idle_mask) - 1;
            idle_mask &= ~(1u << cpu);

            // Send an IPI to that CPU to wake it up. There is a possibility
            // someone else woke it up as well, or that it woke up due to
            // a timer interrupt. But we tried hard to avoid this...
            apic.send_ipi(cpu);
            did_wake_count++;
        }
    }
    return did_wake_count;
}

UNMAP_AFTER_INIT void Processor::smp_enable()
{
    size_t msg_pool_size = Processor::count() * 100u;
    size_t msg_entries_cnt = Processor::count();
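
    // We allocate 100 messages per processor, and each message carries one entry per processor,
    // so a single message can sit on every CPU's message queue at the same time (e.g. for broadcasts).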
    auto msgs = new ProcessorMessage[msg_pool_size];
    auto msg_entries = new ProcessorMessageEntry[msg_pool_size * msg_entries_cnt];
    size_t msg_entry_i = 0;
    for (size_t i = 0; i < msg_pool_size; i++, msg_entry_i += msg_entries_cnt) {
        auto& msg = msgs[i];
        msg.next = i < msg_pool_size - 1 ? &msgs[i + 1] : nullptr;
        msg.per_proc_entries = &msg_entries[msg_entry_i];
        for (size_t k = 0; k < msg_entries_cnt; k++)
            msg_entries[msg_entry_i + k].msg = &msg;
    }

    s_message_pool.store(&msgs[0], AK::MemoryOrder::memory_order_release);

    // Start sending IPI messages
    s_smp_enabled = true;
}

void Processor::smp_cleanup_message(ProcessorMessage& msg)
{
    switch (msg.type) {
    case ProcessorMessage::Callback:
        msg.callback_value().~Function();
        break;
    default:
        break;
    }
}

bool Processor::smp_process_pending_messages()
{
    VERIFY(s_smp_enabled);

    bool did_process = false;
    enter_critical();

    if (auto pending_msgs = m_message_queue.exchange(nullptr, AK::MemoryOrder::memory_order_acq_rel)) {
        // We pulled the stack of pending messages in LIFO order, so we need to reverse the list first
        auto reverse_list =
            [](ProcessorMessageEntry* list) -> ProcessorMessageEntry* {
                ProcessorMessageEntry* rev_list = nullptr;
                while (list) {
                    auto next = list->next;
                    list->next = rev_list;
                    rev_list = list;
                    list = next;
                }
                return rev_list;
            };

        pending_msgs = reverse_list(pending_msgs);

        // now process in the right order
        ProcessorMessageEntry* next_msg;
        for (auto cur_msg = pending_msgs; cur_msg; cur_msg = next_msg) {
            next_msg = cur_msg->next;
            auto msg = cur_msg->msg;

            dbgln_if(SMP_DEBUG, "SMP[{}]: Processing message {}", current_id(), VirtualAddress(msg));

            switch (msg->type) {
            case ProcessorMessage::Callback:
                msg->invoke_callback();
                break;
            case ProcessorMessage::FlushTlb:
                if (Memory::is_user_address(VirtualAddress(msg->flush_tlb.ptr))) {
                    // We assume that we don't cross into kernel land!
                    VERIFY(Memory::is_user_range(VirtualAddress(msg->flush_tlb.ptr), msg->flush_tlb.page_count * PAGE_SIZE));
                    if (read_cr3() != msg->flush_tlb.page_directory->cr3()) {
                        // This processor isn't using this page directory right now, we can ignore this request
                        dbgln_if(SMP_DEBUG, "SMP[{}]: No need to flush {} pages at {}", current_id(), msg->flush_tlb.page_count, VirtualAddress(msg->flush_tlb.ptr));
                        break;
                    }
                }
                flush_tlb_local(VirtualAddress(msg->flush_tlb.ptr), msg->flush_tlb.page_count);
                break;
            }

            bool is_async = msg->async; // Need to cache this value *before* dropping the ref count!
            auto prev_refs = msg->refs.fetch_sub(1u, AK::MemoryOrder::memory_order_acq_rel);
            VERIFY(prev_refs != 0);
            if (prev_refs == 1) {
                // All processors handled this. If this is an async message,
                // we need to clean it up and return it to the pool
                if (is_async) {
                    smp_cleanup_message(*msg);
                    smp_return_to_pool(*msg);
                }
            }

            if (m_halt_requested.load(AK::MemoryOrder::memory_order_relaxed))
                halt_this();
        }
        did_process = true;
    } else if (m_halt_requested.load(AK::MemoryOrder::memory_order_relaxed)) {
        halt_this();
    }

    leave_critical();
    return did_process;
}

bool Processor::smp_enqueue_message(ProcessorMessage& msg)
{
    // Note that it's quite possible that the other processor may pop
    // the queue at any given time. We rely on the fact that the messages
    // are pooled and never get freed!
    auto& msg_entry = msg.per_proc_entries[id()];
    VERIFY(msg_entry.msg == &msg);
    ProcessorMessageEntry* next = nullptr;
    for (;;) {
        msg_entry.next = next;
        if (m_message_queue.compare_exchange_strong(next, &msg_entry, AK::MemoryOrder::memory_order_acq_rel))
            break;
        Processor::pause();
    }

    // If the enqueued message was the only message in the queue when posted,
    // we return true. This is used by callers when deciding whether to generate an IPI.
    return next == nullptr;
}

void Processor::smp_broadcast_message(ProcessorMessage& msg)
{
    auto& current_processor = Processor::current();

    dbgln_if(SMP_DEBUG, "SMP[{}]: Broadcast message {} to cpus: {} processor: {}", current_processor.id(), VirtualAddress(&msg), count(), VirtualAddress(&current_processor));

    msg.refs.store(count() - 1, AK::MemoryOrder::memory_order_release);
    VERIFY(msg.refs > 0);
    bool need_broadcast = false;
    for_each(
        [&](Processor& proc) {
            if (&proc != &current_processor) {
                if (proc.smp_enqueue_message(msg))
                    need_broadcast = true;
            }
        });

    // Now trigger an IPI on all other APs (unless all targets already had messages queued)
    if (need_broadcast)
        APIC::the().broadcast_ipi();
}

void Processor::smp_broadcast_wait_sync(ProcessorMessage& msg)
{
    auto& cur_proc = Processor::current();
    VERIFY(!msg.async);
    // If the message is synchronous, we must clean it up and return it to the pool
    // ourselves. Otherwise, the last processor to complete it will return it.
    while (msg.refs.load(AK::MemoryOrder::memory_order_consume) != 0) {
        Processor::pause();

        // We need to process any messages that may have been sent to
        // us while we're waiting. This also checks if another processor
        // may have requested us to halt.
        cur_proc.smp_process_pending_messages();
    }

    smp_cleanup_message(msg);
    smp_return_to_pool(msg);
}

void Processor::smp_unicast_message(u32 cpu, ProcessorMessage& msg, bool async)
{
    auto& current_processor = Processor::current();
    VERIFY(cpu != current_processor.id());
    auto& target_processor = processors()[cpu];
    msg.async = async;

    dbgln_if(SMP_DEBUG, "SMP[{}]: Send message {} to cpu #{} processor: {}", current_processor.id(), VirtualAddress(&msg), cpu, VirtualAddress(&target_processor));

    msg.refs.store(1u, AK::MemoryOrder::memory_order_release);
    if (target_processor->smp_enqueue_message(msg)) {
        APIC::the().send_ipi(cpu);
    }

    if (!async) {
        // If the message is synchronous, we must clean it up and return it to the pool
        // ourselves. Otherwise, the last processor to complete it will return it.
        while (msg.refs.load(AK::MemoryOrder::memory_order_consume) != 0) {
            Processor::pause();

            // We need to process any messages that may have been sent to
            // us while we're waiting. This also checks if another processor
            // may have requested us to halt.
            current_processor.smp_process_pending_messages();
        }

        smp_cleanup_message(msg);
        smp_return_to_pool(msg);
    }
}

void Processor::smp_unicast(u32 cpu, Function<void()> callback, bool async)
{
    auto& msg = smp_get_from_pool();
    msg.type = ProcessorMessage::Callback;
    new (msg.callback_storage) ProcessorMessage::CallbackFunction(move(callback));
    smp_unicast_message(cpu, msg, async);
}

void Processor::smp_broadcast_flush_tlb(Memory::PageDirectory const* page_directory, VirtualAddress vaddr, size_t page_count)
{
    auto& msg = smp_get_from_pool();
    msg.async = false;
    msg.type = ProcessorMessage::FlushTlb;
    msg.flush_tlb.page_directory = page_directory;
    msg.flush_tlb.ptr = vaddr.as_ptr();
    msg.flush_tlb.page_count = page_count;

    smp_broadcast_message(msg);

    // While the other processors handle this request, we'll flush ours
    flush_tlb_local(vaddr, page_count);

    // Now wait until everybody is done as well
    smp_broadcast_wait_sync(msg);
}

void Processor::smp_broadcast_halt()
{
    // We don't want to use a message, because this could have been triggered
    // by being out of memory and we might not be able to get a message
    for_each(
        [&](Processor& proc) {
            proc.m_halt_requested.store(true, AK::MemoryOrder::memory_order_release);
        });

    // Now trigger an IPI on all other APs
    APIC::the().broadcast_ipi();
}

void Processor::halt()
{
    if (s_smp_enabled)
        smp_broadcast_halt();

    halt_this();
}

UNMAP_AFTER_INIT void Processor::deferred_call_pool_init()
{
    size_t pool_count = sizeof(m_deferred_call_pool) / sizeof(m_deferred_call_pool[0]);
    for (size_t i = 0; i < pool_count; i++) {
        auto& entry = m_deferred_call_pool[i];
        entry.next = i < pool_count - 1 ? &m_deferred_call_pool[i + 1] : nullptr;
        new (entry.handler_storage) DeferredCallEntry::HandlerFunction;
        entry.was_allocated = false;
    }
    m_pending_deferred_calls = nullptr;
    m_free_deferred_call_pool_entry = &m_deferred_call_pool[0];
}

void Processor::deferred_call_return_to_pool(DeferredCallEntry* entry)
{
    VERIFY(m_in_critical);
    VERIFY(!entry->was_allocated);

    entry->handler_value() = {};

    entry->next = m_free_deferred_call_pool_entry;
    m_free_deferred_call_pool_entry = entry;
}

DeferredCallEntry* Processor::deferred_call_get_free()
{
    VERIFY(m_in_critical);

    if (m_free_deferred_call_pool_entry) {
        // Fast path, we have an entry in our pool
        auto* entry = m_free_deferred_call_pool_entry;
        m_free_deferred_call_pool_entry = entry->next;
        VERIFY(!entry->was_allocated);
        return entry;
    }

    auto* entry = new DeferredCallEntry;
    new (entry->handler_storage) DeferredCallEntry::HandlerFunction;
    entry->was_allocated = true;
    return entry;
}

void Processor::deferred_call_execute_pending()
{
    VERIFY(m_in_critical);

    if (!m_pending_deferred_calls)
        return;
    auto* pending_list = m_pending_deferred_calls;
    m_pending_deferred_calls = nullptr;

    // We pulled the stack of pending deferred calls in LIFO order, so we need to reverse the list first
    auto reverse_list =
        [](DeferredCallEntry* list) -> DeferredCallEntry* {
            DeferredCallEntry* rev_list = nullptr;
            while (list) {
                auto next = list->next;
                list->next = rev_list;
                rev_list = list;
                list = next;
            }
            return rev_list;
        };
    pending_list = reverse_list(pending_list);

    do {
        pending_list->invoke_handler();

        // Return the entry back to the pool, or free it
        auto* next = pending_list->next;
        if (pending_list->was_allocated) {
            pending_list->handler_value().~Function();
            delete pending_list;
        } else
            deferred_call_return_to_pool(pending_list);

        pending_list = next;
    } while (pending_list);
}

void Processor::deferred_call_queue_entry(DeferredCallEntry* entry)
{
    VERIFY(m_in_critical);
    entry->next = m_pending_deferred_calls;
    m_pending_deferred_calls = entry;
}

void Processor::deferred_call_queue(Function<void()> callback)
{
    // NOTE: If we are called outside of a critical section and outside
    //       of an irq handler, the function will be executed before we return!
    ScopedCritical critical;
    auto& cur_proc = Processor::current();

    auto* entry = cur_proc.deferred_call_get_free();
    entry->handler_value() = move(callback);
    cur_proc.deferred_call_queue_entry(entry);
}
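
// Hypothetical usage sketch (not part of this file): queue work that should run once the
// current critical section / IRQ handler has been left, e.g. from an interrupt handler:
//
//     Processor::deferred_call_queue([] {
//         dbgln("This runs via deferred_call_execute_pending() on the way out of the trap");
//     });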

UNMAP_AFTER_INIT void Processor::gdt_init()
{
    m_gdt_length = 0;
    m_gdtr.address = nullptr;
    m_gdtr.limit = 0;

    write_raw_gdt_entry(0x0000, 0x00000000, 0x00000000);
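    // The raw values below encode standard flat segment descriptors; the high dword packs the
    // access byte, flags and upper base/limit bits. For example, 0x00cf9a00 is a ring 0, 32-bit
    // code segment with 4 KiB granularity, 0x00cff200 its ring 3 data counterpart, while the
    // x86_64 variants set the L (long mode) flag instead of the 32-bit default-size flag.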
#if ARCH(I386)
    write_raw_gdt_entry(GDT_SELECTOR_CODE0, 0x0000ffff, 0x00cf9a00); // code0
    write_raw_gdt_entry(GDT_SELECTOR_DATA0, 0x0000ffff, 0x00cf9200); // data0
    write_raw_gdt_entry(GDT_SELECTOR_CODE3, 0x0000ffff, 0x00cffa00); // code3
    write_raw_gdt_entry(GDT_SELECTOR_DATA3, 0x0000ffff, 0x00cff200); // data3
#else
    write_raw_gdt_entry(GDT_SELECTOR_CODE0, 0x0000ffff, 0x00af9a00); // code0
    write_raw_gdt_entry(GDT_SELECTOR_DATA0, 0x0000ffff, 0x00af9200); // data0
    write_raw_gdt_entry(GDT_SELECTOR_DATA3, 0x0000ffff, 0x008ff200); // data3
    write_raw_gdt_entry(GDT_SELECTOR_CODE3, 0x0000ffff, 0x00affa00); // code3
#endif

#if ARCH(I386)
    Descriptor tls_descriptor {};
    tls_descriptor.low = tls_descriptor.high = 0;
    tls_descriptor.dpl = 3;
    tls_descriptor.segment_present = 1;
    tls_descriptor.granularity = 0;
    tls_descriptor.operation_size64 = 0;
    tls_descriptor.operation_size32 = 1;
    tls_descriptor.descriptor_type = 1;
    tls_descriptor.type = 2;
    write_gdt_entry(GDT_SELECTOR_TLS, tls_descriptor); // tls3

    Descriptor gs_descriptor {};
    gs_descriptor.set_base(VirtualAddress { this });
    gs_descriptor.set_limit(sizeof(Processor) - 1);
    gs_descriptor.dpl = 0;
    gs_descriptor.segment_present = 1;
    gs_descriptor.granularity = 0;
    gs_descriptor.operation_size64 = 0;
    gs_descriptor.operation_size32 = 1;
    gs_descriptor.descriptor_type = 1;
    gs_descriptor.type = 2;
    write_gdt_entry(GDT_SELECTOR_PROC, gs_descriptor); // gs0
#endif

    Descriptor tss_descriptor {};
    tss_descriptor.set_base(VirtualAddress { (size_t)&m_tss & 0xffffffff });
    tss_descriptor.set_limit(sizeof(TSS) - 1);
    tss_descriptor.dpl = 0;
    tss_descriptor.segment_present = 1;
    tss_descriptor.granularity = 0;
    tss_descriptor.operation_size64 = 0;
    tss_descriptor.operation_size32 = 1;
    tss_descriptor.descriptor_type = 0;
    tss_descriptor.type = 9;
    write_gdt_entry(GDT_SELECTOR_TSS, tss_descriptor); // tss

#if ARCH(X86_64)
    Descriptor tss_descriptor_part2 {};
    tss_descriptor_part2.low = (size_t)&m_tss >> 32;
    write_gdt_entry(GDT_SELECTOR_TSS_PART2, tss_descriptor_part2);
#endif

    flush_gdt();
    load_task_register(GDT_SELECTOR_TSS);

#if ARCH(X86_64)
    MSR gs_base(MSR_GS_BASE);
    gs_base.set((u64)this);
#else
    asm volatile(
        "mov %%ax, %%ds\n"
        "mov %%ax, %%es\n"
        "mov %%ax, %%fs\n"
        "mov %%ax, %%ss\n" ::"a"(GDT_SELECTOR_DATA0)
        : "memory");
    set_gs(GDT_SELECTOR_PROC);
#endif

#if ARCH(I386)
    // Make sure CS points to the kernel code descriptor.
    // clang-format off
    asm volatile(
        "ljmpl $" __STRINGIFY(GDT_SELECTOR_CODE0) ", $sanity\n"
        "sanity:\n");
    // clang-format on
#endif
}

extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe_unused]] Thread* to_thread, [[maybe_unused]] TrapFrame* trap)
{
    VERIFY(!are_interrupts_enabled());
    VERIFY(is_kernel_mode());

    dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {} (context_first_init)", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);

    VERIFY(to_thread == Thread::current());

    Scheduler::enter_current(*from_thread, true);

    auto in_critical = to_thread->saved_critical();
    VERIFY(in_critical > 0);
    Processor::restore_in_critical(in_critical);

    // Since we got here and don't have Scheduler::context_switch in the
    // call stack (because this is the first time we switched into this
    // context), we need to notify the scheduler so that it can release
    // the scheduler lock. We don't want to enable interrupts at this point
    // as we're still in the middle of a context switch. Doing so could
    // trigger a context switch within a context switch, leading to a crash.
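    // EFLAGS bit 9 (0x200) is the interrupt enable flag (IF); masking it out below keeps
    // interrupts disabled across the hand-off to the scheduler.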
    FlatPtr flags = trap->regs->flags();
    Scheduler::leave_on_first_switch(flags & ~0x200);
}

extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
{
    VERIFY(from_thread == to_thread || from_thread->state() != Thread::Running);
    VERIFY(to_thread->state() == Thread::Running);

    bool has_fxsr = Processor::current().has_feature(CPUFeature::FXSR);
    Processor::set_current_thread(*to_thread);

    auto& from_regs = from_thread->regs();
    auto& to_regs = to_thread->regs();

    if (has_fxsr)
        asm volatile("fxsave %0"
                     : "=m"(from_thread->fpu_state()));
    else
        asm volatile("fnsave %0"
                     : "=m"(from_thread->fpu_state()));

#if ARCH(I386)
    from_regs.fs = get_fs();
    from_regs.gs = get_gs();
    set_fs(to_regs.fs);
    set_gs(to_regs.gs);
#endif

    if (from_thread->process().is_traced())
        read_debug_registers_into(from_thread->debug_register_state());

    if (to_thread->process().is_traced()) {
        write_debug_registers_from(to_thread->debug_register_state());
    } else {
        clear_debug_registers();
    }

    auto& processor = Processor::current();
#if ARCH(I386)
    auto& tls_descriptor = processor.get_gdt_entry(GDT_SELECTOR_TLS);
    tls_descriptor.set_base(to_thread->thread_specific_data());
    tls_descriptor.set_limit(to_thread->thread_specific_region_size());
#else
    MSR fs_base_msr(MSR_FS_BASE);
    fs_base_msr.set(to_thread->thread_specific_data().get());
#endif

    if (from_regs.cr3 != to_regs.cr3)
        write_cr3(to_regs.cr3);

    to_thread->set_cpu(processor.id());

    auto in_critical = to_thread->saved_critical();
    VERIFY(in_critical > 0);
    Processor::restore_in_critical(in_critical);

    if (has_fxsr)
        asm volatile("fxrstor %0" ::"m"(to_thread->fpu_state()));
    else
        asm volatile("frstor %0" ::"m"(to_thread->fpu_state()));

    // TODO: ioperm?
}

extern "C" FlatPtr do_init_context(Thread* thread, u32 flags)
{
    VERIFY_INTERRUPTS_DISABLED();
    thread->regs().set_flags(flags);
    return Processor::current().init_context(*thread, true);
}

void Processor::assume_context(Thread& thread, FlatPtr flags)
{
    dbgln_if(CONTEXT_SWITCH_DEBUG, "Assume context for thread {} {}", VirtualAddress(&thread), thread);

    VERIFY_INTERRUPTS_DISABLED();
    Scheduler::prepare_after_exec();
    // in_critical() should be 2 here. The critical section in Process::exec
    // and then the scheduler lock.
    VERIFY(Processor::in_critical() == 2);

    do_assume_context(&thread, flags);

    VERIFY_NOT_REACHED();
}

u64 Processor::time_spent_idle() const
{
    return m_idle_thread->time_in_user() + m_idle_thread->time_in_kernel();
}

}