// Kernel/Arch/x86/i386/Processor.cpp
  1. /*
  2. * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
  3. *
  4. * SPDX-License-Identifier: BSD-2-Clause
  5. */
  6. #include <AK/StdLibExtras.h>
  7. #include <Kernel/Arch/x86/Processor.h>
  8. #include <Kernel/Arch/x86/TrapFrame.h>
  9. #include <Kernel/Panic.h>
  10. #include <Kernel/Process.h>
  11. #include <Kernel/Random.h>
  12. #include <Kernel/Sections.h>
  13. #include <Kernel/Thread.h>
  14. namespace Kernel {
// Number of bytes of arguments (to_thread, from_thread) that switch_context()
// pushes onto the new thread's stack before control reaches
// thread_context_first_enter, which pops them again with a single addl.
#define ENTER_THREAD_CONTEXT_ARGS_SIZE (2 * 4) // to_thread, from_thread

// Entry points implemented in the asm() blocks below (or elsewhere in the
// kernel); declared with C linkage so the symbols are unmangled and can be
// referenced from inline assembly.
extern "C" void thread_context_first_enter(void);
extern "C" void do_assume_context(Thread* thread, u32 flags);
extern "C" void exit_kernel_thread(void);
// clang-format off
asm(
// enter_thread_context returns to here the first time a thread is executed.
// Stack layout on entry (set up by switch_context()/init_context()):
//   0(%esp): from_thread
//   4(%esp): to_thread
//   8(%esp): pointer to the thread's TrapFrame
".globl thread_context_first_enter \n"
"thread_context_first_enter: \n"
// switch_context will have pushed from_thread and to_thread to our new
// stack prior to thread_context_first_enter() being called, and the
// pointer to TrapFrame was the top of the stack before that
" movl 8(%esp), %ebx \n" // save pointer to TrapFrame (ebx survives the call below)
" cld \n" // SysV i386 ABI requires DF clear before calling into C/C++
" call context_first_init \n" // consumes from_thread/to_thread still on the stack
" addl $" __STRINGIFY(ENTER_THREAD_CONTEXT_ARGS_SIZE) ", %esp \n" // drop from_thread/to_thread
" movl %ebx, 0(%esp) \n" // restore pointer to TrapFrame as the sole stack word
" jmp common_trap_exit \n" // unwind through the iret frame into the thread proper
);
// clang-format on
// clang-format off
asm(
// do_assume_context(Thread* thread, u32 flags): discard the current stack and
// restart execution on the stack that Processor::init_context() builds for
// `thread`, entering it through thread_context_first_enter.
".global do_assume_context \n"
"do_assume_context: \n"
" movl 4(%esp), %ebx \n" // ebx := thread (first cdecl argument)
" movl 8(%esp), %esi \n" // esi := flags (second cdecl argument)
// We're going to call Processor::init_context, so just make sure
// we have enough stack space so we don't stomp over it
" subl $(" __STRINGIFY(4 + REGISTER_STATE_SIZE + TRAP_FRAME_SIZE + 4) "), %esp \n"
" pushl %esi \n" // arg 2: flags
" pushl %ebx \n" // arg 1: thread
" cld \n" // DF must be clear before calling into C/C++
" call do_init_context \n" // returns the prepared stack top in eax
" addl $8, %esp \n"
" movl %eax, %esp \n" // move stack pointer to what Processor::init_context set up for us
" pushl %ebx \n" // push to_thread
" pushl %ebx \n" // push from_thread (same thread: we are assuming, not switching)
" pushl $thread_context_first_enter \n" // should be same as tss.eip
" jmp enter_thread_context \n"
);
// clang-format on
  56. String Processor::platform_string() const
  57. {
  58. // FIXME: other platforms
  59. return "i386";
  60. }
// Prepares a thread's kernel stack so the scheduler can run it for the first
// time. Builds, from the top of the kernel stack downward:
//   - a RegisterState ("iretframe") seeded from the thread's TSS,
//   - a TrapFrame pointing at that RegisterState,
//   - a single word holding the address of the TrapFrame,
// then redirects the TSS so that switch_context() first "returns" into
// thread_context_first_enter in kernel mode, which unwinds through the
// iretframe (via common_trap_exit) into the thread's real entry point.
//
// thread: thread being initialized; thread.tss() supplies the initial
//         register values and is rewritten to enter the trampoline.
// leave_crit: if true, drop the extra critical section taken in Process::exec.
// Returns the prepared stack top (also stored in tss.esp), i.e. the %esp
// value to use when entering enter_thread_context.
u32 Processor::init_context(Thread& thread, bool leave_crit)
{
    VERIFY(is_kernel_mode());
    VERIFY(g_scheduler_lock.is_locked());
    if (leave_crit) {
        // Leave the critical section we set up in Process::exec,
        // but because we still have the scheduler lock we should end up with 1
        m_in_critical--; // leave it without triggering anything or restoring flags
        VERIFY(in_critical() == 1);
    }

    u32 kernel_stack_top = thread.kernel_stack_top();

    // Add a random offset between 0-256 (16-byte aligned) so the stack
    // layout is a little less predictable.
    kernel_stack_top -= round_up_to_power_of_two(get_fast_random<u8>(), 16);

    u32 stack_top = kernel_stack_top;

    // TODO: handle NT?
    VERIFY((cpu_flags() & 0x24000) == 0); // Assume !(NT | VM)

    auto& tss = thread.tss();
    // The low two bits of CS are the requested privilege level; nonzero
    // means this thread will iret back out to user mode.
    bool return_to_user = (tss.cs & 3) != 0;

    // make room for an interrupt frame
    if (!return_to_user) {
        // userspace_esp and userspace_ss are not popped off by iret
        // unless we're switching back to user mode
        stack_top -= sizeof(RegisterState) - 2 * sizeof(u32);

        // For kernel threads we'll push the thread function argument
        // which should be in tss.esp and exit_kernel_thread as return
        // address.
        stack_top -= 2 * sizeof(u32);
        *reinterpret_cast<u32*>(kernel_stack_top - 2 * sizeof(u32)) = tss.esp;
        *reinterpret_cast<u32*>(kernel_stack_top - 3 * sizeof(u32)) = FlatPtr(&exit_kernel_thread);
    } else {
        stack_top -= sizeof(RegisterState);
    }

    // we want to end up 16-byte aligned, %esp + 4 should be aligned
    stack_top -= sizeof(u32);
    *reinterpret_cast<u32*>(kernel_stack_top - sizeof(u32)) = 0;

    // set up the stack so that after returning from thread_context_first_enter()
    // we will end up either in kernel mode or user mode, depending on how the thread is set up
    // However, the first step is to always start in kernel mode with thread_context_first_enter
    RegisterState& iretframe = *reinterpret_cast<RegisterState*>(stack_top);
    iretframe.ss = tss.ss;
    iretframe.gs = tss.gs;
    iretframe.fs = tss.fs;
    iretframe.es = tss.es;
    iretframe.ds = tss.ds;
    iretframe.edi = tss.edi;
    iretframe.esi = tss.esi;
    iretframe.ebp = tss.ebp;
    iretframe.esp = 0; // the in-frame esp is unused by iret; user esp goes below
    iretframe.ebx = tss.ebx;
    iretframe.edx = tss.edx;
    iretframe.ecx = tss.ecx;
    iretframe.eax = tss.eax;
    iretframe.eflags = tss.eflags;
    iretframe.eip = tss.eip;
    iretframe.cs = tss.cs;
    if (return_to_user) {
        // Only a cross-privilege iret pops these two extra words.
        iretframe.userspace_esp = tss.esp;
        iretframe.userspace_ss = tss.ss;
    }

    // make space for a trap frame
    stack_top -= sizeof(TrapFrame);
    TrapFrame& trap = *reinterpret_cast<TrapFrame*>(stack_top);
    trap.regs = &iretframe;
    trap.prev_irq_level = 0;
    trap.next_trap = nullptr;

    stack_top -= sizeof(u32); // pointer to TrapFrame
    *reinterpret_cast<u32*>(stack_top) = stack_top + 4;

    if constexpr (CONTEXT_SWITCH_DEBUG) {
        if (return_to_user) {
            dbgln("init_context {} ({}) set up to execute at eip={}:{}, esp={}, stack_top={}, user_top={}:{}",
                thread,
                VirtualAddress(&thread),
                iretframe.cs, tss.eip,
                VirtualAddress(tss.esp),
                VirtualAddress(stack_top),
                iretframe.userspace_ss,
                iretframe.userspace_esp);
        } else {
            dbgln("init_context {} ({}) set up to execute at eip={}:{}, esp={}, stack_top={}",
                thread,
                VirtualAddress(&thread),
                iretframe.cs, tss.eip,
                VirtualAddress(tss.esp),
                VirtualAddress(stack_top));
        }
    }

    // make switch_context() always first return to thread_context_first_enter()
    // in kernel mode, so set up these values so that we end up popping iretframe
    // off the stack right after the context switch completed, at which point
    // control is transferred to what iretframe is pointing to.
    tss.eip = FlatPtr(&thread_context_first_enter);
    tss.esp0 = kernel_stack_top;
    tss.esp = stack_top;
    tss.cs = GDT_SELECTOR_CODE0;
    tss.ds = GDT_SELECTOR_DATA0;
    tss.es = GDT_SELECTOR_DATA0;
    tss.gs = GDT_SELECTOR_DATA0;
    tss.ss = GDT_SELECTOR_DATA0;
    tss.fs = GDT_SELECTOR_PROC;
    return stack_top;
}
// Performs a cooperative context switch from from_thread to to_thread.
// Saves the callee-saved registers and resume point of from_thread into its
// TSS, switches %esp to to_thread's saved stack, and jumps into
// enter_thread_context. Control comes back at local label "1:" when some
// other thread later switches back to from_thread.
// Note: from_thread/to_thread are reference parameters and are additionally
// pinned in edx/eax across the switch so they survive the stack change.
void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
{
    VERIFY(!in_irq());
    VERIFY(m_in_critical == 1);
    VERIFY(is_kernel_mode());

    dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context --> switching out of: {} {}", VirtualAddress(from_thread), *from_thread);
    from_thread->save_critical(m_in_critical);

    // clang-format off
    // Switch to new thread context, passing from_thread and to_thread
    // through to the new context using registers edx and eax
    asm volatile(
        // NOTE: changing how much we push to the stack affects
        //       SWITCH_CONTEXT_TO_STACK_SIZE and thread_context_first_enter()!
        "pushfl \n" // save eflags plus callee-saved registers on the old stack
        "pushl %%ebx \n"
        "pushl %%esi \n"
        "pushl %%edi \n"
        "pushl %%ebp \n"
        "movl %%esp, %[from_esp] \n" // record where from_thread's stack ends
        "movl $1f, %[from_eip] \n" // from_thread resumes at label 1 below
        "movl %[to_esp0], %%ebx \n"
        "movl %%ebx, %[tss_esp0] \n" // update CPU TSS esp0 for to_thread's kernel stack
        "movl %[to_esp], %%esp \n" // switch stacks
        "pushl %[to_thread] \n"
        "pushl %[from_thread] \n"
        "pushl %[to_eip] \n" // "return address" for enter_thread_context
        "cld \n"
        "jmp enter_thread_context \n"
        "1: \n"
        // Resumed here later: unwind the mirror of the pushes above.
        "popl %%edx \n"
        "popl %%eax \n"
        "popl %%ebp \n"
        "popl %%edi \n"
        "popl %%esi \n"
        "popl %%ebx \n"
        "popfl \n"
        : [from_esp] "=m" (from_thread->tss().esp),
        [from_eip] "=m" (from_thread->tss().eip),
        [tss_esp0] "=m" (m_tss.esp0),
        "=d" (from_thread), // needed so that from_thread retains the correct value
        "=a" (to_thread) // needed so that to_thread retains the correct value
        : [to_esp] "g" (to_thread->tss().esp),
        [to_esp0] "g" (to_thread->tss().esp0),
        [to_eip] "c" (to_thread->tss().eip),
        [from_thread] "d" (from_thread),
        [to_thread] "a" (to_thread)
        : "memory"
    );
    // clang-format on

    dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {}", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);

    Processor::current().restore_in_critical(to_thread->saved_critical());
}
// Abandons the current execution context and starts running `thread` from
// scratch (used after exec). Delegates to the do_assume_context asm stub,
// which rebuilds the stack via Processor::init_context; this never returns.
// flags: eflags value to hand to the assembly stub (u32 there).
void Processor::assume_context(Thread& thread, FlatPtr flags)
{
    dbgln_if(CONTEXT_SWITCH_DEBUG, "Assume context for thread {} {}", VirtualAddress(&thread), thread);

    VERIFY_INTERRUPTS_DISABLED();
    Scheduler::prepare_after_exec();
    // in_critical() should be 2 here. The critical section in Process::exec
    // and then the scheduler lock
    VERIFY(Processor::current().in_critical() == 2);

    do_assume_context(&thread, flags);

    VERIFY_NOT_REACHED();
}
// Bootstraps context switching on this CPU by hand-crafting the very first
// switch into initial_thread: copies the thread's TSS into the per-CPU TSS,
// builds a minimal trap-exit frame on the thread's stack, runs the
// init_finished handshake, and finally lret's into the thread's entry point
// in kernel mode. This function never returns.
// initial_thread: must belong to a kernel process (asserted below).
UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_thread)
{
    VERIFY(initial_thread.process().is_kernel_process());

    auto& tss = initial_thread.tss();
    m_tss = tss;
    m_tss.esp0 = tss.esp0;
    m_tss.ss0 = GDT_SELECTOR_DATA0;
    // user mode needs to be able to switch to kernel mode:
    // (selectors carry RPL 3 so ring 3 can use this TSS for the transition)
    m_tss.cs = m_tss.ds = m_tss.es = m_tss.gs = m_tss.ss = GDT_SELECTOR_CODE0 | 3;
    m_tss.fs = GDT_SELECTOR_PROC | 3;

    m_scheduler_initialized = true;

    // clang-format off
    asm volatile(
        "movl %[new_esp], %%esp \n" // switch to new stack
        "pushl %[from_to_thread] \n" // to_thread
        "pushl %[from_to_thread] \n" // from_thread (same thread: nothing to switch away from)
        "pushl $" __STRINGIFY(GDT_SELECTOR_CODE0) " \n" // lret far-return CS
        "pushl %[new_eip] \n" // save the entry eip to the stack
        "movl %%esp, %%ebx \n"
        "addl $20, %%ebx \n" // calculate pointer to TrapFrame (skip the 5 words pushed/present above)
        "pushl %%ebx \n"
        "cld \n"
        "pushl %[cpu] \n" // push argument for init_finished before register is clobbered
        "call pre_init_finished \n"
        "call init_finished \n"
        "addl $4, %%esp \n" // drop the cpu argument
        "call post_init_finished \n"
        "call enter_trap_no_irq \n"
        "addl $4, %%esp \n" // drop the TrapFrame pointer
        "lret \n" // far-return through the CS:EIP pushed above into the thread
        :: [new_esp] "g" (tss.esp),
        [new_eip] "a" (tss.eip),
        [from_to_thread] "b" (&initial_thread),
        [cpu] "c" (id())
    );
    // clang-format on

    VERIFY_NOT_REACHED();
}
  263. }