Processor.cpp

/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/StdLibExtras.h>
#include <Kernel/Arch/Processor.h>
#include <Kernel/Arch/x86/TrapFrame.h>
#include <Kernel/Panic.h>
#include <Kernel/Process.h>
#include <Kernel/Random.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Sections.h>
#include <Kernel/Thread.h>

namespace Kernel {

NAKED void thread_context_first_enter(void)
{
    // enter_thread_context returns here the first time a thread is executing
    asm(
        // switch_context will have pushed from_thread and to_thread to our new
        // stack prior to thread_context_first_enter() being called, and the
        // pointer to TrapFrame was the top of the stack before that
  23. " popq %rdi \n" // from_thread (argument 0)
  24. " popq %rsi \n" // to_thread (argument 1)
  25. " popq %rdx \n" // pointer to TrapFrame (argument 2)
  26. " cld \n"
  27. " call context_first_init \n"
  28. " jmp common_trap_exit \n");
  29. };
NAKED void do_assume_context(Thread*, u32)
{
    // clang-format off
    // FIXME: I hope (Thread* thread, u32 flags) aren't compiled away
    asm(
        "    movq %rdi, %r12 \n" // save thread ptr
        "    movq %rsi, %r13 \n" // save flags
        // We're going to call Processor::init_context, so just make sure
        // we have enough stack space so we don't stomp over it
        "    subq $(" __STRINGIFY(16 + REGISTER_STATE_SIZE + TRAP_FRAME_SIZE + 8) "), %rsp \n"
        "    cld \n"
        "    call do_init_context \n"
        "    movq %rax, %rsp \n" // move stack pointer to what Processor::init_context set up for us
        "    movq %r12, %rdi \n" // to_thread
        "    movq %r12, %rsi \n" // from_thread
        "    pushq %r12 \n" // to_thread (for thread_context_first_enter)
        "    pushq %r12 \n" // from_thread (for thread_context_first_enter)
  47. " leaq thread_context_first_enter(%rip), %r12 \n" // should be same as regs.rip
  48. " pushq %r12 \n"
  49. " jmp enter_thread_context \n");
  50. // clang-format on
  51. }
StringView Processor::platform_string()
{
    return "x86_64"sv;
}

// FIXME: For the most part this is a copy of the i386-specific function; get rid of the code duplication
FlatPtr Processor::init_context(Thread& thread, bool leave_crit)
{
    VERIFY(is_kernel_mode());
    VERIFY(g_scheduler_lock.is_locked());
    if (leave_crit) {
        // Leave the critical section we set up in Process::exec, but because
        // we still hold the scheduler lock we should end up with 1
        VERIFY(in_critical() == 2);
        m_in_critical = 1; // leave it without triggering anything or restoring flags
    }

    u64 kernel_stack_top = thread.kernel_stack_top();

    // Add a random offset between 0 and 256 bytes (16-byte aligned)
    kernel_stack_top -= round_up_to_power_of_two(get_fast_random<u8>(), 16);

    u64 stack_top = kernel_stack_top;

    // TODO: handle NT?
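    // RFLAGS bit 14 is NT (nested task) and bit 17 is VM (virtual-8086 mode);
    // together they make up the 0x24000 mask checked below.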
    VERIFY((cpu_flags() & 0x24000) == 0); // Assume !(NT | VM)

    auto& regs = thread.regs();
    bool return_to_user = (regs.cs & 3) != 0;

    stack_top -= 1 * sizeof(u64);
    *reinterpret_cast<u64*>(kernel_stack_top - 2 * sizeof(u64)) = FlatPtr(&exit_kernel_thread);

    stack_top -= sizeof(RegisterState);
    // We want to end up 16-byte aligned: %rsp + 8 should be aligned
    stack_top -= sizeof(u64);
    *reinterpret_cast<u64*>(kernel_stack_top - sizeof(u64)) = 0;

    // Set up the stack so that after returning from thread_context_first_enter()
    // we will end up either in kernel mode or user mode, depending on how the
    // thread was set up. However, the first step is to always start in kernel
    // mode with thread_context_first_enter.
    RegisterState& iretframe = *reinterpret_cast<RegisterState*>(stack_top);
    iretframe.rdi = regs.rdi;
    iretframe.rsi = regs.rsi;
    iretframe.rbp = regs.rbp;
    iretframe.rsp = 0;
    iretframe.rbx = regs.rbx;
    iretframe.rdx = regs.rdx;
    iretframe.rcx = regs.rcx;
    iretframe.rax = regs.rax;
    iretframe.r8 = regs.r8;
    iretframe.r9 = regs.r9;
    iretframe.r10 = regs.r10;
    iretframe.r11 = regs.r11;
    iretframe.r12 = regs.r12;
    iretframe.r13 = regs.r13;
    iretframe.r14 = regs.r14;
    iretframe.r15 = regs.r15;
    iretframe.rflags = regs.rflags;
    iretframe.rip = regs.rip;
    iretframe.cs = regs.cs;
    if (return_to_user) {
        iretframe.userspace_rsp = regs.rsp;
        iretframe.userspace_ss = GDT_SELECTOR_DATA3 | 3;
    } else {
        iretframe.userspace_rsp = kernel_stack_top;
        iretframe.userspace_ss = 0;
    }
    // Make space for a trap frame
    stack_top -= sizeof(TrapFrame);
    TrapFrame& trap = *reinterpret_cast<TrapFrame*>(stack_top);
    trap.regs = &iretframe;
    trap.prev_irq_level = 0;
    trap.next_trap = nullptr;

    stack_top -= sizeof(u64); // pointer to TrapFrame
    *reinterpret_cast<u64*>(stack_top) = stack_top + 8;
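    // The stack we just built, from stack_top towards higher addresses:
    //   pointer to TrapFrame  <- stack_top (argument 2 for thread_context_first_enter)
    //   TrapFrame             <- trap.regs points at iretframe (next entry up)
    //   RegisterState         <- iretframe, consumed by common_trap_exit
    //   &exit_kernel_thread   <- fallback return address for kernel threads
    //   0                     <- terminates the frame chain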
    if constexpr (CONTEXT_SWITCH_DEBUG) {
        if (return_to_user) {
            dbgln("init_context {} ({}) set up to execute at rip={}:{}, rsp={}, stack_top={}, user_top={}",
                thread,
                VirtualAddress(&thread),
                iretframe.cs, regs.rip,
                VirtualAddress(regs.rsp),
                VirtualAddress(stack_top),
                iretframe.userspace_rsp);
        } else {
            dbgln("init_context {} ({}) set up to execute at rip={}:{}, rsp={}, stack_top={}",
                thread,
                VirtualAddress(&thread),
                iretframe.cs, regs.rip,
                VirtualAddress(regs.rsp),
                VirtualAddress(stack_top));
        }
    }
    // Make switch_context() always first return to thread_context_first_enter()
    // in kernel mode, so set up these values so that we end up popping iretframe
    // off the stack right after the context switch completes, at which point
    // control is transferred to what iretframe is pointing to.
    regs.rip = FlatPtr(&thread_context_first_enter);
    regs.rsp0 = kernel_stack_top;
    regs.rsp = stack_top;
    regs.cs = GDT_SELECTOR_CODE0;
    return stack_top;
}

void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
{
    VERIFY(!m_in_irq);
    VERIFY(m_in_critical == 1);
    VERIFY(is_kernel_mode());

    dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context --> switching out of: {} {}", VirtualAddress(from_thread), *from_thread);

    // m_in_critical is restored in enter_thread_context
    from_thread->save_critical(m_in_critical);

    // clang-format off
    // Switch to new thread context, passing from_thread and to_thread
    // through to the new context using registers rdx and rax
    asm volatile(
        // NOTE: changing how much we push to the stack affects thread_context_first_enter()!
        "pushfq \n"
        "pushq %%rbx \n"
        "pushq %%rcx \n"
        "pushq %%rbp \n"
        "pushq %%rsi \n"
        "pushq %%rdi \n"
        "pushq %%r8 \n"
        "pushq %%r9 \n"
        "pushq %%r10 \n"
        "pushq %%r11 \n"
        "pushq %%r12 \n"
        "pushq %%r13 \n"
        "pushq %%r14 \n"
        "pushq %%r15 \n"
        "movq %%rsp, %[from_rsp] \n"
        "leaq 1f(%%rip), %%rbx \n"
        "movq %%rbx, %[from_rip] \n"
        "movq %[to_rsp0], %%rbx \n"
        "movl %%ebx, %[tss_rsp0l] \n"
        "shrq $32, %%rbx \n"
        "movl %%ebx, %[tss_rsp0h] \n"
  181. "movq %[to_rsp], %%rsp \n"
  182. "pushq %[to_thread] \n"
  183. "pushq %[from_thread] \n"
  184. "pushq %[to_rip] \n"
  185. "cld \n"
  186. "movq 16(%%rsp), %%rsi \n"
  187. "movq 8(%%rsp), %%rdi \n"
  188. "jmp enter_thread_context \n"
  189. "1: \n"
  190. "popq %%rdx \n"
  191. "popq %%rax \n"
  192. "popq %%r15 \n"
  193. "popq %%r14 \n"
  194. "popq %%r13 \n"
  195. "popq %%r12 \n"
  196. "popq %%r11 \n"
  197. "popq %%r10 \n"
  198. "popq %%r9 \n"
  199. "popq %%r8 \n"
  200. "popq %%rdi \n"
  201. "popq %%rsi \n"
  202. "popq %%rbp \n"
  203. "popq %%rcx \n"
  204. "popq %%rbx \n"
  205. "popfq \n"
  206. : [from_rsp] "=m" (from_thread->regs().rsp),
  207. [from_rip] "=m" (from_thread->regs().rip),
  208. [tss_rsp0l] "=m" (m_tss.rsp0l),
  209. [tss_rsp0h] "=m" (m_tss.rsp0h),
  210. "=d" (from_thread), // needed so that from_thread retains the correct value
  211. "=a" (to_thread) // needed so that to_thread retains the correct value
  212. : [to_rsp] "g" (to_thread->regs().rsp),
  213. [to_rsp0] "g" (to_thread->regs().rsp0),
  214. [to_rip] "c" (to_thread->regs().rip),
  215. [from_thread] "d" (from_thread),
  216. [to_thread] "a" (to_thread)
  217. : "memory", "rbx"
  218. );
  219. // clang-format on
  220. dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {}", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
  221. }
UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_thread)
{
    VERIFY(initial_thread.process().is_kernel_process());

    auto& regs = initial_thread.regs();
    m_tss.iomapbase = sizeof(m_tss);
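    // Pointing iomapbase past the end of the TSS means there is no I/O
    // permission bitmap, so any user-mode port I/O raises #GP.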
    m_tss.rsp0l = regs.rsp0 & 0xffffffff;
    m_tss.rsp0h = regs.rsp0 >> 32;

    m_scheduler_initialized = true;

    // clang-format off
    asm volatile(
        "movq %[new_rsp], %%rsp \n" // switch to new stack
        "pushq %[from_to_thread] \n" // to_thread
        "pushq %[from_to_thread] \n" // from_thread
        "pushq %[new_rip] \n" // save the entry rip to the stack
        "cld \n"
        "pushq %[cpu] \n" // push argument for init_finished before register is clobbered
        "call pre_init_finished \n"
        "pop %%rdi \n" // move argument for init_finished into place
        "call init_finished \n"
        "call post_init_finished \n"
        "movq 24(%%rsp), %%rdi \n" // move pointer to TrapFrame into place
        "call enter_trap_no_irq \n"
        "retq \n"
        :: [new_rsp] "g" (regs.rsp),
           [new_rip] "a" (regs.rip),
           [from_to_thread] "b" (&initial_thread),
           [cpu] "c" ((u64)id())
    );
    // clang-format on

    VERIFY_NOT_REACHED();
}

}