Browse Source

Kernel: Implement initializing threads on x86_64

Gunnar Beutner, 4 years ago
parent
commit
9ed051fe25

+ 31 - 0
Kernel/Arch/x86/common/Processor.cpp

@@ -40,6 +40,10 @@ Atomic<u32> Processor::s_idle_cpu_mask { 0 };
 extern "C" void thread_context_first_enter(void);
 extern "C" void exit_kernel_thread(void);
 
+// The compiler can't see the calls to this function inside assembly.
+// Declare it, to avoid dead code warnings.
+extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, TrapFrame* trap) __attribute__((used));
+
 UNMAP_AFTER_INIT static void sse_init()
 {
     write_cr0((read_cr0() & 0xfffffffbu) | 0x2);
@@ -1134,4 +1138,31 @@ UNMAP_AFTER_INIT void Processor::gdt_init()
     // clang-format on
 #endif
 }
+
+extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe_unused]] Thread* to_thread, [[maybe_unused]] TrapFrame* trap)
+{
+    VERIFY(!are_interrupts_enabled());
+    VERIFY(is_kernel_mode());
+
+    dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {} (context_first_init)", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
+
+    VERIFY(to_thread == Thread::current());
+
+    Scheduler::enter_current(*from_thread, true);
+
+    // Since we got here and don't have Scheduler::context_switch in the
+    // call stack (because this is the first time we switched into this
+    // context), we need to notify the scheduler so that it can release
+    // the scheduler lock. We don't want to enable interrupts at this point
+    // as we're still in the middle of a context switch. Doing so could
+    // trigger a context switch within a context switch, leading to a crash.
+    FlatPtr flags;
+#if ARCH(I386)
+    flags = trap->regs->eflags;
+#else
+    flags = trap->regs->rflags;
+#endif
+    Scheduler::leave_on_first_switch(flags & ~0x200);
+}
+
 }

+ 0 - 21
Kernel/Arch/x86/i386/CPU.cpp

@@ -19,7 +19,6 @@ namespace Kernel {
 // The compiler can't see the calls to these functions inside assembly.
 // Declare them, to avoid dead code warnings.
 extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread) __attribute__((used));
-extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, TrapFrame* trap) __attribute__((used));
 extern "C" u32 do_init_context(Thread* thread, u32 flags) __attribute__((used));
 
 extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
@@ -73,26 +72,6 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
     // TODO: ioperm?
 }
 
-extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe_unused]] Thread* to_thread, [[maybe_unused]] TrapFrame* trap)
-{
-    VERIFY(!are_interrupts_enabled());
-    VERIFY(is_kernel_mode());
-
-    dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {} (context_first_init)", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
-
-    VERIFY(to_thread == Thread::current());
-
-    Scheduler::enter_current(*from_thread, true);
-
-    // Since we got here and don't have Scheduler::context_switch in the
-    // call stack (because this is the first time we switched into this
-    // context), we need to notify the scheduler so that it can release
-    // the scheduler lock. We don't want to enable interrupts at this point
-    // as we're still in the middle of a context switch. Doing so could
-    // trigger a context switch within a context switch, leading to a crash.
-    Scheduler::leave_on_first_switch(trap->regs->eflags & ~0x200);
-}
-
 extern "C" u32 do_init_context(Thread* thread, u32 flags)
 {
     VERIFY_INTERRUPTS_DISABLED();

+ 0 - 6
Kernel/Arch/x86/x86_64/CPU.cpp

@@ -19,7 +19,6 @@ namespace Kernel {
 // The compiler can't see the calls to these functions inside assembly.
 // Declare them, to avoid dead code warnings.
 extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread) __attribute__((used));
-extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, TrapFrame* trap) __attribute__((used));
 extern "C" u32 do_init_context(Thread* thread, u32 flags) __attribute__((used));
 
 extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
@@ -29,11 +28,6 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
     TODO();
 }
 
-extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe_unused]] Thread* to_thread, [[maybe_unused]] TrapFrame* trap)
-{
-    TODO();
-}
-
 extern "C" u32 do_init_context(Thread* thread, u32 flags)
 {
     (void)thread;

+ 22 - 17
Kernel/Arch/x86/x86_64/Processor.cpp

@@ -15,7 +15,6 @@
 
 
 namespace Kernel {
 
-#define ENTER_THREAD_CONTEXT_ARGS_SIZE (2 * 4) //  to_thread, from_thread
 extern "C" void thread_context_first_enter(void);
 extern "C" void do_assume_context(Thread* thread, u32 flags);
 extern "C" void exit_kernel_thread(void);
@@ -28,11 +27,11 @@ asm(
 // switch_context will have pushed from_thread and to_thread to our new
 // stack prior to thread_context_first_enter() being called, and the
 // pointer to TrapFrame was the top of the stack before that
-"    movl 8(%esp), %ebx \n" // save pointer to TrapFrame
+"    popq %rdi \n" // from_thread (argument 0)
+"    popq %rsi \n" // to_thread (argument 1)
+"    popq %rdx \n" // pointer to TrapFrame (argument 2)
 "    cld \n"
 "    call context_first_init \n"
-"    addl $" __STRINGIFY(ENTER_THREAD_CONTEXT_ARGS_SIZE) ", %esp \n"
-"    movl %ebx, 0(%esp) \n" // push pointer to TrapFrame
 "    jmp common_trap_exit \n"
 );
 // clang-format on
@@ -79,12 +78,12 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
         VERIFY(in_critical() == 1);
     }
 
-    u32 kernel_stack_top = thread.kernel_stack_top();
+    u64 kernel_stack_top = thread.kernel_stack_top();
 
     // Add a random offset between 0-256 (16-byte aligned)
     kernel_stack_top -= round_up_to_power_of_two(get_fast_random<u8>(), 16);
 
-    u32 stack_top = kernel_stack_top;
+    u64 stack_top = kernel_stack_top;
 
     // TODO: handle NT?
     VERIFY((cpu_flags() & 0x24000) == 0); // Assume !(NT | VM)
@@ -102,13 +101,13 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
         // which should be in regs.rsp and exit_kernel_thread as return
         // address.
         stack_top -= 2 * sizeof(u64);
-        *reinterpret_cast<u64*>(kernel_stack_top - 2 * sizeof(u32)) = regs.rsp;
-        *reinterpret_cast<u32*>(kernel_stack_top - 3 * sizeof(u32)) = FlatPtr(&exit_kernel_thread);
+        *reinterpret_cast<u64*>(kernel_stack_top - 2 * sizeof(u64)) = regs.rsp;
+        *reinterpret_cast<u64*>(kernel_stack_top - 3 * sizeof(u64)) = FlatPtr(&exit_kernel_thread);
     } else {
         stack_top -= sizeof(RegisterState);
     }
 
-    // we want to end up 16-byte aligned, %esp + 4 should be aligned
+    // we want to end up 16-byte aligned, %rsp + 8 should be aligned
     stack_top -= sizeof(u64);
     *reinterpret_cast<u64*>(kernel_stack_top - sizeof(u64)) = 0;
 
@@ -116,7 +115,19 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
     // we will end up either in kernel mode or user mode, depending on how the thread is set up
     // However, the first step is to always start in kernel mode with thread_context_first_enter
     RegisterState& iretframe = *reinterpret_cast<RegisterState*>(stack_top);
-    // FIXME: copy state to be recovered through TSS
+    iretframe.rdi = regs.rdi;
+    iretframe.rsi = regs.rsi;
+    iretframe.rbp = regs.rbp;
+    iretframe.rsp = 0;
+    iretframe.rbx = regs.rbx;
+    iretframe.rdx = regs.rdx;
+    iretframe.rcx = regs.rcx;
+    iretframe.rax = regs.rax;
+    iretframe.rflags = regs.rflags;
+    iretframe.rip = regs.rip;
+    iretframe.cs = regs.cs;
+    if (return_to_user)
+        iretframe.userspace_rsp = regs.rsp;
 
 
     // make space for a trap frame
     stack_top -= sizeof(TrapFrame);
@@ -205,21 +216,15 @@ UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_th
         "movq %[new_rsp], %%rsp \n" // switch to new stack
         "pushq %[from_to_thread] \n" // to_thread
         "pushq %[from_to_thread] \n" // from_thread
-        "pushq $" __STRINGIFY(GDT_SELECTOR_CODE0) " \n"
         "pushq %[new_rip] \n" // save the entry rip to the stack
-        "movq %%rsp, %%rbx \n"
-        "addq $40, %%rbx \n" // calculate pointer to TrapFrame
-        "pushq %%rbx \n"
         "cld \n"
         "pushq %[cpu] \n" // push argument for init_finished before register is clobbered
         "call pre_init_finished \n"
         "pop %%rdi \n" // move argument for init_finished into place
         "call init_finished \n"
-        "addq $8, %%rsp \n"
         "call post_init_finished \n"
-        "pop %%rdi \n" // move pointer to TrapFrame into place
+        "movq 24(%%rsp), %%rdi \n" // move pointer to TrapFrame into place
         "call enter_trap_no_irq \n"
-        "addq $8, %%rsp \n"
         "retq \n"
         :: [new_rsp] "g" (regs.rsp),
         [new_rip] "a" (regs.rip),

+ 6 - 0
Kernel/Thread.cpp

@@ -105,7 +105,13 @@ Thread::Thread(NonnullRefPtr<Process> process, NonnullOwnPtr<Region> kernel_stac
         m_regs.gs = GDT_SELECTOR_TLS | 3;
     }
 #else
+    // Only IF is set when a process boots.
     m_regs.rflags = 0x0202;
+
+    if (m_process->is_kernel_process())
+        m_regs.cs = GDT_SELECTOR_CODE0;
+    else
+        m_regs.cs = GDT_SELECTOR_CODE3 | 3;
 #endif
 
     m_regs.cr3 = m_process->space().page_directory().cr3();