
Kernel: Fix retrieving frame pointer from a thread

If we're trying to walk the stack for another thread, we can
no longer retrieve the EBP register from Thread::m_tss. Instead,
we need to look at the top of the kernel stack, because all threads
not currently running were last in kernel mode. Context switches
now always trigger a brief switch to kernel mode, and Thread::m_tss
is only used to save ESP and EIP.

Fixes #2678
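
A minimal sketch of the mechanism described above, assuming a simplified
switch_context; only the facts that EBP is the last callee-saved register
pushed and that m_tss keeps just ESP/EIP are taken from this commit, the
register order and the `thread` variable are illustrative:

    // Hypothetical tail of switch_context for the outgoing thread; this is
    // an illustration of the resulting stack layout, not the actual code:
    //
    //   push ebx        ; callee-saved registers...
    //   push esi
    //   push edi
    //   push ebp        ; ...EBP is pushed last
    //   mov [m_tss.esp], esp   ; only ESP (and EIP) are recorded in m_tss
    //
    // Because ESP is recorded right after that final push, the word at the
    // top of a suspended thread's kernel stack is its saved EBP:
    u32* stack_top = reinterpret_cast<u32*>(thread.tss().esp);
    u32 frame_ptr = stack_top[0]; // EBP of the switched-out thread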
Tom, 5 years ago
Parent commit: bb84fad0bf
4 changed files with 67 additions and 28 deletions
  1. Kernel/Arch/i386/CPU.cpp   +39 -0
  2. Kernel/Arch/i386/CPU.h     +1  -0
  3. Kernel/Thread.cpp          +25 -25
  4. Kernel/Thread.h            +2  -3

+ 39 - 0
Kernel/Arch/i386/CPU.cpp

@@ -960,6 +960,45 @@ const DescriptorTablePointer& Processor::get_gdtr()
     return m_gdtr;
 }
 
+bool Processor::get_context_frame_ptr(Thread& thread, u32& frame_ptr, u32& eip)
+{
+    ScopedCritical critical;
+    auto& proc = Processor::current();
+    if (&thread == proc.current_thread()) {
+        ASSERT(thread.state() == Thread::Running);
+        asm volatile("movl %%ebp, %%eax"
+                     : "=a"(frame_ptr));
+    } else {
+        // Since the thread may be running on another processor, there
+        // is a chance a context switch may happen while we're trying
+        // to get it. It also won't be entirely accurate and will merely
+        // reflect the state at the last context switch.
+        ScopedSpinLock lock(g_scheduler_lock);
+        if (thread.state() == Thread::Running) {
+            ASSERT(thread.cpu() != proc.id());
+            // TODO: If this is the case, the thread is currently running
+            // on another processor. We can't trust the kernel stack as
+            // it may be changing at any time. We need to probably send
+            // an ICI to that processor, have it walk the stack and wait
+            // until it returns the data back to us
+            dbg() << "CPU[" << proc.id() << "] getting stack for "
+                << thread << " on other CPU# " << thread.cpu() << " not yet implemented!";
+            frame_ptr = eip = 0; // TODO
+            return false;
+        } else {
+            // We need to retrieve ebp from what was last pushed to the kernel
+            // stack. Before switching out of that thread, switch_context
+            // pushed the callee-saved registers, and the last of them happens
+            // to be ebp.
+            auto& tss = thread.tss();
+            u32* stack_top = reinterpret_cast<u32*>(tss.esp);
+            frame_ptr = stack_top[0];
+            eip = tss.eip;
+        }
+    }
+    return true;
+}
+
 extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
 extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
 {
 {
     ASSERT(from_thread == to_thread || from_thread->state() != Thread::Running);
     ASSERT(from_thread == to_thread || from_thread->state() != Thread::Running);

+ 1 - 0
Kernel/Arch/i386/CPU.h

@@ -775,6 +775,7 @@ public:
     void switch_context(Thread* from_thread, Thread* to_thread);
     [[noreturn]] static void assume_context(Thread& thread, u32 flags);
     u32 init_context(Thread& thread, bool leave_crit);
+    static bool get_context_frame_ptr(Thread& thread, u32& frame_ptr, u32& eip);
     
     void set_thread_specific(u8* data, size_t len);
 };

+ 25 - 25
Kernel/Thread.cpp

@@ -739,7 +739,7 @@ void Thread::notify_finalizer()
     g_finalizer_wait_queue->wake_all();
 }
 
-String Thread::backtrace(ProcessInspectionHandle&) const
+String Thread::backtrace(ProcessInspectionHandle&)
 {
     return backtrace_impl();
 }
@@ -775,37 +775,37 @@ static bool symbolicate(const RecognizedSymbol& symbol, const Process& process,
     return true;
 }
 
-String Thread::backtrace_impl() const
+String Thread::backtrace_impl()
 {
     Vector<RecognizedSymbol, 128> recognized_symbols;
 
-    u32 start_frame;
-    if (Thread::current() == this) {
-        asm volatile("movl %%ebp, %%eax"
-                     : "=a"(start_frame));
-    } else {
-        start_frame = frame_ptr();
-        recognized_symbols.append({ tss().eip, symbolicate_kernel_address(tss().eip) });
-    }
-
     auto& process = const_cast<Process&>(this->process());
     auto& process = const_cast<Process&>(this->process());
     auto elf_bundle = process.elf_bundle();
     auto elf_bundle = process.elf_bundle();
     ProcessPagingScope paging_scope(process);
     ProcessPagingScope paging_scope(process);
 
 
-    FlatPtr stack_ptr = start_frame;
-    for (;;) {
-        if (!process.validate_read_from_kernel(VirtualAddress(stack_ptr), sizeof(void*) * 2))
-            break;
-        FlatPtr retaddr;
-
-        if (is_user_range(VirtualAddress(stack_ptr), sizeof(FlatPtr) * 2)) {
-            copy_from_user(&retaddr, &((FlatPtr*)stack_ptr)[1]);
-            recognized_symbols.append({ retaddr, symbolicate_kernel_address(retaddr) });
-            copy_from_user(&stack_ptr, (FlatPtr*)stack_ptr);
-        } else {
-            memcpy(&retaddr, &((FlatPtr*)stack_ptr)[1], sizeof(FlatPtr));
-            recognized_symbols.append({ retaddr, symbolicate_kernel_address(retaddr) });
-            memcpy(&stack_ptr, (FlatPtr*)stack_ptr, sizeof(FlatPtr));
+    // To prevent a context switch involving this thread, which may happen
+    // on another processor, we need to acquire the scheduler lock while
+    // walking the stack
+    {
+        ScopedSpinLock lock(g_scheduler_lock);
+        FlatPtr stack_ptr, eip;
+        if (Processor::get_context_frame_ptr(*this, stack_ptr, eip)) {
+            recognized_symbols.append({ eip, symbolicate_kernel_address(eip) });
+            for (;;) {
+                if (!process.validate_read_from_kernel(VirtualAddress(stack_ptr), sizeof(void*) * 2))
+                    break;
+                FlatPtr retaddr;
+ 
+                if (is_user_range(VirtualAddress(stack_ptr), sizeof(FlatPtr) * 2)) {
+                    copy_from_user(&retaddr, &((FlatPtr*)stack_ptr)[1]);
+                    recognized_symbols.append({ retaddr, symbolicate_kernel_address(retaddr) });
+                    copy_from_user(&stack_ptr, (FlatPtr*)stack_ptr);
+                } else {
+                    memcpy(&retaddr, &((FlatPtr*)stack_ptr)[1], sizeof(FlatPtr));
+                    recognized_symbols.append({ retaddr, symbolicate_kernel_address(retaddr) });
+                    memcpy(&stack_ptr, (FlatPtr*)stack_ptr, sizeof(FlatPtr));
+                }
+            }
         }
     }
 

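The stack walk above follows the conventional i386 frame-pointer chain; that
the kernel is built with frame pointers enabled is an assumption here, not
something stated by the commit itself:

    // At each step, stack_ptr points at a saved frame pointer:
    //   ((FlatPtr*)stack_ptr)[1]  holds the return address into the caller
    //   ((FlatPtr*)stack_ptr)[0]  holds the caller's saved ebp (next frame)
    FlatPtr* frame = reinterpret_cast<FlatPtr*>(stack_ptr);
    FlatPtr retaddr = frame[1]; // symbolicated and appended to the backtrace
    stack_ptr = frame[0];       // continue walking into the caller's frame
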
+ 2 - 3
Kernel/Thread.h

@@ -104,7 +104,7 @@ public:
     Process& process() { return m_process; }
     Process& process() { return m_process; }
     const Process& process() const { return m_process; }
     const Process& process() const { return m_process; }
 
 
-    String backtrace(ProcessInspectionHandle&) const;
+    String backtrace(ProcessInspectionHandle&);
     Vector<FlatPtr> raw_backtrace(FlatPtr ebp, FlatPtr eip) const;
     Vector<FlatPtr> raw_backtrace(FlatPtr ebp, FlatPtr eip) const;
 
 
     const String& name() const { return m_name; }
     const String& name() const { return m_name; }
@@ -283,7 +283,6 @@ public:
     u32 affinity() const { return m_cpu_affinity; }
     void set_affinity(u32 affinity) { m_cpu_affinity = affinity; }
 
-    u32 frame_ptr() const { return m_tss.ebp; }
     u32 stack_ptr() const { return m_tss.esp; }
 
     RegisterState& get_register_dump_from_stack();
@@ -465,7 +464,7 @@ private:
     friend class WaitQueue;
     bool unlock_process_if_locked(u32& prev_crit);
     void relock_process(bool did_unlock, u32 prev_crit);
-    String backtrace_impl() const;
+    String backtrace_impl();
     void reset_fpu_state();
 
     Process& m_process;