
Kernel: Don't log profile data before/after the process/thread lifetime

There were a few cases where we could end up logging profiling events
before or after the associated process or thread exists in the profile:

After enabling profiling, we might end up with CPU samples before we
had a chance to synthesize process/thread creation events.

After a thread exits, we would still log associated kmalloc/kfree
events. Instead, we now just ignore those events.
Gunnar Beutner · 4 years ago · commit 01c75e3a34
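
The pattern the diffs below introduce is a per-thread "profiling suppressed" flag: exiting threads set it just before their exit event is recorded, and every per-thread event emitter checks it before appending anything. (The other half of the fix, in sys$profiling_enable, temporarily narrows g_profiling_event_mask to the process/thread-create and mmap events while existing processes are synthesized into the profile.) Below is a minimal, self-contained sketch of the flag pattern; the Thread, EventBuffer, and helper names are simplified stand-ins for illustration, not the kernel's actual classes, and the sketch keeps the buffer on the thread rather than looking it up via the owning Process as the real code does.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Simplified stand-in for the kernel's per-process performance event buffer.
struct EventBuffer {
    bool append(int type, uintptr_t arg1, uintptr_t arg2)
    {
        std::printf("event %d (%#lx, %#lx)\n", type, (unsigned long)arg1, (unsigned long)arg2);
        return true;
    }
};

class Thread {
public:
    // Set once the thread is on its way out (sys$exit / sys$exit_thread);
    // per-thread events are ignored from that point on.
    bool is_profiling_suppressed() const { return m_is_profiling_suppressed; }
    void set_profiling_suppressed() { m_is_profiling_suppressed = true; }

    EventBuffer* current_perf_events_buffer() { return &m_buffer; }

private:
    bool m_is_profiling_suppressed { false };
    EventBuffer m_buffer;
};

enum PerfEventType {
    PERF_EVENT_KMALLOC = 1,
    PERF_EVENT_THREAD_EXIT = 2,
};

// Ordinary emitters bail out early for suppressed threads...
void add_kmalloc_perf_event(Thread& thread, size_t size, uintptr_t ptr)
{
    if (thread.is_profiling_suppressed())
        return;
    if (auto* buffer = thread.current_perf_events_buffer())
        buffer->append(PERF_EVENT_KMALLOC, size, ptr);
}

// ...while the exit event deliberately skips the check, so the profile
// still records when the thread went away.
void add_thread_exit_event(Thread& thread)
{
    if (auto* buffer = thread.current_perf_events_buffer())
        buffer->append(PERF_EVENT_THREAD_EXIT, 0, 0);
}

int main()
{
    Thread thread;
    add_kmalloc_perf_event(thread, 128, 0x1000); // recorded
    thread.set_profiling_suppressed();           // thread is exiting
    add_thread_exit_event(thread);               // still recorded, by design
    add_kmalloc_perf_event(thread, 64, 0x2000);  // ignored from now on
    return 0;
}
```

The deliberate asymmetry is that the exit event is still appended after the flag is set, so the thread's lifetime is closed out in the profile even though no further kmalloc/kfree or CPU-sample events are attributed to it.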

+ 10 - 10
Kernel/Heap/kmalloc.cpp

@@ -257,11 +257,11 @@ void* kmalloc(size_t size)
         PANIC("kmalloc: Out of memory (requested size: {})", size);
         PANIC("kmalloc: Out of memory (requested size: {})", size);
     }
     }
 
 
-    Process* current_process = Process::current();
-    if (!current_process && Scheduler::colonel_initialized())
-        current_process = Scheduler::colonel();
-    if (current_process)
-        PerformanceManager::add_kmalloc_perf_event(*current_process, size, (FlatPtr)ptr);
+    Thread* current_thread = Thread::current();
+    if (!current_thread)
+        current_thread = Processor::idle_thread();
+    if (current_thread)
+        PerformanceManager::add_kmalloc_perf_event(*current_thread, size, (FlatPtr)ptr);
 
     return ptr;
 }
@@ -277,11 +277,11 @@ void kfree(void* ptr)
     ++g_nested_kfree_calls;
 
     if (g_nested_kfree_calls == 1) {
-        Process* current_process = Process::current();
-        if (!current_process && Scheduler::colonel_initialized())
-            current_process = Scheduler::colonel();
-        if (current_process)
-            PerformanceManager::add_kfree_perf_event(*current_process, 0, (FlatPtr)ptr);
+        Thread* current_thread = Thread::current();
+        if (!current_thread)
+            current_thread = Processor::idle_thread();
+        if (current_thread)
+            PerformanceManager::add_kfree_perf_event(*current_thread, 0, (FlatPtr)ptr);
     }
 
     g_kmalloc_global->m_heap.deallocate(ptr);

+ 18 - 4
Kernel/PerformanceManager.h

@@ -40,6 +40,8 @@ public:
 
     inline static void add_thread_created_event(Thread& thread)
     {
+        if (thread.is_profiling_suppressed())
+            return;
         if (auto* event_buffer = thread.process().current_perf_events_buffer()) {
             [[maybe_unused]] auto rc = event_buffer->append(PERF_EVENT_THREAD_CREATE, thread.tid().value(), 0, nullptr, &thread);
         }
@@ -47,6 +49,8 @@ public:
 
     inline static void add_thread_exit_event(Thread& thread)
     {
+        // As an exception this doesn't check whether profiling is suppressed for
+        // the thread so we can record the thread_exit event anyway.
         if (auto* event_buffer = thread.process().current_perf_events_buffer()) {
             [[maybe_unused]] auto rc = event_buffer->append(PERF_EVENT_THREAD_EXIT, thread.tid().value(), 0, nullptr, &thread);
         }
@@ -54,6 +58,8 @@ public:
 
     inline static void add_cpu_sample_event(Thread& current_thread, const RegisterState& regs, u32 lost_time)
     {
+        if (current_thread.is_profiling_suppressed())
+            return;
         if (auto* event_buffer = current_thread.process().current_perf_events_buffer()) {
             [[maybe_unused]] auto rc = event_buffer->append_with_eip_and_ebp(
                 current_thread.pid(), current_thread.tid(),
@@ -77,27 +83,35 @@ public:
 
     inline static void add_context_switch_perf_event(Thread& current_thread, Thread& next_thread)
     {
+        if (current_thread.is_profiling_suppressed())
+            return;
         if (auto* event_buffer = current_thread.process().current_perf_events_buffer()) {
             [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_CONTEXT_SWITCH, next_thread.pid().value(), next_thread.tid().value(), nullptr);
         }
     }
 
-    inline static void add_kmalloc_perf_event(Process& current_process, size_t size, FlatPtr ptr)
+    inline static void add_kmalloc_perf_event(Thread& current_thread, size_t size, FlatPtr ptr)
     {
-        if (auto* event_buffer = current_process.current_perf_events_buffer()) {
+        if (current_thread.is_profiling_suppressed())
+            return;
+        if (auto* event_buffer = current_thread.process().current_perf_events_buffer()) {
             [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_KMALLOC, size, ptr, nullptr);
         }
     }
 
-    inline static void add_kfree_perf_event(Process& current_process, size_t size, FlatPtr ptr)
+    inline static void add_kfree_perf_event(Thread& current_thread, size_t size, FlatPtr ptr)
     {
-        if (auto* event_buffer = current_process.current_perf_events_buffer()) {
+        if (current_thread.is_profiling_suppressed())
+            return;
+        if (auto* event_buffer = current_thread.process().current_perf_events_buffer()) {
             [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_KFREE, size, ptr, nullptr);
         }
     }
 
     inline static void add_page_fault_event(Thread& thread, const RegisterState& regs)
     {
+        if (thread.is_profiling_suppressed())
+            return;
         if (auto* event_buffer = thread.process().current_perf_events_buffer()) {
             [[maybe_unused]] auto rc = event_buffer->append_with_eip_and_ebp(
                 thread.pid(), thread.tid(),

+ 0 - 5
Kernel/Scheduler.cpp

@@ -445,11 +445,6 @@ void Scheduler::prepare_for_idle_loop()
     scheduler_data.m_in_scheduler = true;
 }
 
-bool Scheduler::colonel_initialized()
-{
-    return !!s_colonel_process;
-}
-
 Process* Scheduler::colonel()
 {
     VERIFY(s_colonel_process);

+ 0 - 1
Kernel/Scheduler.h

@@ -43,7 +43,6 @@ public:
     static void leave_on_first_switch(u32 flags);
     static void prepare_after_exec();
     static void prepare_for_idle_loop();
-    static bool colonel_initialized();
     static Process* colonel();
     static void idle_loop(void*);
     static void invoke_async();

+ 1 - 0
Kernel/Syscalls/exit.cpp

@@ -19,6 +19,7 @@ void Process::sys$exit(int status)
     }
 
     auto* current_thread = Thread::current();
+    current_thread->set_profiling_suppressed();
     PerformanceManager::add_thread_exit_event(*current_thread);
 
     die();

+ 4 - 2
Kernel/Syscalls/profiling.cpp

@@ -25,7 +25,7 @@ KResultOr<int> Process::sys$profiling_enable(pid_t pid, u64 event_mask)
         if (!is_superuser())
             return EPERM;
         ScopedCritical critical;
-        g_profiling_event_mask = event_mask;
+        g_profiling_event_mask = PERF_EVENT_PROCESS_CREATE | PERF_EVENT_THREAD_CREATE | PERF_EVENT_MMAP;
         if (g_global_perf_events)
             g_global_perf_events->clear();
         else
@@ -40,6 +40,7 @@ KResultOr<int> Process::sys$profiling_enable(pid_t pid, u64 event_mask)
             PerformanceManager::add_process_created_event(process);
             return IterationDecision::Continue;
         });
+        g_profiling_event_mask = event_mask;
         return 0;
     }
 
@@ -51,12 +52,13 @@ KResultOr<int> Process::sys$profiling_enable(pid_t pid, u64 event_mask)
         return ESRCH;
     if (!is_superuser() && process->uid() != euid())
         return EPERM;
-    g_profiling_event_mask = event_mask;
+    g_profiling_event_mask = PERF_EVENT_PROCESS_CREATE | PERF_EVENT_THREAD_CREATE | PERF_EVENT_MMAP;
     process->set_profiling(true);
     if (!process->create_perf_events_buffer_if_needed()) {
         process->set_profiling(false);
         return ENOMEM;
     }
+    g_profiling_event_mask = event_mask;
     if (!TimeManagement::the().enable_profile_timer()) {
         process->set_profiling(false);
         return ENOTSUP;

+ 1 - 0
Kernel/Syscalls/thread.cpp

@@ -88,6 +88,7 @@ void Process::sys$exit_thread(Userspace<void*> exit_value, Userspace<void*> stac
     }
 
     auto current_thread = Thread::current();
+    current_thread->set_profiling_suppressed();
     PerformanceManager::add_thread_exit_event(*current_thread);
 
     if (stack_location) {

+ 5 - 0
Kernel/Thread.h

@@ -1131,6 +1131,9 @@ public:
         return m_nested_profiler_calls.fetch_sub(1, AK::MemoryOrder::memory_order_acquire);
     }
 
+    bool is_profiling_suppressed() const { return m_is_profiling_suppressed; }
+    void set_profiling_suppressed() { m_is_profiling_suppressed = true; }
+
 private:
     Thread(NonnullRefPtr<Process>, NonnullOwnPtr<Region>, NonnullRefPtr<Timer>);
 
@@ -1273,6 +1276,8 @@ private:
 
     RefPtr<Timer> m_block_timer;
 
+    bool m_is_profiling_suppressed { false };
+
     void yield_without_holding_big_lock();
     void donate_without_holding_big_lock(RefPtr<Thread>&, const char*);
     void yield_while_not_holding_big_lock();