PerformanceManager.h

/*
 * Copyright (c) 2021, Brian Gianforcaro <bgianf@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <Kernel/PerformanceEventBuffer.h>
#include <Kernel/Process.h>
#include <Kernel/Thread.h>

namespace Kernel {
class PerformanceManager {
public:
    inline static void add_process_created_event(Process& process)
    {
        if (g_profiling_all_threads) {
            VERIFY(g_global_perf_events);
            g_global_perf_events->add_process(process, ProcessEventType::Create);
        }
    }

    inline static void add_process_exec_event(Process& process)
    {
        if (auto* event_buffer = process.current_perf_events_buffer()) {
            event_buffer->add_process(process, ProcessEventType::Exec);
        }
    }

    inline static void add_process_exit_event(Process& process)
    {
        if (g_profiling_all_threads) {
            VERIFY(g_global_perf_events);
            [[maybe_unused]] auto rc = g_global_perf_events->append_with_ip_and_bp(
                process.pid(), 0, 0, 0, PERF_EVENT_PROCESS_EXIT, 0, 0, 0, nullptr);
        }
    }

    inline static void add_thread_created_event(Thread& thread)
    {
        if (thread.is_profiling_suppressed())
            return;
        if (auto* event_buffer = thread.process().current_perf_events_buffer()) {
            [[maybe_unused]] auto rc = event_buffer->append(PERF_EVENT_THREAD_CREATE, thread.tid().value(), 0, nullptr, &thread);
        }
    }
    inline static void add_thread_exit_event(Thread& thread)
    {
        // As an exception, this deliberately does not check whether profiling is
        // suppressed for the thread, so that the thread_exit event is still recorded.
        if (auto* event_buffer = thread.process().current_perf_events_buffer()) {
            [[maybe_unused]] auto rc = event_buffer->append(PERF_EVENT_THREAD_EXIT, thread.tid().value(), 0, nullptr, &thread);
        }
    }
    inline static void add_cpu_sample_event(Thread& current_thread, RegisterState const& regs, u32 lost_time)
    {
        if (current_thread.is_profiling_suppressed())
            return;
        if (auto* event_buffer = current_thread.process().current_perf_events_buffer()) {
            [[maybe_unused]] auto rc = event_buffer->append_with_ip_and_bp(
                current_thread.pid(), current_thread.tid(), regs, PERF_EVENT_SAMPLE, lost_time, 0, 0, nullptr);
        }
    }

    inline static void add_mmap_perf_event(Process& current_process, Memory::Region const& region)
    {
        if (auto* event_buffer = current_process.current_perf_events_buffer()) {
            [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_MMAP, region.vaddr().get(), region.size(), region.name());
        }
    }

    inline static void add_unmap_perf_event(Process& current_process, Memory::VirtualRange const& region)
    {
        if (auto* event_buffer = current_process.current_perf_events_buffer()) {
            [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_MUNMAP, region.base().get(), region.size(), nullptr);
        }
    }

    inline static void add_context_switch_perf_event(Thread& current_thread, Thread& next_thread)
    {
        if (current_thread.is_profiling_suppressed())
            return;
        if (auto* event_buffer = current_thread.process().current_perf_events_buffer()) {
            [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_CONTEXT_SWITCH, next_thread.pid().value(), next_thread.tid().value(), nullptr);
        }
    }

    inline static void add_kmalloc_perf_event(Thread& current_thread, size_t size, FlatPtr ptr)
    {
        if (current_thread.is_profiling_suppressed())
            return;
        if (auto* event_buffer = current_thread.process().current_perf_events_buffer()) {
            [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_KMALLOC, size, ptr, nullptr);
        }
    }

    inline static void add_kfree_perf_event(Thread& current_thread, size_t size, FlatPtr ptr)
    {
        if (current_thread.is_profiling_suppressed())
            return;
        if (auto* event_buffer = current_thread.process().current_perf_events_buffer()) {
            [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_KFREE, size, ptr, nullptr);
        }
    }

    inline static void add_page_fault_event(Thread& thread, RegisterState const& regs)
    {
        if (thread.is_profiling_suppressed())
            return;
        if (auto* event_buffer = thread.process().current_perf_events_buffer()) {
            [[maybe_unused]] auto rc = event_buffer->append_with_ip_and_bp(
                thread.pid(), thread.tid(), regs, PERF_EVENT_PAGE_FAULT, 0, 0, 0, nullptr);
        }
    }

    inline static void add_syscall_event(Thread& thread, RegisterState const& regs)
    {
        if (thread.is_profiling_suppressed())
            return;
        if (auto* event_buffer = thread.process().current_perf_events_buffer()) {
            [[maybe_unused]] auto rc = event_buffer->append_with_ip_and_bp(
                thread.pid(), thread.tid(), regs, PERF_EVENT_SYSCALL, 0, 0, 0, nullptr);
        }
    }
    inline static void timer_tick(RegisterState const& regs)
    {
        static Time last_wakeup;
        auto now = kgettimeofday();
        constexpr auto ideal_interval = Time::from_microseconds(1000'000 / OPTIMAL_PROFILE_TICKS_PER_SECOND_RATE);
        auto expected_wakeup = last_wakeup + ideal_interval;
        auto delay = (now > expected_wakeup) ? now - expected_wakeup : Time::from_microseconds(0);
        last_wakeup = now;
        auto* current_thread = Thread::current();
        // FIXME: We currently don't collect samples while idle.
        //        That will be an interesting mode to add in the future. :^)
        if (!current_thread || current_thread == Processor::idle_thread())
            return;

        // Estimate how many sample periods were missed: the delay beyond the expected
        // wakeup time, divided by the ideal interval, is recorded as lost samples.
        auto lost_samples = delay.to_microseconds() / ideal_interval.to_microseconds();
        PerformanceManager::add_cpu_sample_event(*current_thread, regs, lost_samples);
    }
};

}
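
// Illustrative usage (a sketch, not part of this header): kernel subsystems are expected
// to call these static hooks at the relevant points. For example, an allocation wrapper
// could report a kmalloc event roughly like this (my_tracked_kmalloc is a hypothetical
// name used only for illustration):
//
//     void* my_tracked_kmalloc(size_t size)
//     {
//         void* ptr = kmalloc(size);
//         if (auto* current_thread = Thread::current())
//             PerformanceManager::add_kmalloc_perf_event(*current_thread, size, (FlatPtr)ptr);
//         return ptr;
//     }
//
// Each hook is a no-op unless profiling is active for the process (or globally via
// g_profiling_all_threads), so call sites do not need their own profiling checks.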