// PerformanceManager.h
  1. /*
  2. * Copyright (c) 2021, Brian Gianforcaro <bgianf@serenityos.org>
  3. *
  4. * SPDX-License-Identifier: BSD-2-Clause
  5. */
  6. #pragma once
  7. #include <Kernel/PerformanceEventBuffer.h>
  8. #include <Kernel/Tasks/Process.h>
  9. #include <Kernel/Tasks/Thread.h>
  10. #include <Kernel/Time/TimeManagement.h>
  11. namespace Kernel {
  12. class PerformanceManager {
  13. public:
  14. static void add_process_created_event(Process& process)
  15. {
  16. if (g_profiling_all_threads) {
  17. VERIFY(g_global_perf_events);
  18. (void)g_global_perf_events->add_process(process, ProcessEventType::Create);
  19. }
  20. }
  21. static void add_process_exec_event(Process& process)
  22. {
  23. if (auto* event_buffer = process.current_perf_events_buffer()) {
  24. (void)event_buffer->add_process(process, ProcessEventType::Exec);
  25. }
  26. }
  27. static void add_process_exit_event(Process& process)
  28. {
  29. if (g_profiling_all_threads) {
  30. VERIFY(g_global_perf_events);
  31. [[maybe_unused]] auto rc = g_global_perf_events->append_with_ip_and_bp(
  32. process.pid(), 0, 0, 0, PERF_EVENT_PROCESS_EXIT, 0, 0, 0, {});
  33. }
  34. }
  35. static void add_thread_created_event(Thread& thread)
  36. {
  37. if (thread.is_profiling_suppressed())
  38. return;
  39. if (auto* event_buffer = thread.process().current_perf_events_buffer()) {
  40. [[maybe_unused]] auto rc = event_buffer->append(PERF_EVENT_THREAD_CREATE, thread.tid().value(), 0, {}, &thread);
  41. }
  42. }
  43. static void add_thread_exit_event(Thread& thread)
  44. {
  45. // As an exception this doesn't check whether profiling is suppressed for
  46. // the thread so we can record the thread_exit event anyway.
  47. if (auto* event_buffer = thread.process().current_perf_events_buffer()) {
  48. [[maybe_unused]] auto rc = event_buffer->append(PERF_EVENT_THREAD_EXIT, thread.tid().value(), 0, {}, &thread);
  49. }
  50. }
  51. static void add_cpu_sample_event(Thread& current_thread, RegisterState const& regs, u32 lost_time)
  52. {
  53. if (current_thread.is_profiling_suppressed())
  54. return;
  55. if (auto* event_buffer = current_thread.process().current_perf_events_buffer()) {
  56. [[maybe_unused]] auto rc = event_buffer->append_with_ip_and_bp(
  57. current_thread.pid(), current_thread.tid(), regs, PERF_EVENT_SAMPLE, lost_time, 0, 0, {});
  58. }
  59. }
  60. static void add_mmap_perf_event(Process& current_process, Memory::Region const& region)
  61. {
  62. if (auto* event_buffer = current_process.current_perf_events_buffer()) {
  63. [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_MMAP, region.vaddr().get(), region.size(), region.name());
  64. }
  65. }
  66. static void add_unmap_perf_event(Process& current_process, Memory::VirtualRange const& region)
  67. {
  68. if (auto* event_buffer = current_process.current_perf_events_buffer()) {
  69. [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_MUNMAP, region.base().get(), region.size(), {});
  70. }
  71. }
  72. static void add_context_switch_perf_event(Thread& current_thread, Thread& next_thread)
  73. {
  74. if (current_thread.is_profiling_suppressed())
  75. return;
  76. if (auto* event_buffer = current_thread.process().current_perf_events_buffer()) {
  77. [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_CONTEXT_SWITCH, next_thread.pid().value(), next_thread.tid().value(), {});
  78. }
  79. }
  80. static void add_kmalloc_perf_event(Thread& current_thread, size_t size, FlatPtr ptr)
  81. {
  82. if (current_thread.is_profiling_suppressed())
  83. return;
  84. if (auto* event_buffer = current_thread.process().current_perf_events_buffer()) {
  85. [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_KMALLOC, size, ptr, {});
  86. }
  87. }
  88. static void add_kfree_perf_event(Thread& current_thread, size_t size, FlatPtr ptr)
  89. {
  90. if (current_thread.is_profiling_suppressed())
  91. return;
  92. if (auto* event_buffer = current_thread.process().current_perf_events_buffer()) {
  93. [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_KFREE, size, ptr, {});
  94. }
  95. }
  96. static void add_page_fault_event(Thread& thread, RegisterState const& regs)
  97. {
  98. if (thread.is_profiling_suppressed())
  99. return;
  100. if (auto* event_buffer = thread.process().current_perf_events_buffer()) {
  101. [[maybe_unused]] auto rc = event_buffer->append_with_ip_and_bp(
  102. thread.pid(), thread.tid(), regs, PERF_EVENT_PAGE_FAULT, 0, 0, 0, {});
  103. }
  104. }
  105. static void add_syscall_event(Thread& thread, RegisterState const& regs)
  106. {
  107. if (thread.is_profiling_suppressed())
  108. return;
  109. if (auto* event_buffer = thread.process().current_perf_events_buffer()) {
  110. [[maybe_unused]] auto rc = event_buffer->append_with_ip_and_bp(
  111. thread.pid(), thread.tid(), regs, PERF_EVENT_SYSCALL, 0, 0, 0, {});
  112. }
  113. }
  114. static void add_read_event(Thread& thread, int fd, size_t size, OpenFileDescription const& file_description, u64 start_timestamp, ErrorOr<FlatPtr> const& result)
  115. {
  116. if (thread.is_profiling_suppressed())
  117. return;
  118. auto* event_buffer = thread.process().current_perf_events_buffer();
  119. if (event_buffer == nullptr)
  120. return;
  121. size_t filepath_string_index;
  122. if (auto path = file_description.original_absolute_path(); !path.is_error()) {
  123. auto registered_result = event_buffer->register_string(move(path.value()));
  124. if (registered_result.is_error())
  125. return;
  126. filepath_string_index = registered_result.value();
  127. } else if (auto pseudo_path = file_description.pseudo_path(); !pseudo_path.is_error()) {
  128. auto registered_result = event_buffer->register_string(move(pseudo_path.value()));
  129. if (registered_result.is_error())
  130. return;
  131. filepath_string_index = registered_result.value();
  132. } else {
  133. auto invalid_path_string = KString::try_create("<INVALID_FILE_PATH>"sv); // TODO: Performance, unnecessary allocations.
  134. if (invalid_path_string.is_error())
  135. return;
  136. auto registered_result = event_buffer->register_string(move(invalid_path_string.value()));
  137. if (registered_result.is_error())
  138. return;
  139. filepath_string_index = registered_result.value();
  140. }
  141. [[maybe_unused]] auto rc = event_buffer->append(PERF_EVENT_READ, fd, size, {}, &thread, filepath_string_index, start_timestamp, result); // wrong arguments
  142. }
  143. static void timer_tick(RegisterState const& regs)
  144. {
  145. static UnixDateTime last_wakeup;
  146. auto now = kgettimeofday();
  147. constexpr auto ideal_interval = Duration::from_microseconds(1000'000 / OPTIMAL_PROFILE_TICKS_PER_SECOND_RATE);
  148. auto expected_wakeup = last_wakeup + ideal_interval;
  149. auto delay = (now > expected_wakeup) ? now - expected_wakeup : Duration::from_microseconds(0);
  150. last_wakeup = now;
  151. auto* current_thread = Thread::current();
  152. // FIXME: We currently don't collect samples while idle.
  153. // That will be an interesting mode to add in the future. :^)
  154. if (!current_thread || current_thread == Processor::idle_thread())
  155. return;
  156. auto lost_samples = delay.to_microseconds() / ideal_interval.to_microseconds();
  157. PerformanceManager::add_cpu_sample_event(*current_thread, regs, lost_samples);
  158. }
  159. };
  160. }