/*
 * Copyright (c) 2020-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/JsonArraySerializer.h>
#include <AK/JsonObject.h>
#include <AK/JsonObjectSerializer.h>
#include <Kernel/Arch/x86/SmapDisabler.h>
#include <Kernel/FileSystem/Custody.h>
#include <Kernel/KBufferBuilder.h>
#include <Kernel/PerformanceEventBuffer.h>
#include <Kernel/Process.h>

namespace Kernel {

PerformanceEventBuffer::PerformanceEventBuffer(NonnullOwnPtr<KBuffer> buffer)
    : m_buffer(move(buffer))
{
}
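
// Records an event on behalf of the current thread. The inline assembly grabs
// the current EBP (x86-32) so raw_backtrace() below can walk the caller's
// stack frames; NEVER_INLINE ensures this function has a stack frame of its
// own to start the walk from. EIP is passed as 0 here, meaning no leaf
// address is prepended to the captured stack.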
NEVER_INLINE KResult PerformanceEventBuffer::append(int type, FlatPtr arg1, FlatPtr arg2, const StringView& arg3, Thread* current_thread)
{
    FlatPtr ebp;
    asm volatile("movl %%ebp, %%eax"
                 : "=a"(ebp));
    return append_with_eip_and_ebp(current_thread->pid(), current_thread->tid(), 0, ebp, type, arg1, arg2, arg3);
}
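
// Walks the chain of saved frame pointers starting at `ebp`: on x86-32, [ebp]
// holds the previous frame pointer and [ebp + 4] the return address. Each read
// goes through safe_memcpy() so an unmapped or torn frame ends the walk
// gracefully instead of faulting in the kernel.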
static Vector<FlatPtr, PerformanceEvent::max_stack_frame_count> raw_backtrace(FlatPtr ebp, FlatPtr eip)
{
    Vector<FlatPtr, PerformanceEvent::max_stack_frame_count> backtrace;
    if (eip != 0)
        backtrace.append(eip);
    FlatPtr stack_ptr_copy;
    FlatPtr stack_ptr = (FlatPtr)ebp;
    // FIXME: Figure out how to remove this SmapDisabler without breaking profile stacks.
    SmapDisabler disabler;
    while (stack_ptr) {
        void* fault_at;
        if (!safe_memcpy(&stack_ptr_copy, (void*)stack_ptr, sizeof(FlatPtr), fault_at))
            break;
        FlatPtr retaddr;
        if (!safe_memcpy(&retaddr, (void*)(stack_ptr + sizeof(FlatPtr)), sizeof(FlatPtr), fault_at))
            break;
        if (retaddr == 0)
            break;
        backtrace.append(retaddr);
        if (backtrace.size() == PerformanceEvent::max_stack_frame_count)
            break;
        stack_ptr = stack_ptr_copy;
    }
    return backtrace;
}
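
// Fills in a PerformanceEvent for the given type, copying the type-specific
// payload out of arg1..arg3, then attaches a truncated backtrace, the pid/tid,
// and a millisecond uptime timestamp. Fails with ENOBUFS once the buffer is
// full and EINVAL for unrecognized event types.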
KResult PerformanceEventBuffer::append_with_eip_and_ebp(ProcessID pid, ThreadID tid,
    u32 eip, u32 ebp, int type, FlatPtr arg1, FlatPtr arg2, const StringView& arg3)
{
    if (count() >= capacity())
        return ENOBUFS;

    PerformanceEvent event;
    event.type = type;

    switch (type) {
    case PERF_EVENT_SAMPLE:
        break;
    case PERF_EVENT_MALLOC:
        event.data.malloc.size = arg1;
        event.data.malloc.ptr = arg2;
        break;
    case PERF_EVENT_FREE:
        event.data.free.ptr = arg1;
        break;
    case PERF_EVENT_MMAP:
        event.data.mmap.ptr = arg1;
        event.data.mmap.size = arg2;
        memset(event.data.mmap.name, 0, sizeof(event.data.mmap.name));
        if (!arg3.is_empty())
            memcpy(event.data.mmap.name, arg3.characters_without_null_termination(), min(arg3.length(), sizeof(event.data.mmap.name) - 1));
        break;
    case PERF_EVENT_MUNMAP:
        event.data.munmap.ptr = arg1;
        event.data.munmap.size = arg2;
        break;
    case PERF_EVENT_PROCESS_CREATE:
        event.data.process_create.parent_pid = arg1;
        memset(event.data.process_create.executable, 0, sizeof(event.data.process_create.executable));
        if (!arg3.is_empty()) {
            memcpy(event.data.process_create.executable, arg3.characters_without_null_termination(),
                min(arg3.length(), sizeof(event.data.process_create.executable) - 1));
        }
        break;
    case PERF_EVENT_PROCESS_EXEC:
        memset(event.data.process_exec.executable, 0, sizeof(event.data.process_exec.executable));
        if (!arg3.is_empty()) {
            memcpy(event.data.process_exec.executable, arg3.characters_without_null_termination(),
                min(arg3.length(), sizeof(event.data.process_exec.executable) - 1));
        }
        break;
    case PERF_EVENT_PROCESS_EXIT:
        break;
    case PERF_EVENT_THREAD_CREATE:
        event.data.thread_create.parent_tid = arg1;
        break;
    case PERF_EVENT_THREAD_EXIT:
        break;
    default:
        return EINVAL;
    }

    auto backtrace = raw_backtrace(ebp, eip);
    event.stack_size = min(sizeof(event.stack) / sizeof(FlatPtr), static_cast<size_t>(backtrace.size()));
    memcpy(event.stack, backtrace.data(), event.stack_size * sizeof(FlatPtr));

    event.pid = pid.value();
    event.tid = tid.value();
    event.timestamp = TimeManagement::the().uptime_ms();
    at(m_count++) = event;
    return KSuccess;
}
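
// Indexes into the backing KBuffer as a flat array of PerformanceEvent records.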
PerformanceEvent& PerformanceEventBuffer::at(size_t index)
{
    VERIFY(index < capacity());
    auto* events = reinterpret_cast<PerformanceEvent*>(m_buffer->data());
    return events[index];
}
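
// Serializes every buffered event under an "events" array. An illustrative
// (hand-written, not verbatim) entry for a malloc event would look like:
//
//     { "type": "malloc", "ptr": 134217728, "size": 64,
//       "pid": 42, "tid": 42, "timestamp": 9001, "stack": [ ... ] }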
template<typename Serializer>
bool PerformanceEventBuffer::to_json_impl(Serializer& object) const
{
    auto array = object.add_array("events");
    for (size_t i = 0; i < m_count; ++i) {
        auto& event = at(i);
        auto event_object = array.add_object();
        switch (event.type) {
        case PERF_EVENT_SAMPLE:
            event_object.add("type", "sample");
            break;
        case PERF_EVENT_MALLOC:
            event_object.add("type", "malloc");
            event_object.add("ptr", static_cast<u64>(event.data.malloc.ptr));
            event_object.add("size", static_cast<u64>(event.data.malloc.size));
            break;
        case PERF_EVENT_FREE:
            event_object.add("type", "free");
            event_object.add("ptr", static_cast<u64>(event.data.free.ptr));
            break;
        case PERF_EVENT_MMAP:
            event_object.add("type", "mmap");
            event_object.add("ptr", static_cast<u64>(event.data.mmap.ptr));
            event_object.add("size", static_cast<u64>(event.data.mmap.size));
            event_object.add("name", event.data.mmap.name);
            break;
        case PERF_EVENT_MUNMAP:
            event_object.add("type", "munmap");
            event_object.add("ptr", static_cast<u64>(event.data.munmap.ptr));
            event_object.add("size", static_cast<u64>(event.data.munmap.size));
            break;
        case PERF_EVENT_PROCESS_CREATE:
            event_object.add("type", "process_create");
            event_object.add("parent_pid", static_cast<u64>(event.data.process_create.parent_pid));
            event_object.add("executable", event.data.process_create.executable);
            break;
        case PERF_EVENT_PROCESS_EXEC:
            event_object.add("type", "process_exec");
            event_object.add("executable", event.data.process_exec.executable);
            break;
        case PERF_EVENT_PROCESS_EXIT:
            event_object.add("type", "process_exit");
            break;
        case PERF_EVENT_THREAD_CREATE:
            event_object.add("type", "thread_create");
            event_object.add("parent_tid", static_cast<u64>(event.data.thread_create.parent_tid));
            break;
        case PERF_EVENT_THREAD_EXIT:
            event_object.add("type", "thread_exit");
            break;
        }
        event_object.add("pid", event.pid);
        event_object.add("tid", event.tid);
        event_object.add("timestamp", event.timestamp);
        auto stack_array = event_object.add_array("stack");
        for (size_t j = 0; j < event.stack_size; ++j) {
            stack_array.add(event.stack[j]);
        }
        stack_array.finish();
        event_object.finish();
    }
    array.finish();
    object.finish();
    return true;
}
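
// Non-template entry point for callers that serialize into a KBufferBuilder.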
bool PerformanceEventBuffer::to_json(KBufferBuilder& builder) const
{
    JsonObjectSerializer object(builder);
    return to_json_impl(object);
}
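
// Allocates the backing KBuffer eagerly (AllocationStrategy::AllocateNow) and
// returns null on failure. A hypothetical usage sketch follows; the call site
// and constants are illustrative, not taken from this file:
//
//     if (auto buffer = PerformanceEventBuffer::try_create_with_size(4 * MiB))
//         (void)buffer->append(PERF_EVENT_MALLOC, size, ptr, {}, Thread::current());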
OwnPtr<PerformanceEventBuffer> PerformanceEventBuffer::try_create_with_size(size_t buffer_size)
{
    auto buffer = KBuffer::try_create_with_size(buffer_size, Region::Access::Read | Region::Access::Write, "Performance events", AllocationStrategy::AllocateNow);
    if (!buffer)
        return {};
    return adopt_own(*new PerformanceEventBuffer(buffer.release_nonnull()));
}
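
// Backfills the buffer with synthetic events describing an already-running
// process, so a profile started mid-execution still sees the process, its
// threads, and its memory regions: one process_create/process_exec event,
// one thread_create event per thread, and one mmap event per region.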
void PerformanceEventBuffer::add_process(const Process& process, ProcessEventType event_type)
{
    ScopedSpinLock locker(process.space().get_lock());

    String executable;
    if (process.executable())
        executable = process.executable()->absolute_path();
    else
        executable = String::formatted("<{}>", process.name());

    [[maybe_unused]] auto rc = append_with_eip_and_ebp(process.pid(), 0, 0, 0,
        event_type == ProcessEventType::Create ? PERF_EVENT_PROCESS_CREATE : PERF_EVENT_PROCESS_EXEC,
        process.pid().value(), 0, executable);

    process.for_each_thread([&](auto& thread) {
        [[maybe_unused]] auto rc = append_with_eip_and_ebp(process.pid(), thread.tid().value(),
            0, 0, PERF_EVENT_THREAD_CREATE, 0, 0, nullptr);
        return IterationDecision::Continue;
    });

    for (auto& region : process.space().regions()) {
        [[maybe_unused]] auto rc = append_with_eip_and_ebp(process.pid(), 0,
            0, 0, PERF_EVENT_MMAP, region->range().base().get(), region->range().size(), region->name());
    }
}

}