// PerformanceEventBuffer.cpp
  1. /*
  2. * Copyright (c) 2020-2021, Andreas Kling <kling@serenityos.org>
  3. * All rights reserved.
  4. *
  5. * Redistribution and use in source and binary forms, with or without
  6. * modification, are permitted provided that the following conditions are met:
  7. *
  8. * 1. Redistributions of source code must retain the above copyright notice, this
  9. * list of conditions and the following disclaimer.
  10. *
  11. * 2. Redistributions in binary form must reproduce the above copyright notice,
  12. * this list of conditions and the following disclaimer in the documentation
  13. * and/or other materials provided with the distribution.
  14. *
  15. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  16. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  17. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  18. * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
  19. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  20. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  21. * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  22. * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  23. * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  24. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  25. */
  26. #include <AK/JsonArraySerializer.h>
  27. #include <AK/JsonObject.h>
  28. #include <AK/JsonObjectSerializer.h>
  29. #include <Kernel/Arch/x86/SmapDisabler.h>
  30. #include <Kernel/FileSystem/Custody.h>
  31. #include <Kernel/KBufferBuilder.h>
  32. #include <Kernel/PerformanceEventBuffer.h>
  33. #include <Kernel/Process.h>
  34. namespace Kernel {
// Takes ownership of the preallocated KBuffer that backs the flat array
// of PerformanceEvent records (see at()/append_with_eip_and_ebp below).
PerformanceEventBuffer::PerformanceEventBuffer(NonnullOwnPtr<KBuffer> buffer)
    : m_buffer(move(buffer))
{
}
// Records one event of the given type for the current thread, capturing
// the caller's frame pointer and the thread's saved instruction pointer
// so a stack trace can be attached. Returns ENOBUFS when the buffer is
// full or EINVAL for an unknown type (via append_with_eip_and_ebp).
KResult PerformanceEventBuffer::append(int type, FlatPtr arg1, FlatPtr arg2)
{
    FlatPtr ebp;
    // Read the current frame pointer (x86-32 only: copies %ebp out via %eax).
    asm volatile("movl %%ebp, %%eax"
                 : "=a"(ebp));
    auto current_thread = Thread::current();
    // EIP comes from the register dump saved on this thread's kernel stack —
    // presumably the interrupted context at kernel entry; confirm against
    // Thread::get_register_dump_from_stack().
    auto eip = current_thread->get_register_dump_from_stack().eip;
    return append_with_eip_and_ebp(eip, ebp, type, arg1, arg2);
}
  48. static Vector<FlatPtr, PerformanceEvent::max_stack_frame_count> raw_backtrace(FlatPtr ebp, FlatPtr eip)
  49. {
  50. Vector<FlatPtr, PerformanceEvent::max_stack_frame_count> backtrace;
  51. backtrace.append(eip);
  52. FlatPtr stack_ptr_copy;
  53. FlatPtr stack_ptr = (FlatPtr)ebp;
  54. // FIXME: Figure out how to remove this SmapDisabler without breaking profile stacks.
  55. SmapDisabler disabler;
  56. while (stack_ptr) {
  57. void* fault_at;
  58. if (!safe_memcpy(&stack_ptr_copy, (void*)stack_ptr, sizeof(FlatPtr), fault_at))
  59. break;
  60. FlatPtr retaddr;
  61. if (!safe_memcpy(&retaddr, (void*)(stack_ptr + sizeof(FlatPtr)), sizeof(FlatPtr), fault_at))
  62. break;
  63. backtrace.append(retaddr);
  64. if (backtrace.size() == PerformanceEvent::max_stack_frame_count)
  65. break;
  66. stack_ptr = stack_ptr_copy;
  67. }
  68. return backtrace;
  69. }
  70. KResult PerformanceEventBuffer::append_with_eip_and_ebp(u32 eip, u32 ebp, int type, FlatPtr arg1, FlatPtr arg2)
  71. {
  72. if (count() >= capacity())
  73. return ENOBUFS;
  74. PerformanceEvent event;
  75. event.type = type;
  76. switch (type) {
  77. case PERF_EVENT_SAMPLE:
  78. break;
  79. case PERF_EVENT_MALLOC:
  80. event.data.malloc.size = arg1;
  81. event.data.malloc.ptr = arg2;
  82. break;
  83. case PERF_EVENT_FREE:
  84. event.data.free.ptr = arg1;
  85. break;
  86. default:
  87. return EINVAL;
  88. }
  89. auto backtrace = raw_backtrace(ebp, eip);
  90. event.stack_size = min(sizeof(event.stack) / sizeof(FlatPtr), static_cast<size_t>(backtrace.size()));
  91. memcpy(event.stack, backtrace.data(), event.stack_size * sizeof(FlatPtr));
  92. event.tid = Thread::current()->tid().value();
  93. event.timestamp = TimeManagement::the().uptime_ms();
  94. at(m_count++) = event;
  95. return KSuccess;
  96. }
  97. PerformanceEvent& PerformanceEventBuffer::at(size_t index)
  98. {
  99. VERIFY(index < capacity());
  100. auto* events = reinterpret_cast<PerformanceEvent*>(m_buffer->data());
  101. return events[index];
  102. }
// Serializes all recorded events into an "events" array on the given
// serializer, then finishes both the array and the serializer itself.
// Templated so any JsonObjectSerializer<Builder> flavor works.
// NOTE: the explicit finish() calls are ordered child-before-parent;
// keep that ordering when touching this function.
template<typename Serializer>
bool PerformanceEventBuffer::to_json_impl(Serializer& object) const
{
    auto array = object.add_array("events");
    for (size_t i = 0; i < m_count; ++i) {
        auto& event = at(i);
        auto event_object = array.add_object();
        // Emit the type tag plus any type-specific payload.
        switch (event.type) {
        case PERF_EVENT_SAMPLE:
            event_object.add("type", "sample");
            break;
        case PERF_EVENT_MALLOC:
            event_object.add("type", "malloc");
            event_object.add("ptr", static_cast<u64>(event.data.malloc.ptr));
            event_object.add("size", static_cast<u64>(event.data.malloc.size));
            break;
        case PERF_EVENT_FREE:
            event_object.add("type", "free");
            event_object.add("ptr", static_cast<u64>(event.data.free.ptr));
            break;
        }
        event_object.add("tid", event.tid);
        event_object.add("timestamp", event.timestamp);
        auto stack_array = event_object.add_array("stack");
        for (size_t j = 0; j < event.stack_size; ++j) {
            stack_array.add(event.stack[j]);
        }
        // Finish children before the parent may emit anything further.
        stack_array.finish();
        event_object.finish();
    }
    array.finish();
    object.finish();
    return true;
}
// Serializes per-process metadata ("processes": pid, executable path and
// mapped regions) into `builder`, then appends the event stream via
// to_json_impl (which also finishes the top-level object).
bool PerformanceEventBuffer::to_json(KBufferBuilder& builder) const
{
    JsonObjectSerializer object(builder);
    auto processes_array = object.add_array("processes");
    for (auto& it : m_processes) {
        auto& process = *it.value;
        auto process_object = processes_array.add_object();
        process_object.add("pid", process.pid.value());
        process_object.add("executable", process.executable);
        auto regions_array = process_object.add_array("regions");
        for (auto& region : process.regions) {
            auto region_object = regions_array.add_object();
            region_object.add("name", region.name);
            region_object.add("base", region.range.base().get());
            region_object.add("size", region.range.size());
            // NOTE(review): region_object is never finish()ed explicitly —
            // presumably the serializer's destructor finishes it at scope
            // exit; confirm against JsonObjectSerializer.
        }
        // NOTE(review): likewise regions_array and process_object rely on
        // destructor-finish here — confirm.
    }
    processes_array.finish();
    return to_json_impl(object);
}
  157. OwnPtr<PerformanceEventBuffer> PerformanceEventBuffer::try_create_with_size(size_t buffer_size)
  158. {
  159. auto buffer = KBuffer::try_create_with_size(buffer_size, Region::Access::Read | Region::Access::Write, "Performance events", AllocationStrategy::AllocateNow);
  160. if (!buffer)
  161. return {};
  162. return adopt_own(*new PerformanceEventBuffer(buffer.release_nonnull()));
  163. }
// Captures a snapshot of `process` (pid, executable path, live thread
// ids and mapped memory regions) keyed by pid, so profile consumers can
// later map sampled addresses back to regions/symbols.
void PerformanceEventBuffer::add_process(const Process& process)
{
    // FIXME: What about threads that have died?
    // Hold the address-space lock so the region list cannot change while
    // we iterate it below.
    ScopedSpinLock locker(process.space().get_lock());
    String executable;
    if (process.executable())
        executable = process.executable()->absolute_path();
    auto sampled_process = adopt_own(*new SampledProcess {
        .pid = process.pid().value(),
        .executable = executable,
        .threads = {},
        .regions = {},
    });
    // Record the tid of every thread currently alive in the process.
    process.for_each_thread([&](auto& thread) {
        sampled_process->threads.set(thread.tid());
        return IterationDecision::Continue;
    });
    for (auto& region : process.space().regions()) {
        sampled_process->regions.append(SampledProcess::Region {
            .name = region->name(),
            .range = region->range(),
        });
    }
    // HashMap::set — presumably replaces any earlier snapshot for this pid.
    m_processes.set(process.pid(), move(sampled_process));
}
  189. }