// PerformanceEventBuffer.cpp
/*
 * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
  26. #include <AK/JsonArraySerializer.h>
  27. #include <AK/JsonObjectSerializer.h>
  28. #include <AK/JsonObject.h>
  29. #include <Kernel/KBufferBuilder.h>
  30. #include <Kernel/PerformanceEventBuffer.h>
  31. namespace Kernel {
// Preallocates a fixed 4 MB kernel buffer; recorded events are stored
// back-to-back in this storage and never reallocated.
PerformanceEventBuffer::PerformanceEventBuffer()
    : m_buffer(KBuffer::create_with_size(4 * MB))
{
}
// Records a single profiling event together with a raw stack backtrace of
// the current thread.
//
// type: one of the PERF_EVENT_* constants; any other value yields -EINVAL.
// arg1/arg2: event-specific payload — PERF_EVENT_MALLOC: (size, ptr),
//            PERF_EVENT_FREE: (ptr, unused).
// Returns -ENOBUFS once the preallocated buffer is full, KSuccess otherwise.
KResult PerformanceEventBuffer::append(int type, uintptr_t arg1, uintptr_t arg2)
{
    // Drop the event rather than grow: the backing KBuffer is fixed-size.
    if (count() >= capacity())
        return KResult(-ENOBUFS);
    PerformanceEvent event;
    event.type = type;
    switch (type) {
    case PERF_EVENT_MALLOC:
        event.data.malloc.size = arg1;
        event.data.malloc.ptr = arg2;
#ifdef VERY_DEBUG
        dbg() << "PERF_EVENT_MALLOC: " << (void*)event.data.malloc.ptr << " (" << event.data.malloc.size << ")";
#endif
        break;
    case PERF_EVENT_FREE:
        event.data.free.ptr = arg1;
#ifdef VERY_DEBUG
        dbg() << "PERF_EVENT_FREE: " << (void*)event.data.free.ptr;
#endif
        break;
    default:
        return KResult(-EINVAL);
    }
    // Capture the current frame pointer (x86-32 EBP) as the starting point
    // for the stack walk below.
    uintptr_t ebp;
    asm volatile("movl %%ebp, %%eax"
                 : "=a"(ebp));
    //copy_from_user(&ebp, (uintptr_t*)current->get_register_dump_from_stack().ebp);
    Vector<uintptr_t> backtrace;
    {
        // NOTE(review): SMAP is disabled for the walk — presumably because
        // raw_backtrace() dereferences userspace stack frames; confirm.
        SmapDisabler disabler;
        backtrace = Thread::current->raw_backtrace(ebp);
    }
    // Clamp to the fixed-size event.stack array; deeper frames are discarded.
    event.stack_size = min(sizeof(event.stack) / sizeof(uintptr_t), static_cast<size_t>(backtrace.size()));
    memcpy(event.stack, backtrace.data(), event.stack_size * sizeof(uintptr_t));
#ifdef VERY_DEBUG
    for (size_t i = 0; i < event.stack_size; ++i)
        dbg() << "    " << (void*)event.stack[i];
#endif
    // Timestamp is the kernel uptime counter at record time.
    event.timestamp = g_uptime;
    at(m_count++) = event;
    return KSuccess;
}
  78. PerformanceEvent& PerformanceEventBuffer::at(size_t index)
  79. {
  80. ASSERT(index < capacity());
  81. auto* events = reinterpret_cast<PerformanceEvent*>(m_buffer.data());
  82. return events[index];
  83. }
  84. KBuffer PerformanceEventBuffer::to_json(pid_t pid, const String& executable_path) const
  85. {
  86. KBufferBuilder builder;
  87. JsonObjectSerializer object(builder);
  88. object.add("pid", pid);
  89. object.add("executable", executable_path);
  90. auto array = object.add_array("events");
  91. for (size_t i = 0; i < m_count; ++i) {
  92. auto& event = at(i);
  93. auto event_object = array.add_object();
  94. switch (event.type) {
  95. case PERF_EVENT_MALLOC:
  96. event_object.add("type", "malloc");
  97. event_object.add("ptr", static_cast<u64>(event.data.malloc.ptr));
  98. event_object.add("size", static_cast<u64>(event.data.malloc.size));
  99. break;
  100. case PERF_EVENT_FREE:
  101. event_object.add("type", "free");
  102. event_object.add("ptr", static_cast<u64>(event.data.free.ptr));
  103. break;
  104. }
  105. event_object.add("timestamp", event.timestamp);
  106. auto stack_array = event_object.add_array("stack");
  107. for (size_t j = 0; j < event.stack_size; ++j) {
  108. stack_array.add(event.stack[j]);
  109. }
  110. stack_array.finish();
  111. event_object.finish();
  112. }
  113. array.finish();
  114. object.finish();
  115. return builder.build();
  116. }
  117. }