// KSyms.cpp — kernel symbol table loading and backtrace symbolication.
/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */
  6. #include <AK/TemporaryChange.h>
  7. #include <Kernel/Arch/SafeMem.h>
  8. #include <Kernel/Arch/SmapDisabler.h>
  9. #include <Kernel/FileSystem/OpenFileDescription.h>
  10. #include <Kernel/KSyms.h>
  11. #include <Kernel/Sections.h>
  12. #include <Kernel/Tasks/Process.h>
  13. #include <Kernel/Tasks/Scheduler.h>
  14. namespace Kernel {
// Lowest/highest addresses present in the symbol table; used by
// symbolicate_kernel_address() for a cheap out-of-range rejection before
// scanning. Reset and recomputed by load_kernel_symbols_from_data().
FlatPtr g_lowest_kernel_symbol_address = 0xffffffff;
FlatPtr g_highest_kernel_symbol_address = 0;
// Becomes true once load_kernel_symbols_from_data() has populated s_symbols.
bool g_kernel_symbols_available = false;

extern "C" {
// Raw symbol-file bytes living in a dedicated linker section.
// NOTE(review): presumably filled in by the build system or boot loader —
// the writer of this section is not visible in this file; confirm.
__attribute__((section(".kernel_symbols"))) char kernel_symbols[5 * MiB] {};
}

// Parsed symbol table, allocated in load_kernel_symbols_from_data().
// NOTE(review): symbolicate_kernel_address() assumes entries are sorted in
// ascending address order — presumably guaranteed by the symbol file; confirm.
static KernelSymbol* s_symbols;
static size_t s_symbol_count = 0;
  23. UNMAP_AFTER_INIT static u8 parse_hex_digit(char nibble)
  24. {
  25. if (nibble >= '0' && nibble <= '9')
  26. return nibble - '0';
  27. VERIFY(nibble >= 'a' && nibble <= 'f');
  28. return 10 + (nibble - 'a');
  29. }
  30. FlatPtr address_for_kernel_symbol(StringView name)
  31. {
  32. for (size_t i = 0; i < s_symbol_count; ++i) {
  33. auto const& symbol = s_symbols[i];
  34. if (name == symbol.name)
  35. return symbol.address;
  36. }
  37. return 0;
  38. }
  39. KernelSymbol const* symbolicate_kernel_address(FlatPtr address)
  40. {
  41. if (address < g_lowest_kernel_symbol_address || address > g_highest_kernel_symbol_address)
  42. return nullptr;
  43. for (unsigned i = 0; i < s_symbol_count; ++i) {
  44. if (address < s_symbols[i + 1].address)
  45. return &s_symbols[i];
  46. }
  47. return nullptr;
  48. }
// Parses the raw kernel symbol blob in `buffer` into s_symbols and updates the
// lowest/highest address bounds used for fast range checks.
//
// Expected input format (presumably derived from nm-style output — confirm
// against the build script that generates the .kernel_symbols section):
//   line 1:   8 lowercase hex digits = symbol count, then '\n'
//   per line: sizeof(void*)*2 hex digits of address, 3 skipped bytes,
//             the symbol name, then '\n'
UNMAP_AFTER_INIT static void load_kernel_symbols_from_data(Bytes buffer)
{
    g_lowest_kernel_symbol_address = 0xffffffff;
    g_highest_kernel_symbol_address = 0;
    auto* bufptr = (char*)buffer.data();
    auto* start_of_name = bufptr;
    FlatPtr address = 0;

    // Header: symbol count as exactly 8 hex digits.
    for (size_t i = 0; i < 8; ++i)
        s_symbol_count = (s_symbol_count << 4) | parse_hex_digit(*(bufptr++));
    s_symbols = static_cast<KernelSymbol*>(kmalloc(sizeof(KernelSymbol) * s_symbol_count));
    ++bufptr; // skip newline

    dmesgln("Loading kernel symbol table...");

    size_t current_symbol_index = 0;
    while ((u8 const*)bufptr < buffer.data() + buffer.size()) {
        // Address field: one hex digit per nibble of a pointer.
        for (size_t i = 0; i < sizeof(void*) * 2; ++i)
            address = (address << 4) | parse_hex_digit(*(bufptr++));
        // Skip 3 bytes between address and name — presumably the nm-style
        // " T " type field; confirm against the generator.
        bufptr += 3;
        start_of_name = bufptr;
        // Advance to the newline terminating the symbol name.
        while (*(++bufptr)) {
            if (*bufptr == '\n') {
                break;
            }
        }
        auto& ksym = s_symbols[current_symbol_index];
        // FIXME: Remove this ifdef once the aarch64 kernel is loaded by the Prekernel.
        // Currently, the aarch64 kernel is linked at a high virtual memory address, instead
        // of zero, so the address of a symbol does not need to be offset by the kernel_load_base.
#if ARCH(X86_64)
        ksym.address = kernel_load_base + address;
#elif ARCH(AARCH64)
        ksym.address = address;
#else
#    error "Unknown architecture"
#endif
        ksym.name = start_of_name;
        // NUL-terminate the name in place, overwriting the trailing newline,
        // so ksym.name points at a C string inside the original buffer.
        *bufptr = '\0';
        if (ksym.address < g_lowest_kernel_symbol_address)
            g_lowest_kernel_symbol_address = ksym.address;
        if (ksym.address > g_highest_kernel_symbol_address)
            g_highest_kernel_symbol_address = ksym.address;
        ++bufptr;
        ++current_symbol_index;
    }
    g_kernel_symbols_available = true;
}
// Walks the frame-pointer chain starting at `base_pointer` and prints one
// line per stack frame. With `use_ksyms` the return addresses are resolved
// through the kernel symbol table (which must already be loaded); without it,
// raw addresses are printed as the walk proceeds. Output goes to the debug
// log or screen depending on `print_to_screen`.
NEVER_INLINE static void dump_backtrace_impl(FlatPtr base_pointer, bool use_ksyms, PrintToScreen print_to_screen)
{
// Routes one formatted line to dbgln() or critical_dmesgln() depending on the
// requested output target.
#define PRINT_LINE(fmtstr, ...)                    \
    do {                                           \
        if (print_to_screen == PrintToScreen::No)  \
            dbgln(fmtstr, __VA_ARGS__);            \
        else                                       \
            critical_dmesgln(fmtstr, __VA_ARGS__); \
    } while (0)

    SmapDisabler disabler;
    // Requesting symbolication before the table is loaded is a fatal bug.
    if (use_ksyms && !g_kernel_symbols_available)
        Processor::halt();

    struct RecognizedSymbol {
        FlatPtr address;
        KernelSymbol const* symbol { nullptr };
    };
    constexpr size_t max_recognized_symbol_count = 256;
    RecognizedSymbol recognized_symbols[max_recognized_symbol_count];
    size_t recognized_symbol_count = 0;
    if (use_ksyms) {
        // Each frame is read as [saved frame pointer, return address] via
        // safe_memcpy, so a corrupt frame chain cannot fault the dumper.
        FlatPtr copied_stack_ptr[2];
        for (FlatPtr* stack_ptr = (FlatPtr*)base_pointer; stack_ptr && recognized_symbol_count < max_recognized_symbol_count; stack_ptr = (FlatPtr*)copied_stack_ptr[0]) {
            // Stop once the frame pointer leaves kernel address space.
            if ((FlatPtr)stack_ptr < kernel_mapping_base)
                break;
            void* fault_at;
            if (!safe_memcpy(copied_stack_ptr, stack_ptr, sizeof(copied_stack_ptr), fault_at))
                break;
            FlatPtr retaddr = copied_stack_ptr[1];
            recognized_symbols[recognized_symbol_count++] = { retaddr, symbolicate_kernel_address(retaddr) };
        }
    } else {
        // No symbol table: print raw return addresses while walking.
        void* fault_at;
        FlatPtr copied_stack_ptr[2];
        FlatPtr* stack_ptr = (FlatPtr*)base_pointer;
        while (stack_ptr && safe_memcpy(copied_stack_ptr, stack_ptr, sizeof(copied_stack_ptr), fault_at)) {
            FlatPtr retaddr = copied_stack_ptr[1];
            PRINT_LINE("{:p} (next: {:p})", retaddr, stack_ptr ? (FlatPtr*)copied_stack_ptr[0] : 0);
            stack_ptr = (FlatPtr*)copied_stack_ptr[0];
        }
        return;
    }

    VERIFY(recognized_symbol_count <= max_recognized_symbol_count);
    for (size_t i = 0; i < recognized_symbol_count; ++i) {
        auto& symbol = recognized_symbols[i];
        if (!symbol.address)
            break;
        if (!symbol.symbol) {
            PRINT_LINE("Kernel + {:p}", symbol.address - kernel_load_base);
            continue;
        }
        size_t offset = symbol.address - symbol.symbol->address;
        // An address far (>4 KiB) past the start of the highest symbol is
        // presumably not inside that symbol; print it unsymbolicated.
        if (symbol.symbol->address == g_highest_kernel_symbol_address && offset > 4096)
            PRINT_LINE("Kernel + {:p}", symbol.address - kernel_load_base);
        else
            PRINT_LINE("Kernel + {:p} {} +{:#x}", symbol.address - kernel_load_base, symbol.symbol->name, offset);
    }
}
  151. void dump_backtrace_from_base_pointer(FlatPtr base_pointer)
  152. {
  153. // FIXME: Change signature of dump_backtrace_impl to use an enum instead of a bool.
  154. dump_backtrace_impl(base_pointer, /*use_ksym=*/false, PrintToScreen::No);
  155. }
  156. void dump_backtrace(PrintToScreen print_to_screen)
  157. {
  158. static bool in_dump_backtrace = false;
  159. if (in_dump_backtrace)
  160. return;
  161. TemporaryChange change(in_dump_backtrace, true);
  162. TemporaryChange disable_kmalloc_stacks(g_dump_kmalloc_stacks, false);
  163. FlatPtr base_pointer = (FlatPtr)__builtin_frame_address(0);
  164. dump_backtrace_impl(base_pointer, g_kernel_symbols_available, print_to_screen);
  165. }
  166. UNMAP_AFTER_INIT void load_kernel_symbol_table()
  167. {
  168. auto kernel_symbols_size = strnlen(kernel_symbols, sizeof(kernel_symbols));
  169. // If we're hitting this VERIFY the kernel symbol file has grown beyond
  170. // the array size of kernel_symbols. Try making the array larger.
  171. VERIFY(kernel_symbols_size != sizeof(kernel_symbols));
  172. load_kernel_symbols_from_data({ kernel_symbols, kernel_symbols_size });
  173. }
  174. }