/*
 * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
 * Copyright (c) 2021, Tobias Christiansen <tobyase@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include "MallocTracer.h"
#include "Emulator.h"
#include "MmapRegion.h"
#include <AK/Debug.h>
#include <AK/TemporaryChange.h>
#include <mallocdefs.h>
#include <string.h>
#include <unistd.h>

namespace UserspaceEmulator {

MallocTracer::MallocTracer(Emulator& emulator)
    : m_emulator(emulator)
{
}
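
// Walks every mmap region that backs a malloc block and invokes the callback
// for each mallocation slot that is currently in use.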
template<typename Callback>
inline void MallocTracer::for_each_mallocation(Callback callback) const
{
    m_emulator.mmu().for_each_region([&](auto& region) {
        if (is<MmapRegion>(region) && static_cast<const MmapRegion&>(region).is_malloc_block()) {
            auto* malloc_data = static_cast<MmapRegion&>(region).malloc_metadata();
            for (auto& mallocation : malloc_data->mallocations) {
                if (mallocation.used && callback(mallocation) == IterationDecision::Break)
                    return IterationDecision::Break;
            }
        }
        return IterationDecision::Continue;
    });
}
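
// Attaches fresh MallocRegionMetadata to a region. Chunked blocks (chunk size
// within the malloc size classes) get one mallocation slot per chunk; anything
// larger is a big allocation block with a single slot.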
void MallocTracer::update_metadata(MmapRegion& mmap_region, size_t chunk_size)
{
    mmap_region.set_malloc_metadata({},
        adopt_own(*new MallocRegionMetadata {
            .region = mmap_region,
            .address = mmap_region.base(),
            .chunk_size = chunk_size,
            .mallocations = {},
        }));
    auto& malloc_data = *mmap_region.malloc_metadata();

    bool is_chunked_block = malloc_data.chunk_size <= size_classes[num_size_classes - 1];
    if (is_chunked_block)
        malloc_data.mallocations.resize((ChunkedBlock::block_size - sizeof(ChunkedBlock)) / malloc_data.chunk_size);
    else
        malloc_data.mallocations.resize(1);

    // Mark the containing mmap region as a malloc block!
    mmap_region.set_malloc(true);
}
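
// Hook invoked by the emulator whenever the emulated program's malloc()
// returns. Resets the shadow (value-tracking) bytes for the new block and
// records the allocation together with a backtrace. Addresses are looked up
// as { 0x23, address } pairs through the SoftMMU; 0x23 is the userspace data
// segment selector.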
void MallocTracer::target_did_malloc(Badge<Emulator>, FlatPtr address, size_t size)
{
    if (m_emulator.is_in_loader_code())
        return;
    auto* region = m_emulator.mmu().find_region({ 0x23, address });
    VERIFY(region);
    auto& mmap_region = verify_cast<MmapRegion>(*region);

    auto* shadow_bits = mmap_region.shadow_data() + address - mmap_region.base();
    memset(shadow_bits, 0, size);

    if (auto* existing_mallocation = find_mallocation(address)) {
        VERIFY(existing_mallocation->freed);
        existing_mallocation->size = size;
        existing_mallocation->freed = false;
        existing_mallocation->malloc_backtrace = m_emulator.raw_backtrace();
        existing_mallocation->free_backtrace.clear();
        return;
    }

    if (!mmap_region.is_malloc_block()) {
        auto chunk_size = mmap_region.read32(offsetof(CommonHeader, m_size)).value();
        update_metadata(mmap_region, chunk_size);
    }
    auto* mallocation = mmap_region.malloc_metadata()->mallocation_for_address(address);
    VERIFY(mallocation);
    *mallocation = { address, size, true, false, m_emulator.raw_backtrace(), Vector<FlatPtr>() };
}

void MallocTracer::target_did_change_chunk_size(Badge<Emulator>, FlatPtr block, size_t chunk_size)
{
    if (m_emulator.is_in_loader_code())
        return;
    auto* region = m_emulator.mmu().find_region({ 0x23, block });
    VERIFY(region);
    auto& mmap_region = verify_cast<MmapRegion>(*region);
    update_metadata(mmap_region, chunk_size);
}
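
// Maps an address inside a malloc block to its mallocation slot. A big
// allocation block has only one slot; for a chunked block the index is
// (offset past the ChunkedBlock header) / chunk_size.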
ALWAYS_INLINE Mallocation* MallocRegionMetadata::mallocation_for_address(FlatPtr address) const
{
    auto index = chunk_index_for_address(address);
    if (!index.has_value())
        return nullptr;
    return &const_cast<Mallocation&>(this->mallocations[index.value()]);
}

ALWAYS_INLINE Optional<size_t> MallocRegionMetadata::chunk_index_for_address(FlatPtr address) const
{
    bool is_chunked_block = chunk_size <= size_classes[num_size_classes - 1];
    if (!is_chunked_block) {
        // This is a BigAllocationBlock
        return 0;
    }
    auto offset_into_block = address - this->address;
    if (offset_into_block < sizeof(ChunkedBlock))
        return 0;
    auto chunk_offset = offset_into_block - sizeof(ChunkedBlock);
    auto chunk_index = chunk_offset / this->chunk_size;
    if (chunk_index >= mallocations.size())
        return {};
    return chunk_index;
}
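
// Hook invoked when the emulated program calls free(). Flags double frees
// (the mallocation is already marked freed) and invalid frees (the address
// was never returned by malloc()).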
void MallocTracer::target_did_free(Badge<Emulator>, FlatPtr address)
{
    if (!address)
        return;
    if (m_emulator.is_in_loader_code())
        return;

    if (auto* mallocation = find_mallocation(address)) {
        if (mallocation->freed) {
            reportln("\n=={}== \033[31;1mDouble free()\033[0m, {:p}", getpid(), address);
            reportln("=={}== Address {} has already been passed to free()", getpid(), address);
            m_emulator.dump_backtrace();
        } else {
            mallocation->freed = true;
            mallocation->free_backtrace = m_emulator.raw_backtrace();
        }
        return;
    }

    reportln("\n=={}== \033[31;1mInvalid free()\033[0m, {:p}", getpid(), address);
    reportln("=={}== Address {} has never been returned by malloc()", getpid(), address);
    m_emulator.dump_backtrace();
}
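
// Hook invoked after the emulated program's realloc(). The block address is
// unchanged (the existing mallocation is reused), so only the recorded size,
// the shadow bytes covering the resized tail, and the backtrace are updated.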
void MallocTracer::target_did_realloc(Badge<Emulator>, FlatPtr address, size_t size)
{
    if (m_emulator.is_in_loader_code())
        return;
    auto* region = m_emulator.mmu().find_region({ 0x23, address });
    VERIFY(region);
    auto& mmap_region = verify_cast<MmapRegion>(*region);

    VERIFY(mmap_region.is_malloc_block());

    auto* existing_mallocation = find_mallocation(address);
    VERIFY(existing_mallocation);
    VERIFY(!existing_mallocation->freed);

    size_t old_size = existing_mallocation->size;

    auto* shadow_bits = mmap_region.shadow_data() + address - mmap_region.base();

    if (size > old_size) {
        memset(shadow_bits + old_size, 1, size - old_size);
    } else {
        memset(shadow_bits + size, 1, old_size - size);
    }

    existing_mallocation->size = size;
    // FIXME: Should we track malloc/realloc backtrace separately perhaps?
    existing_mallocation->malloc_backtrace = m_emulator.raw_backtrace();
}

Mallocation* MallocTracer::find_mallocation(FlatPtr address)
{
    auto* region = m_emulator.mmu().find_region({ 0x23, address });
    if (!region)
        return nullptr;
    return find_mallocation(*region, address);
}
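
// Linear scans over all mallocations to find the nearest neighbor below or
// above an address. Used to describe where an out-of-bounds access landed
// relative to known heap blocks.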
Mallocation* MallocTracer::find_mallocation_before(FlatPtr address)
{
    Mallocation* found_mallocation = nullptr;
    for_each_mallocation([&](auto& mallocation) {
        if (mallocation.address >= address)
            return IterationDecision::Continue;
        if (!found_mallocation || (mallocation.address > found_mallocation->address))
            found_mallocation = const_cast<Mallocation*>(&mallocation);
        return IterationDecision::Continue;
    });
    return found_mallocation;
}

Mallocation* MallocTracer::find_mallocation_after(FlatPtr address)
{
    Mallocation* found_mallocation = nullptr;
    for_each_mallocation([&](auto& mallocation) {
        if (mallocation.address <= address)
            return IterationDecision::Continue;
        if (!found_mallocation || (mallocation.address < found_mallocation->address))
            found_mallocation = const_cast<Mallocation*>(&mallocation);
        return IterationDecision::Continue;
    });
    return found_mallocation;
}
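
// Auditing hooks, invoked for every emulated read/write into a malloc block.
// An access outside any live mallocation is reported as a heap buffer
// overflow (with the nearest block before/after for context); an access into
// a freed mallocation is reported as a use-after-free.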
void MallocTracer::audit_read(const Region& region, FlatPtr address, size_t size)
{
    if (!m_auditing_enabled)
        return;

    if (m_emulator.is_memory_auditing_suppressed())
        return;

    if (m_emulator.is_in_libsystem())
        return;

    if (m_emulator.is_in_loader_code())
        return;

    auto* mallocation = find_mallocation(region, address);

    if (!mallocation) {
        reportln("\n=={}== \033[31;1mHeap buffer overflow\033[0m, invalid {}-byte read at address {:p}", getpid(), size, address);
        m_emulator.dump_backtrace();
        auto* mallocation_before = find_mallocation_before(address);
        auto* mallocation_after = find_mallocation_after(address);
        size_t distance_to_mallocation_before = mallocation_before ? (address - mallocation_before->address - mallocation_before->size) : 0;
        size_t distance_to_mallocation_after = mallocation_after ? (mallocation_after->address - address) : 0;
        if (mallocation_before && (!mallocation_after || distance_to_mallocation_before < distance_to_mallocation_after)) {
            reportln("=={}== Address is {} byte(s) after block of size {}, identity {:p}, allocated at:", getpid(), distance_to_mallocation_before, mallocation_before->size, mallocation_before->address);
            m_emulator.dump_backtrace(mallocation_before->malloc_backtrace);
            return;
        }
        if (mallocation_after && (!mallocation_before || distance_to_mallocation_after < distance_to_mallocation_before)) {
            reportln("=={}== Address is {} byte(s) before block of size {}, identity {:p}, allocated at:", getpid(), distance_to_mallocation_after, mallocation_after->size, mallocation_after->address);
            m_emulator.dump_backtrace(mallocation_after->malloc_backtrace);
        }
        return;
    }

    size_t offset_into_mallocation = address - mallocation->address;

    if (mallocation->freed) {
        reportln("\n=={}== \033[31;1mUse-after-free\033[0m, invalid {}-byte read at address {:p}", getpid(), size, address);
        m_emulator.dump_backtrace();
        reportln("=={}== Address is {} byte(s) into block of size {}, allocated at:", getpid(), offset_into_mallocation, mallocation->size);
        m_emulator.dump_backtrace(mallocation->malloc_backtrace);
        reportln("=={}== Later freed at:", getpid());
        m_emulator.dump_backtrace(mallocation->free_backtrace);
        return;
    }
}

void MallocTracer::audit_write(const Region& region, FlatPtr address, size_t size)
{
    if (!m_auditing_enabled)
        return;

    if (m_emulator.is_memory_auditing_suppressed())
        return;

    if (m_emulator.is_in_loader_code())
        return;

    auto* mallocation = find_mallocation(region, address);
    if (!mallocation) {
        reportln("\n=={}== \033[31;1mHeap buffer overflow\033[0m, invalid {}-byte write at address {:p}", getpid(), size, address);
        m_emulator.dump_backtrace();
        auto* mallocation_before = find_mallocation_before(address);
        auto* mallocation_after = find_mallocation_after(address);
        size_t distance_to_mallocation_before = mallocation_before ? (address - mallocation_before->address - mallocation_before->size) : 0;
        size_t distance_to_mallocation_after = mallocation_after ? (mallocation_after->address - address) : 0;
        if (mallocation_before && (!mallocation_after || distance_to_mallocation_before < distance_to_mallocation_after)) {
            reportln("=={}== Address is {} byte(s) after block of size {}, identity {:p}, allocated at:", getpid(), distance_to_mallocation_before, mallocation_before->size, mallocation_before->address);
            m_emulator.dump_backtrace(mallocation_before->malloc_backtrace);
            return;
        }
        if (mallocation_after && (!mallocation_before || distance_to_mallocation_after < distance_to_mallocation_before)) {
            reportln("=={}== Address is {} byte(s) before block of size {}, identity {:p}, allocated at:", getpid(), distance_to_mallocation_after, mallocation_after->size, mallocation_after->address);
            m_emulator.dump_backtrace(mallocation_after->malloc_backtrace);
        }
        return;
    }

    size_t offset_into_mallocation = address - mallocation->address;

    if (mallocation->freed) {
        reportln("\n=={}== \033[31;1mUse-after-free\033[0m, invalid {}-byte write at address {:p}", getpid(), size, address);
        m_emulator.dump_backtrace();
        reportln("=={}== Address is {} byte(s) into block of size {}, allocated at:", getpid(), offset_into_mallocation, mallocation->size);
        m_emulator.dump_backtrace(mallocation->malloc_backtrace);
        reportln("=={}== Later freed at:", getpid());
        m_emulator.dump_backtrace(mallocation->free_backtrace);
        return;
    }
}
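
// Builds a reachability graph for leak detection: one node per live
// mallocation, where each node's edge list holds the addresses of the other
// live blocks its contents point to. Blocks pointed to by readable
// non-malloc regions (excluding stack and text) are marked reachable, and
// reachability is then propagated along the edges; anything left unreachable
// is a leak candidate.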
void MallocTracer::populate_memory_graph()
{
    // Create Node for each live Mallocation
    for_each_mallocation([&](auto& mallocation) {
        if (mallocation.freed)
            return IterationDecision::Continue;
        m_memory_graph.set(mallocation.address, {});
        return IterationDecision::Continue;
    });

    // Find pointers from each memory region to another
    for_each_mallocation([&](auto& mallocation) {
        if (mallocation.freed)
            return IterationDecision::Continue;

        size_t pointers_in_mallocation = mallocation.size / sizeof(u32);
        auto& edges_from_mallocation = m_memory_graph.find(mallocation.address)->value;

        for (size_t i = 0; i < pointers_in_mallocation; ++i) {
            auto value = m_emulator.mmu().read32({ 0x23, mallocation.address + i * sizeof(u32) });
            auto other_address = value.value();
            if (!value.is_uninitialized() && m_memory_graph.contains(value.value())) {
                if constexpr (REACHABLE_DEBUG)
                    reportln("region/mallocation {:p} is reachable from other mallocation {:p}", other_address, mallocation.address);
                edges_from_mallocation.edges_from_node.append(other_address);
            }
        }
        return IterationDecision::Continue;
    });

    // Find mallocations that are pointed to by other regions
    Vector<FlatPtr> reachable_mallocations = {};
    m_emulator.mmu().for_each_region([&](auto& region) {
        // Skip the stack
        if (region.is_stack())
            return IterationDecision::Continue;
        if (region.is_text())
            return IterationDecision::Continue;
        if (!region.is_readable())
            return IterationDecision::Continue;
        // Skip malloc blocks
        if (is<MmapRegion>(region) && static_cast<const MmapRegion&>(region).is_malloc_block())
            return IterationDecision::Continue;

        size_t pointers_in_region = region.size() / sizeof(u32);

        for (size_t i = 0; i < pointers_in_region; ++i) {
            auto value = region.read32(i * sizeof(u32));
            auto other_address = value.value();
            if (!value.is_uninitialized() && m_memory_graph.contains(value.value())) {
                if constexpr (REACHABLE_DEBUG)
                    reportln("region/mallocation {:p} is reachable from region {:p}-{:p}", other_address, region.base(), region.end() - 1);
                m_memory_graph.find(other_address)->value.is_reachable = true;
                reachable_mallocations.append(other_address);
            }
        }
        return IterationDecision::Continue;
    });

    // Propagate reachability
    // There are probably better ways to do that
    Vector<FlatPtr> visited = {};
    for (size_t i = 0; i < reachable_mallocations.size(); ++i) {
        auto reachable = reachable_mallocations.at(i);
        if (visited.contains_slow(reachable))
            continue;
        visited.append(reachable);
        auto& mallocation_node = m_memory_graph.find(reachable)->value;

        if (!mallocation_node.is_reachable)
            mallocation_node.is_reachable = true;

        for (auto& edge : mallocation_node.edges_from_node) {
            reachable_mallocations.append(edge);
        }
    }
}

void MallocTracer::dump_memory_graph()
{
    for (auto& key : m_memory_graph.keys()) {
        auto value = m_memory_graph.find(key)->value;
        dbgln("Block {:p} [{}reachable] ({} edges)", key, !value.is_reachable ? "not " : "", value.edges_from_node.size());
        for (auto& edge : value.edges_from_node) {
            dbgln(" -> {:p}", edge);
        }
    }
}
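
// Final leak report: rebuilds the memory graph with auditing temporarily
// disabled, then reports every live mallocation whose node was never marked
// reachable, with its allocation backtrace and a total byte count.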
void MallocTracer::dump_leak_report()
{
    TemporaryChange change(m_auditing_enabled, false);

    size_t bytes_leaked = 0;
    size_t leaks_found = 0;

    populate_memory_graph();

    if constexpr (REACHABLE_DEBUG)
        dump_memory_graph();

    for_each_mallocation([&](auto& mallocation) {
        if (mallocation.freed)
            return IterationDecision::Continue;
        auto& value = m_memory_graph.find(mallocation.address)->value;
        if (value.is_reachable)
            return IterationDecision::Continue;
        ++leaks_found;
        bytes_leaked += mallocation.size;
        reportln("\n=={}== \033[31;1mLeak\033[0m, {}-byte allocation at address {:p}", getpid(), mallocation.size, mallocation.address);
        m_emulator.dump_backtrace(mallocation.malloc_backtrace);
        return IterationDecision::Continue;
    });

    if (!leaks_found)
        reportln("\n=={}== \033[32;1mNo leaks found!\033[0m", getpid());
    else
        reportln("\n=={}== \033[31;1m{} leak(s) found: {} byte(s) leaked\033[0m", getpid(), leaks_found, bytes_leaked);
}

}