Heap.cpp

/*
 * Copyright (c) 2020-2022, Andreas Kling <kling@serenityos.org>
 * Copyright (c) 2023, Aliaksandr Kalenik <kalenik.aliaksandr@gmail.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Badge.h>
#include <AK/Debug.h>
#include <AK/HashTable.h>
#include <AK/JsonArray.h>
#include <AK/JsonObject.h>
#include <AK/StackInfo.h>
#include <AK/TemporaryChange.h>
#include <LibCore/ElapsedTimer.h>
#include <LibJS/Bytecode/Interpreter.h>
#include <LibJS/Heap/CellAllocator.h>
#include <LibJS/Heap/Handle.h>
#include <LibJS/Heap/Heap.h>
#include <LibJS/Heap/HeapBlock.h>
#include <LibJS/Runtime/Object.h>
#include <LibJS/Runtime/WeakContainer.h>
#include <LibJS/SafeFunction.h>
#include <setjmp.h>

#ifdef AK_OS_SERENITY
#    include <serenity.h>
#endif

#ifdef HAS_ADDRESS_SANITIZER
#    include <sanitizer/asan_interface.h>
#endif

namespace JS {

#ifdef AK_OS_SERENITY
static int gc_perf_string_id;
#endif
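
// The per-thread maps below record extra memory ranges (JS::SafeFunction captures) that must be scanned
// conservatively for cell pointers; they are populated by register_safe_function_closure() /
// unregister_safe_function_closure() at the bottom of this file and consumed by Heap::gather_conservative_roots().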
// NOTE: We keep a per-thread list of custom ranges. This hinges on the assumption that there is one JS VM per thread.
static __thread HashMap<FlatPtr*, size_t>* s_custom_ranges_for_conservative_scan = nullptr;
static __thread HashMap<FlatPtr*, SourceLocation*>* s_safe_function_locations = nullptr;
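
// The heap hands out cells from a set of size-based CellAllocators. Each allocator owns blocks of a fixed
// cell size; an allocation request is presumably served by the smallest size class that fits (see Heap.h).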
Heap::Heap(VM& vm)
    : HeapBase(vm)
{
#ifdef AK_OS_SERENITY
    auto gc_signpost_string = "Garbage collection"sv;
    gc_perf_string_id = perf_register_string(gc_signpost_string.characters_without_null_termination(), gc_signpost_string.length());
#endif

    if constexpr (HeapBlock::min_possible_cell_size <= 16) {
        m_size_based_cell_allocators.append(make<CellAllocator>(16));
    }
    static_assert(HeapBlock::min_possible_cell_size <= 24, "Heap Cell tracking uses too much data!");
    m_size_based_cell_allocators.append(make<CellAllocator>(32));
    m_size_based_cell_allocators.append(make<CellAllocator>(64));
    m_size_based_cell_allocators.append(make<CellAllocator>(96));
    m_size_based_cell_allocators.append(make<CellAllocator>(128));
    m_size_based_cell_allocators.append(make<CellAllocator>(256));
    m_size_based_cell_allocators.append(make<CellAllocator>(512));
    m_size_based_cell_allocators.append(make<CellAllocator>(1024));
    m_size_based_cell_allocators.append(make<CellAllocator>(3072));
}

Heap::~Heap()
{
    vm().string_cache().clear();
    vm().byte_string_cache().clear();
    collect_garbage(CollectionType::CollectEverything);
}
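
// Called by the allocator before each cell allocation. A collection is triggered either on every allocation
// (e.g. when GC stress testing is enabled) or once the bytes allocated since the last GC would exceed
// m_gc_bytes_threshold.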
void Heap::will_allocate(size_t size)
{
    if (should_collect_on_every_allocation()) {
        m_allocated_bytes_since_last_gc = 0;
        collect_garbage();
    } else if (m_allocated_bytes_since_last_gc + size > m_gc_bytes_threshold) {
        m_allocated_bytes_since_last_gc = 0;
        collect_garbage();
    }

    m_allocated_bytes_since_last_gc += size;
}
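
// Treats an arbitrary pointer-sized word as a potential Cell pointer: tagged Value payloads are decoded back to
// a canonical pointer, and anything outside the [min_block_address, max_block_address] range is rejected early.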
static void add_possible_value(HashMap<FlatPtr, HeapRoot>& possible_pointers, FlatPtr data, HeapRoot origin, FlatPtr min_block_address, FlatPtr max_block_address)
{
    if constexpr (sizeof(FlatPtr*) == sizeof(Value)) {
        // Because Value stores pointers in non-canonical form, we have to check whether the top bytes
        // match any pointer-backed tag; in that case we extract the pointer back into its
        // canonical form and add that as a possible pointer.
        FlatPtr possible_pointer;
        if ((data & SHIFTED_IS_CELL_PATTERN) == SHIFTED_IS_CELL_PATTERN)
            possible_pointer = Value::extract_pointer_bits(data);
        else
            possible_pointer = data;
        if (possible_pointer < min_block_address || possible_pointer > max_block_address)
            return;
        possible_pointers.set(possible_pointer, move(origin));
    } else {
        static_assert((sizeof(Value) % sizeof(FlatPtr*)) == 0);
        if (data < min_block_address || data > max_block_address)
            return;
        // In the 32-bit case we look at the top and bottom halves of a Value separately, so we simply
        // add both the upper and lower words as possible pointers.
        possible_pointers.set(data, move(origin));
    }
}

void Heap::find_min_and_max_block_addresses(FlatPtr& min_address, FlatPtr& max_address)
{
    min_address = explode_byte(0xff);
    max_address = 0;
    for_each_block([&](auto& block) {
        min_address = min(min_address, reinterpret_cast<FlatPtr>(&block));
        max_address = max(max_address, reinterpret_cast<FlatPtr>(&block) + HeapBlockBase::block_size);
        return IterationDecision::Continue;
    });
}
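
// Filters candidate pointers down to the ones that actually fall inside a live HeapBlock, and invokes the
// callback with the Cell that each surviving pointer points into (if any).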
template<typename Callback>
static void for_each_cell_among_possible_pointers(HashTable<HeapBlock*> const& all_live_heap_blocks, HashMap<FlatPtr, HeapRoot>& possible_pointers, Callback callback)
{
    for (auto possible_pointer : possible_pointers.keys()) {
        if (!possible_pointer)
            continue;
        auto* possible_heap_block = HeapBlock::from_cell(reinterpret_cast<Cell const*>(possible_pointer));
        if (!all_live_heap_blocks.contains(possible_heap_block))
            continue;
        if (auto* cell = possible_heap_block->cell_from_possible_pointer(possible_pointer)) {
            callback(cell, possible_pointer);
        }
    }
}
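
// Builds an object graph of the heap for debugging: every reachable cell becomes a node keyed by its address,
// and the references it visits become outgoing edges. Used by Heap::dump_graph().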
class GraphConstructorVisitor final : public Cell::Visitor {
public:
    explicit GraphConstructorVisitor(Heap& heap, HashMap<Cell*, HeapRoot> const& roots)
        : m_heap(heap)
    {
        m_heap.find_min_and_max_block_addresses(m_min_block_address, m_max_block_address);
        m_heap.for_each_block([&](auto& block) {
            m_all_live_heap_blocks.set(&block);
            return IterationDecision::Continue;
        });

        for (auto* root : roots.keys()) {
            visit(root);
            auto& graph_node = m_graph.ensure(reinterpret_cast<FlatPtr>(root));
            graph_node.class_name = root->class_name();
            graph_node.root_origin = *roots.get(root);
        }
    }

    virtual void visit_impl(Cell& cell) override
    {
        if (m_node_being_visited)
            m_node_being_visited->edges.set(reinterpret_cast<FlatPtr>(&cell));

        if (m_graph.get(reinterpret_cast<FlatPtr>(&cell)).has_value())
            return;

        m_work_queue.append(cell);
    }

    virtual void visit_possible_values(ReadonlyBytes bytes) override
    {
        HashMap<FlatPtr, HeapRoot> possible_pointers;

        auto* raw_pointer_sized_values = reinterpret_cast<FlatPtr const*>(bytes.data());
        for (size_t i = 0; i < (bytes.size() / sizeof(FlatPtr)); ++i)
            add_possible_value(possible_pointers, raw_pointer_sized_values[i], HeapRoot { .type = HeapRoot::Type::HeapFunctionCapturedPointer }, m_min_block_address, m_max_block_address);

        for_each_cell_among_possible_pointers(m_all_live_heap_blocks, possible_pointers, [&](Cell* cell, FlatPtr) {
            if (m_node_being_visited)
                m_node_being_visited->edges.set(reinterpret_cast<FlatPtr>(cell));

            if (m_graph.get(reinterpret_cast<FlatPtr>(cell)).has_value())
                return;
            m_work_queue.append(*cell);
        });
    }
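
    // Drains the work queue: each cell becomes the "node being visited" so that the edges discovered while
    // visiting it are attributed to it in the graph.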
    void visit_all_cells()
    {
        while (!m_work_queue.is_empty()) {
            auto ptr = reinterpret_cast<FlatPtr>(&m_work_queue.last());
            m_node_being_visited = &m_graph.ensure(ptr);
            m_node_being_visited->class_name = m_work_queue.last().class_name();
            m_work_queue.take_last().visit_edges(*this);
            m_node_being_visited = nullptr;
        }
    }
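
    // Serializes the graph as JSON: each node is keyed by its cell address and carries its class name, its
    // outgoing edges, and (for roots) a human-readable description of where the root came from.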
    AK::JsonObject dump()
    {
        auto graph = AK::JsonObject();
        for (auto& it : m_graph) {
            AK::JsonArray edges;
            for (auto const& value : it.value.edges) {
                edges.must_append(ByteString::formatted("{}", value));
            }

            auto node = AK::JsonObject();
            if (it.value.root_origin.has_value()) {
                auto type = it.value.root_origin->type;
                auto location = it.value.root_origin->location;
                switch (type) {
                case HeapRoot::Type::Handle:
                    node.set("root"sv, ByteString::formatted("Handle {} {}:{}", location->function_name(), location->filename(), location->line_number()));
                    break;
                case HeapRoot::Type::MarkedVector:
                    node.set("root"sv, "MarkedVector");
                    break;
                case HeapRoot::Type::RegisterPointer:
                    node.set("root"sv, "RegisterPointer");
                    break;
                case HeapRoot::Type::StackPointer:
                    node.set("root"sv, "StackPointer");
                    break;
                case HeapRoot::Type::VM:
                    node.set("root"sv, "VM");
                    break;
                case HeapRoot::Type::SafeFunction:
                    node.set("root"sv, ByteString::formatted("SafeFunction {} {}:{}", location->function_name(), location->filename(), location->line_number()));
                    break;
                default:
                    VERIFY_NOT_REACHED();
                }
            }

            node.set("class_name"sv, it.value.class_name);
            node.set("edges"sv, edges);
            graph.set(ByteString::number(it.key), node);
        }
        return graph;
    }

private:
    struct GraphNode {
        Optional<HeapRoot> root_origin;
        StringView class_name;
        HashTable<FlatPtr> edges {};
    };

    GraphNode* m_node_being_visited { nullptr };
    Vector<Cell&> m_work_queue;
    HashMap<FlatPtr, GraphNode> m_graph;

    Heap& m_heap;
    HashTable<HeapBlock*> m_all_live_heap_blocks;
    FlatPtr m_min_block_address;
    FlatPtr m_max_block_address;
};

AK::JsonObject Heap::dump_graph()
{
    HashMap<Cell*, HeapRoot> roots;
    gather_roots(roots);
    GraphConstructorVisitor visitor(*this, roots);
    vm().bytecode_interpreter().visit_edges(visitor);
    visitor.visit_all_cells();
    return visitor.dump();
}
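
// A regular collection runs in three phases: (1) gather roots (VM roots, handles, marked vectors, and a
// conservative scan of registers, the stack, and registered custom ranges), (2) mark everything reachable
// from those roots, and (3) finalize and sweep the unmarked cells. CollectEverything skips the marking
// phase entirely, so every cell is swept.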
void Heap::collect_garbage(CollectionType collection_type, bool print_report)
{
    VERIFY(!m_collecting_garbage);
    TemporaryChange change(m_collecting_garbage, true);

#ifdef AK_OS_SERENITY
    static size_t global_gc_counter = 0;
    perf_event(PERF_EVENT_SIGNPOST, gc_perf_string_id, global_gc_counter++);
#endif

    Core::ElapsedTimer collection_measurement_timer;
    if (print_report)
        collection_measurement_timer.start();

    if (collection_type == CollectionType::CollectGarbage) {
        if (m_gc_deferrals) {
            m_should_gc_when_deferral_ends = true;
            return;
        }
        HashMap<Cell*, HeapRoot> roots;
        gather_roots(roots);
        mark_live_cells(roots);
    }
    finalize_unmarked_cells();
    sweep_dead_cells(print_report, collection_measurement_timer);
}

void Heap::gather_roots(HashMap<Cell*, HeapRoot>& roots)
{
    vm().gather_roots(roots);
    gather_conservative_roots(roots);

    for (auto& handle : m_handles)
        roots.set(handle.cell(), HeapRoot { .type = HeapRoot::Type::Handle, .location = &handle.source_location() });

    for (auto& vector : m_marked_vectors)
        vector.gather_roots(roots);

    if constexpr (HEAP_DEBUG) {
        dbgln("gather_roots:");
        for (auto* root : roots.keys())
            dbgln(" + {}", root);
    }
}
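
// With ASAN's fake stacks (detect_stack_use_after_return), local variables may live in a heap-allocated fake
// frame rather than on the machine stack. If a word scanned off the real stack points into such a frame,
// scan the whole fake frame for possible cell pointers as well so those roots are not missed.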
#ifdef HAS_ADDRESS_SANITIZER
__attribute__((no_sanitize("address"))) void Heap::gather_asan_fake_stack_roots(HashMap<FlatPtr, HeapRoot>& possible_pointers, FlatPtr addr, FlatPtr min_block_address, FlatPtr max_block_address)
{
    void* begin = nullptr;
    void* end = nullptr;
    void* real_stack = __asan_addr_is_in_fake_stack(__asan_get_current_fake_stack(), reinterpret_cast<void*>(addr), &begin, &end);
    if (real_stack != nullptr) {
        for (auto* real_stack_addr = reinterpret_cast<void const* const*>(begin); real_stack_addr < end; ++real_stack_addr) {
            void const* real_address = *real_stack_addr;
            if (real_address == nullptr)
                continue;
            add_possible_value(possible_pointers, reinterpret_cast<FlatPtr>(real_address), HeapRoot { .type = HeapRoot::Type::StackPointer }, min_block_address, max_block_address);
        }
    }
}
#else
void Heap::gather_asan_fake_stack_roots(HashMap<FlatPtr, HeapRoot>&, FlatPtr, FlatPtr, FlatPtr)
{
}
#endif
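
// Conservative root scanning: spill the CPU registers into a jmp_buf via setjmp() and scan them, then scan the
// machine stack between this frame and the stack top recorded in StackInfo, any registered SafeFunction capture
// ranges, and all ConservativeVectors. Every word that might be a live cell pointer becomes a root.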
__attribute__((no_sanitize("address"))) void Heap::gather_conservative_roots(HashMap<Cell*, HeapRoot>& roots)
{
    FlatPtr dummy;

    dbgln_if(HEAP_DEBUG, "gather_conservative_roots:");

    jmp_buf buf;
    setjmp(buf);

    HashMap<FlatPtr, HeapRoot> possible_pointers;

    auto* raw_jmp_buf = reinterpret_cast<FlatPtr const*>(buf);

    FlatPtr min_block_address, max_block_address;
    find_min_and_max_block_addresses(min_block_address, max_block_address);

    for (size_t i = 0; i < ((size_t)sizeof(buf)) / sizeof(FlatPtr); ++i)
        add_possible_value(possible_pointers, raw_jmp_buf[i], HeapRoot { .type = HeapRoot::Type::RegisterPointer }, min_block_address, max_block_address);

    auto stack_reference = bit_cast<FlatPtr>(&dummy);
    auto& stack_info = m_vm.stack_info();

    for (FlatPtr stack_address = stack_reference; stack_address < stack_info.top(); stack_address += sizeof(FlatPtr)) {
        auto data = *reinterpret_cast<FlatPtr*>(stack_address);
        add_possible_value(possible_pointers, data, HeapRoot { .type = HeapRoot::Type::StackPointer }, min_block_address, max_block_address);
        gather_asan_fake_stack_roots(possible_pointers, data, min_block_address, max_block_address);
    }

    // NOTE: If we have any custom ranges registered, scan those as well.
    //       This is where JS::SafeFunction closures get marked.
    if (s_custom_ranges_for_conservative_scan) {
        for (auto& custom_range : *s_custom_ranges_for_conservative_scan) {
            for (size_t i = 0; i < (custom_range.value / sizeof(FlatPtr)); ++i) {
                auto safe_function_location = s_safe_function_locations->get(custom_range.key);
                add_possible_value(possible_pointers, custom_range.key[i], HeapRoot { .type = HeapRoot::Type::SafeFunction, .location = *safe_function_location }, min_block_address, max_block_address);
            }
        }
    }

    for (auto& vector : m_conservative_vectors) {
        for (auto possible_value : vector.possible_values()) {
            add_possible_value(possible_pointers, possible_value, HeapRoot { .type = HeapRoot::Type::ConservativeVector }, min_block_address, max_block_address);
        }
    }

    HashTable<HeapBlock*> all_live_heap_blocks;
    for_each_block([&](auto& block) {
        all_live_heap_blocks.set(&block);
        return IterationDecision::Continue;
    });

    for_each_cell_among_possible_pointers(all_live_heap_blocks, possible_pointers, [&](Cell* cell, FlatPtr possible_pointer) {
        if (cell->state() == Cell::State::Live) {
            dbgln_if(HEAP_DEBUG, " ?-> {}", (void const*)cell);
            roots.set(cell, *possible_pointers.get(possible_pointer));
        } else {
            dbgln_if(HEAP_DEBUG, " #-> {}", (void const*)cell);
        }
    });
}
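
// The marking phase: starting from the roots, set the mark bit on every reachable cell, following edges via a
// work queue. visit_possible_values() handles conservatively scanned byte ranges (e.g. HeapFunction captures).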
class MarkingVisitor final : public Cell::Visitor {
public:
    explicit MarkingVisitor(Heap& heap, HashMap<Cell*, HeapRoot> const& roots)
        : m_heap(heap)
    {
        m_heap.find_min_and_max_block_addresses(m_min_block_address, m_max_block_address);
        m_heap.for_each_block([&](auto& block) {
            m_all_live_heap_blocks.set(&block);
            return IterationDecision::Continue;
        });

        for (auto* root : roots.keys()) {
            visit(root);
        }
    }

    virtual void visit_impl(Cell& cell) override
    {
        if (cell.is_marked())
            return;
        dbgln_if(HEAP_DEBUG, " ! {}", &cell);

        cell.set_marked(true);
        m_work_queue.append(cell);
    }

    virtual void visit_possible_values(ReadonlyBytes bytes) override
    {
        HashMap<FlatPtr, HeapRoot> possible_pointers;

        auto* raw_pointer_sized_values = reinterpret_cast<FlatPtr const*>(bytes.data());
        for (size_t i = 0; i < (bytes.size() / sizeof(FlatPtr)); ++i)
            add_possible_value(possible_pointers, raw_pointer_sized_values[i], HeapRoot { .type = HeapRoot::Type::HeapFunctionCapturedPointer }, m_min_block_address, m_max_block_address);

        for_each_cell_among_possible_pointers(m_all_live_heap_blocks, possible_pointers, [&](Cell* cell, FlatPtr) {
            if (cell->is_marked())
                return;
            if (cell->state() != Cell::State::Live)
                return;
            cell->set_marked(true);
            m_work_queue.append(*cell);
        });
    }

    void mark_all_live_cells()
    {
        while (!m_work_queue.is_empty()) {
            m_work_queue.take_last().visit_edges(*this);
        }
    }

private:
    Heap& m_heap;
    Vector<Cell&> m_work_queue;
    HashTable<HeapBlock*> m_all_live_heap_blocks;
    FlatPtr m_min_block_address;
    FlatPtr m_max_block_address;
};
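
// Marks everything reachable from the given roots. Cells that were uprooted via uproot_cell() get their mark
// bit cleared again afterwards, so the following sweep can reclaim them even if something still references them.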
void Heap::mark_live_cells(HashMap<Cell*, HeapRoot> const& roots)
{
    dbgln_if(HEAP_DEBUG, "mark_live_cells:");

    MarkingVisitor visitor(*this, roots);

    vm().bytecode_interpreter().visit_edges(visitor);

    visitor.mark_all_live_cells();

    for (auto& inverse_root : m_uprooted_cells)
        inverse_root->set_marked(false);

    m_uprooted_cells.clear();
}

bool Heap::cell_must_survive_garbage_collection(Cell const& cell)
{
    if (!cell.overrides_must_survive_garbage_collection({}))
        return false;
    return cell.must_survive_garbage_collection();
}
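
// Run finalizers on all cells that are about to be swept. This happens in a separate pass before any
// deallocation, presumably so that finalizers can still safely access other dying cells.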
void Heap::finalize_unmarked_cells()
{
    for_each_block([&](auto& block) {
        block.template for_each_cell_in_state<Cell::State::Live>([](Cell* cell) {
            if (!cell->is_marked() && !cell_must_survive_garbage_collection(*cell))
                cell->finalize();
        });
        return IterationDecision::Continue;
    });
}
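
// The sweep phase: deallocate unmarked cells, clear the mark bit on survivors, prune dead entries from weak
// containers, hand empty blocks back to their allocators, and recompute the GC byte threshold from the number
// of live bytes. Optionally prints a collection report.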
void Heap::sweep_dead_cells(bool print_report, Core::ElapsedTimer const& measurement_timer)
{
    dbgln_if(HEAP_DEBUG, "sweep_dead_cells:");
    Vector<HeapBlock*, 32> empty_blocks;
    Vector<HeapBlock*, 32> full_blocks_that_became_usable;

    size_t collected_cells = 0;
    size_t live_cells = 0;
    size_t collected_cell_bytes = 0;
    size_t live_cell_bytes = 0;

    for_each_block([&](auto& block) {
        bool block_has_live_cells = false;
        bool block_was_full = block.is_full();
        block.template for_each_cell_in_state<Cell::State::Live>([&](Cell* cell) {
            if (!cell->is_marked() && !cell_must_survive_garbage_collection(*cell)) {
                dbgln_if(HEAP_DEBUG, " ~ {}", cell);
                block.deallocate(cell);
                ++collected_cells;
                collected_cell_bytes += block.cell_size();
            } else {
                cell->set_marked(false);
                block_has_live_cells = true;
                ++live_cells;
                live_cell_bytes += block.cell_size();
            }
        });
        if (!block_has_live_cells)
            empty_blocks.append(&block);
        else if (block_was_full != block.is_full())
            full_blocks_that_became_usable.append(&block);
        return IterationDecision::Continue;
    });

    for (auto& weak_container : m_weak_containers)
        weak_container.remove_dead_cells({});

    for (auto* block : empty_blocks) {
        dbgln_if(HEAP_DEBUG, " - HeapBlock empty @ {}: cell_size={}", block, block->cell_size());
        block->cell_allocator().block_did_become_empty({}, *block);
    }

    for (auto* block : full_blocks_that_became_usable) {
        dbgln_if(HEAP_DEBUG, " - HeapBlock usable again @ {}: cell_size={}", block, block->cell_size());
        block->cell_allocator().block_did_become_usable({}, *block);
    }

    if constexpr (HEAP_DEBUG) {
        for_each_block([&](auto& block) {
            dbgln(" > Live HeapBlock @ {}: cell_size={}", &block, block.cell_size());
            return IterationDecision::Continue;
        });
    }

    m_gc_bytes_threshold = live_cell_bytes > GC_MIN_BYTES_THRESHOLD ? live_cell_bytes : GC_MIN_BYTES_THRESHOLD;

    if (print_report) {
        Duration const time_spent = measurement_timer.elapsed_time();
        size_t live_block_count = 0;
        for_each_block([&](auto&) {
            ++live_block_count;
            return IterationDecision::Continue;
        });

        dbgln("Garbage collection report");
        dbgln("=============================================");
        dbgln(" Time spent: {} ms", time_spent.to_milliseconds());
        dbgln(" Live cells: {} ({} bytes)", live_cells, live_cell_bytes);
        dbgln("Collected cells: {} ({} bytes)", collected_cells, collected_cell_bytes);
        dbgln(" Live blocks: {} ({} bytes)", live_block_count, live_block_count * HeapBlock::block_size);
        dbgln(" Freed blocks: {} ({} bytes)", empty_blocks.size(), empty_blocks.size() * HeapBlock::block_size);
        dbgln("=============================================");
    }
}
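
// GC deferral: while at least one deferral is active, collect_garbage(CollectGarbage) only records that a
// collection was requested; the deferred collection runs when the last deferral ends.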
void Heap::defer_gc()
{
    ++m_gc_deferrals;
}

void Heap::undefer_gc()
{
    VERIFY(m_gc_deferrals > 0);
    --m_gc_deferrals;

    if (!m_gc_deferrals) {
        if (m_should_gc_when_deferral_ends)
            collect_garbage();
        m_should_gc_when_deferral_ends = false;
    }
}

void Heap::uproot_cell(Cell* cell)
{
    m_uprooted_cells.append(cell);
}
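
// Each SafeFunction registers the memory range holding its captured state here so that
// gather_conservative_roots() scans it; the SourceLocation is kept so the heap-graph dump can say which
// SafeFunction a root came from.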
void register_safe_function_closure(void* base, size_t size, SourceLocation* location)
{
    if (!s_custom_ranges_for_conservative_scan) {
        // FIXME: This per-thread HashMap is currently leaked on thread exit.
        s_custom_ranges_for_conservative_scan = new HashMap<FlatPtr*, size_t>;
    }
    if (!s_safe_function_locations) {
        s_safe_function_locations = new HashMap<FlatPtr*, SourceLocation*>;
    }
    auto result = s_custom_ranges_for_conservative_scan->set(reinterpret_cast<FlatPtr*>(base), size);
    VERIFY(result == AK::HashSetResult::InsertedNewEntry);
    result = s_safe_function_locations->set(reinterpret_cast<FlatPtr*>(base), location);
    VERIFY(result == AK::HashSetResult::InsertedNewEntry);
}

void unregister_safe_function_closure(void* base, size_t, SourceLocation*)
{
    VERIFY(s_custom_ranges_for_conservative_scan);
    bool did_remove_range = s_custom_ranges_for_conservative_scan->remove(reinterpret_cast<FlatPtr*>(base));
    VERIFY(did_remove_range);
    bool did_remove_location = s_safe_function_locations->remove(reinterpret_cast<FlatPtr*>(base));
    VERIFY(did_remove_location);
}

}