Heap.cpp

/*
 * Copyright (c) 2020-2022, Andreas Kling <kling@serenityos.org>
 * Copyright (c) 2023, Aliaksandr Kalenik <kalenik.aliaksandr@gmail.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Badge.h>
#include <AK/Debug.h>
#include <AK/HashTable.h>
#include <AK/JsonArray.h>
#include <AK/JsonObject.h>
#include <AK/StackInfo.h>
#include <AK/TemporaryChange.h>
#include <LibCore/ElapsedTimer.h>
#include <LibJS/Bytecode/Interpreter.h>
#include <LibJS/Heap/CellAllocator.h>
#include <LibJS/Heap/Handle.h>
#include <LibJS/Heap/Heap.h>
#include <LibJS/Heap/HeapBlock.h>
#include <LibJS/Runtime/Object.h>
#include <LibJS/Runtime/WeakContainer.h>
#include <LibJS/SafeFunction.h>
#include <setjmp.h>

#ifdef AK_OS_SERENITY
#    include <serenity.h>
#endif

#ifdef HAS_ADDRESS_SANITIZER
#    include <sanitizer/asan_interface.h>
#endif

namespace JS {

#ifdef AK_OS_SERENITY
static int gc_perf_string_id;
#endif

// NOTE: We keep a per-thread list of custom ranges. This hinges on the assumption that there is one JS VM per thread.
static __thread HashMap<FlatPtr*, size_t>* s_custom_ranges_for_conservative_scan = nullptr;
static __thread HashMap<FlatPtr*, SourceLocation*>* s_safe_function_locations = nullptr;

Heap::Heap(VM& vm)
    : HeapBase(vm)
{
#ifdef AK_OS_SERENITY
    auto gc_signpost_string = "Garbage collection"sv;
    gc_perf_string_id = perf_register_string(gc_signpost_string.characters_without_null_termination(), gc_signpost_string.length());
#endif

    if constexpr (HeapBlock::min_possible_cell_size <= 16) {
        m_size_based_cell_allocators.append(make<CellAllocator>(16));
    }
    static_assert(HeapBlock::min_possible_cell_size <= 24, "Heap Cell tracking uses too much data!");
    m_size_based_cell_allocators.append(make<CellAllocator>(32));
    m_size_based_cell_allocators.append(make<CellAllocator>(64));
    m_size_based_cell_allocators.append(make<CellAllocator>(96));
    m_size_based_cell_allocators.append(make<CellAllocator>(128));
    m_size_based_cell_allocators.append(make<CellAllocator>(256));
    m_size_based_cell_allocators.append(make<CellAllocator>(512));
    m_size_based_cell_allocators.append(make<CellAllocator>(1024));
    m_size_based_cell_allocators.append(make<CellAllocator>(3072));
}

Heap::~Heap()
{
    vm().string_cache().clear();
    vm().byte_string_cache().clear();
    collect_garbage(CollectionType::CollectEverything);
}
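
// GC trigger heuristic: collect when the bytes allocated since the last collection would
// exceed the current threshold, or on every allocation when should_collect_on_every_allocation()
// is set (GC stress mode).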
void Heap::will_allocate(size_t size)
{
    if (should_collect_on_every_allocation()) {
        m_allocated_bytes_since_last_gc = 0;
        collect_garbage();
    } else if (m_allocated_bytes_since_last_gc + size > m_gc_bytes_threshold) {
        m_allocated_bytes_since_last_gc = 0;
        collect_garbage();
    }

    m_allocated_bytes_since_last_gc += size;
}
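
// Records `data` as a candidate Cell pointer for the conservative scan, provided it falls
// inside [min_block_address, max_block_address]. Value-encoded pointers are decoded first.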
static void add_possible_value(HashMap<FlatPtr, HeapRoot>& possible_pointers, FlatPtr data, HeapRoot origin, FlatPtr min_block_address, FlatPtr max_block_address)
{
    if constexpr (sizeof(FlatPtr*) == sizeof(Value)) {
        // Because Value stores pointers in non-canonical form we have to check if the top bytes
        // match any pointer-backed tag; in that case we have to extract the pointer to its
        // canonical form and add that as a possible pointer.
        FlatPtr possible_pointer;
        if ((data & SHIFTED_IS_CELL_PATTERN) == SHIFTED_IS_CELL_PATTERN)
            possible_pointer = Value::extract_pointer_bits(data);
        else
            possible_pointer = data;
        if (possible_pointer < min_block_address || possible_pointer > max_block_address)
            return;
        possible_pointers.set(possible_pointer, move(origin));
    } else {
        static_assert((sizeof(Value) % sizeof(FlatPtr*)) == 0);
        if (data < min_block_address || data > max_block_address)
            return;
        // In the 32-bit case we will look at the top and bottom part of Value separately, so we
        // just add both the upper and lower bytes as possible pointers.
        possible_pointers.set(data, move(origin));
    }
}

void Heap::find_min_and_max_block_addresses(FlatPtr& min_address, FlatPtr& max_address)
{
    min_address = explode_byte(0xff);
    max_address = 0;
    for_each_block([&](auto& block) {
        min_address = min(min_address, reinterpret_cast<FlatPtr>(&block));
        max_address = max(max_address, reinterpret_cast<FlatPtr>(&block) + HeapBlockBase::block_size);
        return IterationDecision::Continue;
    });
}

template<typename Callback>
static void for_each_cell_among_possible_pointers(HashTable<HeapBlock*> const& all_live_heap_blocks, HashMap<FlatPtr, HeapRoot>& possible_pointers, Callback callback)
{
    for (auto possible_pointer : possible_pointers.keys()) {
        if (!possible_pointer)
            continue;
        auto* possible_heap_block = HeapBlock::from_cell(reinterpret_cast<Cell const*>(possible_pointer));
        if (!all_live_heap_blocks.contains(possible_heap_block))
            continue;
        if (auto* cell = possible_heap_block->cell_from_possible_pointer(possible_pointer)) {
            callback(cell, possible_pointer);
        }
    }
}
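
// Builds a JSON-serializable graph of the live heap: every reachable cell becomes a node,
// every visited reference becomes an edge, and root nodes are annotated with their origin.
// Used by Heap::dump_graph() when debugging heap and leak issues.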
class GraphConstructorVisitor final : public Cell::Visitor {
public:
    explicit GraphConstructorVisitor(Heap& heap, HashMap<Cell*, HeapRoot> const& roots)
        : m_heap(heap)
    {
        m_heap.find_min_and_max_block_addresses(m_min_block_address, m_max_block_address);
        m_heap.for_each_block([&](auto& block) {
            m_all_live_heap_blocks.set(&block);
            return IterationDecision::Continue;
        });

        for (auto* root : roots.keys()) {
            visit(root);
            auto& graph_node = m_graph.ensure(reinterpret_cast<FlatPtr>(root));
            graph_node.class_name = root->class_name();
            graph_node.root_origin = *roots.get(root);
        }
    }

    virtual void visit_impl(Cell& cell) override
    {
        if (m_node_being_visited)
            m_node_being_visited->edges.set(reinterpret_cast<FlatPtr>(&cell));

        if (m_graph.get(reinterpret_cast<FlatPtr>(&cell)).has_value())
            return;

        m_work_queue.append(cell);
    }

    virtual void visit_possible_values(ReadonlyBytes bytes) override
    {
        HashMap<FlatPtr, HeapRoot> possible_pointers;

        auto* raw_pointer_sized_values = reinterpret_cast<FlatPtr const*>(bytes.data());
        for (size_t i = 0; i < (bytes.size() / sizeof(FlatPtr)); ++i)
            add_possible_value(possible_pointers, raw_pointer_sized_values[i], HeapRoot { .type = HeapRoot::Type::HeapFunctionCapturedPointer }, m_min_block_address, m_max_block_address);

        for_each_cell_among_possible_pointers(m_all_live_heap_blocks, possible_pointers, [&](Cell* cell, FlatPtr) {
            // NOTE: `cell` is already a Cell* here, so cast the pointer itself (not its address)
            //       when recording graph nodes and edges.
            if (m_node_being_visited)
                m_node_being_visited->edges.set(reinterpret_cast<FlatPtr>(cell));

            if (m_graph.get(reinterpret_cast<FlatPtr>(cell)).has_value())
                return;
            m_work_queue.append(*cell);
        });
    }

    void visit_all_cells()
    {
        while (!m_work_queue.is_empty()) {
            auto ptr = reinterpret_cast<FlatPtr>(&m_work_queue.last());
            m_node_being_visited = &m_graph.ensure(ptr);
            m_node_being_visited->class_name = m_work_queue.last().class_name();
            m_work_queue.take_last().visit_edges(*this);
            m_node_being_visited = nullptr;
        }
    }

    AK::JsonObject dump()
    {
        auto graph = AK::JsonObject();
        for (auto& it : m_graph) {
            AK::JsonArray edges;
            for (auto const& value : it.value.edges) {
                edges.must_append(ByteString::formatted("{}", value));
            }

            auto node = AK::JsonObject();
            if (it.value.root_origin.has_value()) {
                auto type = it.value.root_origin->type;
                auto location = it.value.root_origin->location;
                switch (type) {
                case HeapRoot::Type::Handle:
                    node.set("root"sv, ByteString::formatted("Handle {} {}:{}", location->function_name(), location->filename(), location->line_number()));
                    break;
                case HeapRoot::Type::MarkedVector:
                    node.set("root"sv, "MarkedVector");
                    break;
                case HeapRoot::Type::RegisterPointer:
                    node.set("root"sv, "RegisterPointer");
                    break;
                case HeapRoot::Type::StackPointer:
                    node.set("root"sv, "StackPointer");
                    break;
                case HeapRoot::Type::VM:
                    node.set("root"sv, "VM");
                    break;
                case HeapRoot::Type::SafeFunction:
                    node.set("root"sv, ByteString::formatted("SafeFunction {} {}:{}", location->function_name(), location->filename(), location->line_number()));
                    break;
                default:
                    VERIFY_NOT_REACHED();
                }
            }
            node.set("class_name"sv, it.value.class_name);
            node.set("edges"sv, edges);
            graph.set(ByteString::number(it.key), node);
        }
        return graph;
    }

private:
    struct GraphNode {
        Optional<HeapRoot> root_origin;
        StringView class_name;
        HashTable<FlatPtr> edges {};
    };

    GraphNode* m_node_being_visited { nullptr };
    Vector<Cell&> m_work_queue;
    HashMap<FlatPtr, GraphNode> m_graph;

    Heap& m_heap;
    HashTable<HeapBlock*> m_all_live_heap_blocks;
    FlatPtr m_min_block_address;
    FlatPtr m_max_block_address;
};

AK::JsonObject Heap::dump_graph()
{
    HashMap<Cell*, HeapRoot> roots;
    gather_roots(roots);
    GraphConstructorVisitor visitor(*this, roots);
    vm().bytecode_interpreter().visit_edges(visitor);
    visitor.visit_all_cells();
    return visitor.dump();
}
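
// Entry point for a full mark-and-sweep cycle. For CollectGarbage we gather roots and mark;
// for CollectEverything (used on teardown) marking is skipped so every cell gets swept.
// If collection is currently deferred, the request is remembered and replayed by undefer_gc().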
void Heap::collect_garbage(CollectionType collection_type, bool print_report)
{
    VERIFY(!m_collecting_garbage);
    TemporaryChange change(m_collecting_garbage, true);

#ifdef AK_OS_SERENITY
    static size_t global_gc_counter = 0;
    perf_event(PERF_EVENT_SIGNPOST, gc_perf_string_id, global_gc_counter++);
#endif

    Core::ElapsedTimer collection_measurement_timer;
    if (print_report)
        collection_measurement_timer.start();

    if (collection_type == CollectionType::CollectGarbage) {
        if (m_gc_deferrals) {
            m_should_gc_when_deferral_ends = true;
            return;
        }
        HashMap<Cell*, HeapRoot> roots;
        gather_roots(roots);
        mark_live_cells(roots);
    }
    finalize_unmarked_cells();
    sweep_dead_cells(print_report, collection_measurement_timer);
}
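
// Collects the explicit root set: VM-internal roots, conservative roots from the native
// stack and registers, live Handles, and cells referenced by MarkedVectors.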
void Heap::gather_roots(HashMap<Cell*, HeapRoot>& roots)
{
    vm().gather_roots(roots);
    gather_conservative_roots(roots);

    for (auto& handle : m_handles)
        roots.set(handle.cell(), HeapRoot { .type = HeapRoot::Type::Handle, .location = &handle.source_location() });

    for (auto& vector : m_marked_vectors)
        vector.gather_roots(roots);

    if constexpr (HEAP_DEBUG) {
        dbgln("gather_roots:");
        for (auto* root : roots.keys())
            dbgln("  + {}", root);
    }
}

#ifdef HAS_ADDRESS_SANITIZER
__attribute__((no_sanitize("address"))) void Heap::gather_asan_fake_stack_roots(HashMap<FlatPtr, HeapRoot>& possible_pointers, FlatPtr addr, FlatPtr min_block_address, FlatPtr max_block_address)
{
    void* begin = nullptr;
    void* end = nullptr;
    void* real_stack = __asan_addr_is_in_fake_stack(__asan_get_current_fake_stack(), reinterpret_cast<void*>(addr), &begin, &end);
    if (real_stack != nullptr) {
        for (auto* real_stack_addr = reinterpret_cast<void const* const*>(begin); real_stack_addr < end; ++real_stack_addr) {
            void const* real_address = *real_stack_addr;
            if (real_address == nullptr)
                continue;
            add_possible_value(possible_pointers, reinterpret_cast<FlatPtr>(real_address), HeapRoot { .type = HeapRoot::Type::StackPointer }, min_block_address, max_block_address);
        }
    }
}
#else
void Heap::gather_asan_fake_stack_roots(HashMap<FlatPtr, HeapRoot>&, FlatPtr, FlatPtr, FlatPtr)
{
}
#endif
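
// Conservative root scan: setjmp() spills callee-saved registers into `buf`, which is then
// scanned word-by-word alongside the native stack (and any registered SafeFunction capture
// ranges). Anything that looks like a pointer into a live HeapBlock is treated as a root.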
__attribute__((no_sanitize("address"))) void Heap::gather_conservative_roots(HashMap<Cell*, HeapRoot>& roots)
{
    FlatPtr dummy;

    dbgln_if(HEAP_DEBUG, "gather_conservative_roots:");

    jmp_buf buf;
    setjmp(buf);

    HashMap<FlatPtr, HeapRoot> possible_pointers;

    auto* raw_jmp_buf = reinterpret_cast<FlatPtr const*>(buf);

    FlatPtr min_block_address, max_block_address;
    find_min_and_max_block_addresses(min_block_address, max_block_address);

    for (size_t i = 0; i < ((size_t)sizeof(buf)) / sizeof(FlatPtr); ++i)
        add_possible_value(possible_pointers, raw_jmp_buf[i], HeapRoot { .type = HeapRoot::Type::RegisterPointer }, min_block_address, max_block_address);

    auto stack_reference = bit_cast<FlatPtr>(&dummy);
    auto& stack_info = m_vm.stack_info();

    for (FlatPtr stack_address = stack_reference; stack_address < stack_info.top(); stack_address += sizeof(FlatPtr)) {
        auto data = *reinterpret_cast<FlatPtr*>(stack_address);
        add_possible_value(possible_pointers, data, HeapRoot { .type = HeapRoot::Type::StackPointer }, min_block_address, max_block_address);
        gather_asan_fake_stack_roots(possible_pointers, data, min_block_address, max_block_address);
    }

    // NOTE: If we have any custom ranges registered, scan those as well.
    //       This is where JS::SafeFunction closures get marked.
    if (s_custom_ranges_for_conservative_scan) {
        for (auto& custom_range : *s_custom_ranges_for_conservative_scan) {
            for (size_t i = 0; i < (custom_range.value / sizeof(FlatPtr)); ++i) {
                auto safe_function_location = s_safe_function_locations->get(custom_range.key);
                add_possible_value(possible_pointers, custom_range.key[i], HeapRoot { .type = HeapRoot::Type::SafeFunction, .location = *safe_function_location }, min_block_address, max_block_address);
            }
        }
    }

    HashTable<HeapBlock*> all_live_heap_blocks;
    for_each_block([&](auto& block) {
        all_live_heap_blocks.set(&block);
        return IterationDecision::Continue;
    });

    for_each_cell_among_possible_pointers(all_live_heap_blocks, possible_pointers, [&](Cell* cell, FlatPtr possible_pointer) {
        if (cell->state() == Cell::State::Live) {
            dbgln_if(HEAP_DEBUG, "  ?-> {}", (void const*)cell);
            roots.set(cell, *possible_pointers.get(possible_pointer));
        } else {
            dbgln_if(HEAP_DEBUG, "  #-> {}", (void const*)cell);
        }
    });
}
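
// Marking phase visitor: starts from the gathered roots and transitively marks every reachable
// cell, using a work queue instead of recursion. visit_possible_values() conservatively scans
// raw byte ranges (e.g. HeapFunction captures) for cell pointers.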
class MarkingVisitor final : public Cell::Visitor {
public:
    explicit MarkingVisitor(Heap& heap, HashMap<Cell*, HeapRoot> const& roots)
        : m_heap(heap)
    {
        m_heap.find_min_and_max_block_addresses(m_min_block_address, m_max_block_address);
        m_heap.for_each_block([&](auto& block) {
            m_all_live_heap_blocks.set(&block);
            return IterationDecision::Continue;
        });

        for (auto* root : roots.keys()) {
            visit(root);
        }
    }

    virtual void visit_impl(Cell& cell) override
    {
        if (cell.is_marked())
            return;
        dbgln_if(HEAP_DEBUG, "  ! {}", &cell);

        cell.set_marked(true);
        m_work_queue.append(cell);
    }

    virtual void visit_possible_values(ReadonlyBytes bytes) override
    {
        HashMap<FlatPtr, HeapRoot> possible_pointers;

        auto* raw_pointer_sized_values = reinterpret_cast<FlatPtr const*>(bytes.data());
        for (size_t i = 0; i < (bytes.size() / sizeof(FlatPtr)); ++i)
            add_possible_value(possible_pointers, raw_pointer_sized_values[i], HeapRoot { .type = HeapRoot::Type::HeapFunctionCapturedPointer }, m_min_block_address, m_max_block_address);

        for_each_cell_among_possible_pointers(m_all_live_heap_blocks, possible_pointers, [&](Cell* cell, FlatPtr) {
            if (cell->is_marked())
                return;
            if (cell->state() != Cell::State::Live)
                return;
            cell->set_marked(true);
            m_work_queue.append(*cell);
        });
    }

    void mark_all_live_cells()
    {
        while (!m_work_queue.is_empty()) {
            m_work_queue.take_last().visit_edges(*this);
        }
    }

private:
    Heap& m_heap;
    Vector<Cell&> m_work_queue;
    HashTable<HeapBlock*> m_all_live_heap_blocks;
    FlatPtr m_min_block_address;
    FlatPtr m_max_block_address;
};

void Heap::mark_live_cells(HashMap<Cell*, HeapRoot> const& roots)
{
    dbgln_if(HEAP_DEBUG, "mark_live_cells:");

    MarkingVisitor visitor(*this, roots);

    vm().bytecode_interpreter().visit_edges(visitor);

    visitor.mark_all_live_cells();

    for (auto& inverse_root : m_uprooted_cells)
        inverse_root->set_marked(false);

    m_uprooted_cells.clear();
}

bool Heap::cell_must_survive_garbage_collection(Cell const& cell)
{
    if (!cell.overrides_must_survive_garbage_collection({}))
        return false;
    return cell.must_survive_garbage_collection();
}
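
// Runs finalizers on every live-but-unmarked cell before the sweep deallocates it, unless the
// cell insists on surviving this collection.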
void Heap::finalize_unmarked_cells()
{
    for_each_block([&](auto& block) {
        block.template for_each_cell_in_state<Cell::State::Live>([](Cell* cell) {
            if (!cell->is_marked() && !cell_must_survive_garbage_collection(*cell))
                cell->finalize();
        });
        return IterationDecision::Continue;
    });
}
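
// Sweep phase: deallocates unmarked cells, clears the mark bit on survivors, prunes dead
// entries from weak containers, returns empty blocks to their allocators, and updates the
// GC threshold based on the surviving byte count. Optionally prints a summary report.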
void Heap::sweep_dead_cells(bool print_report, Core::ElapsedTimer const& measurement_timer)
{
    dbgln_if(HEAP_DEBUG, "sweep_dead_cells:");
    Vector<HeapBlock*, 32> empty_blocks;
    Vector<HeapBlock*, 32> full_blocks_that_became_usable;

    size_t collected_cells = 0;
    size_t live_cells = 0;
    size_t collected_cell_bytes = 0;
    size_t live_cell_bytes = 0;

    for_each_block([&](auto& block) {
        bool block_has_live_cells = false;
        bool block_was_full = block.is_full();
        block.template for_each_cell_in_state<Cell::State::Live>([&](Cell* cell) {
            if (!cell->is_marked() && !cell_must_survive_garbage_collection(*cell)) {
                dbgln_if(HEAP_DEBUG, "  ~ {}", cell);
                block.deallocate(cell);
                ++collected_cells;
                collected_cell_bytes += block.cell_size();
            } else {
                cell->set_marked(false);
                block_has_live_cells = true;
                ++live_cells;
                live_cell_bytes += block.cell_size();
            }
        });
        if (!block_has_live_cells)
            empty_blocks.append(&block);
        else if (block_was_full != block.is_full())
            full_blocks_that_became_usable.append(&block);
        return IterationDecision::Continue;
    });

    for (auto& weak_container : m_weak_containers)
        weak_container.remove_dead_cells({});

    for (auto* block : empty_blocks) {
        dbgln_if(HEAP_DEBUG, " - HeapBlock empty @ {}: cell_size={}", block, block->cell_size());
        block->cell_allocator().block_did_become_empty({}, *block);
    }

    for (auto* block : full_blocks_that_became_usable) {
        dbgln_if(HEAP_DEBUG, " - HeapBlock usable again @ {}: cell_size={}", block, block->cell_size());
        block->cell_allocator().block_did_become_usable({}, *block);
    }

    if constexpr (HEAP_DEBUG) {
        for_each_block([&](auto& block) {
            dbgln(" > Live HeapBlock @ {}: cell_size={}", &block, block.cell_size());
            return IterationDecision::Continue;
        });
    }

    m_gc_bytes_threshold = live_cell_bytes > GC_MIN_BYTES_THRESHOLD ? live_cell_bytes : GC_MIN_BYTES_THRESHOLD;

    if (print_report) {
        Duration const time_spent = measurement_timer.elapsed_time();
        size_t live_block_count = 0;
        for_each_block([&](auto&) {
            ++live_block_count;
            return IterationDecision::Continue;
        });

        dbgln("Garbage collection report");
        dbgln("=============================================");
        dbgln("     Time spent: {} ms", time_spent.to_milliseconds());
        dbgln("     Live cells: {} ({} bytes)", live_cells, live_cell_bytes);
        dbgln("Collected cells: {} ({} bytes)", collected_cells, collected_cell_bytes);
        dbgln("    Live blocks: {} ({} bytes)", live_block_count, live_block_count * HeapBlock::block_size);
        dbgln("   Freed blocks: {} ({} bytes)", empty_blocks.size(), empty_blocks.size() * HeapBlock::block_size);
        dbgln("=============================================");
    }
}
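
// GC deferral: while at least one deferral is active, collect_garbage() requests are
// postponed and replayed once when the last deferral ends.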
void Heap::defer_gc()
{
    ++m_gc_deferrals;
}

void Heap::undefer_gc()
{
    VERIFY(m_gc_deferrals > 0);
    --m_gc_deferrals;

    if (!m_gc_deferrals) {
        if (m_should_gc_when_deferral_ends)
            collect_garbage();
        m_should_gc_when_deferral_ends = false;
    }
}

void Heap::uproot_cell(Cell* cell)
{
    m_uprooted_cells.append(cell);
}
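
// Registration hooks for JS::SafeFunction closures: the capture storage range is added to the
// per-thread custom-range list so gather_conservative_roots() scans it and keeps any captured
// cells alive for as long as the closure is registered.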
void register_safe_function_closure(void* base, size_t size, SourceLocation* location)
{
    if (!s_custom_ranges_for_conservative_scan) {
        // FIXME: This per-thread HashMap is currently leaked on thread exit.
        s_custom_ranges_for_conservative_scan = new HashMap<FlatPtr*, size_t>;
    }
    if (!s_safe_function_locations) {
        s_safe_function_locations = new HashMap<FlatPtr*, SourceLocation*>;
    }
    auto result = s_custom_ranges_for_conservative_scan->set(reinterpret_cast<FlatPtr*>(base), size);
    VERIFY(result == AK::HashSetResult::InsertedNewEntry);
    result = s_safe_function_locations->set(reinterpret_cast<FlatPtr*>(base), location);
    VERIFY(result == AK::HashSetResult::InsertedNewEntry);
}

void unregister_safe_function_closure(void* base, size_t, SourceLocation*)
{
    VERIFY(s_custom_ranges_for_conservative_scan);
    bool did_remove_range = s_custom_ranges_for_conservative_scan->remove(reinterpret_cast<FlatPtr*>(base));
    VERIFY(did_remove_range);
    bool did_remove_location = s_safe_function_locations->remove(reinterpret_cast<FlatPtr*>(base));
    VERIFY(did_remove_location);
}

}