Heap.cpp
/*
 * Copyright (c) 2020-2022, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Badge.h>
#include <AK/Debug.h>
#include <AK/HashMap.h>
#include <AK/HashTable.h>
#include <AK/StackInfo.h>
#include <AK/TemporaryChange.h>
#include <LibCore/ElapsedTimer.h>
#include <LibJS/Heap/CellAllocator.h>
#include <LibJS/Heap/Handle.h>
#include <LibJS/Heap/Heap.h>
#include <LibJS/Heap/HeapBlock.h>
#include <LibJS/Interpreter.h>
#include <LibJS/Runtime/Object.h>
#include <LibJS/Runtime/WeakContainer.h>
#include <LibJS/SafeFunction.h>
#include <setjmp.h>

#ifdef AK_OS_SERENITY
#    include <serenity.h>
#endif

namespace JS {

#ifdef AK_OS_SERENITY
static int gc_perf_string_id;
#endif

// NOTE: We keep a per-thread list of custom ranges. This hinges on the assumption that there is one JS VM per thread.
static __thread HashMap<FlatPtr*, size_t>* s_custom_ranges_for_conservative_scan = nullptr;

Heap::Heap(VM& vm)
    : m_vm(vm)
{
#ifdef AK_OS_SERENITY
    auto gc_signpost_string = "Garbage collection"sv;
    gc_perf_string_id = perf_register_string(gc_signpost_string.characters_without_null_termination(), gc_signpost_string.length());
#endif

    if constexpr (HeapBlock::min_possible_cell_size <= 16) {
        m_allocators.append(make<CellAllocator>(16));
    }
    static_assert(HeapBlock::min_possible_cell_size <= 24, "Heap Cell tracking uses too much data!");
    m_allocators.append(make<CellAllocator>(32));
    m_allocators.append(make<CellAllocator>(64));
    m_allocators.append(make<CellAllocator>(96));
    m_allocators.append(make<CellAllocator>(128));
    m_allocators.append(make<CellAllocator>(256));
    m_allocators.append(make<CellAllocator>(512));
    m_allocators.append(make<CellAllocator>(1024));
    m_allocators.append(make<CellAllocator>(3072));
}

Heap::~Heap()
{
    vm().string_cache().clear();
    collect_garbage(CollectionType::CollectEverything);
}
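
// Returns the smallest registered CellAllocator whose cell size can hold `cell_size` bytes.
// The allocator list is sorted by size, so the first allocator that fits is the tightest fit.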
ALWAYS_INLINE CellAllocator& Heap::allocator_for_size(size_t cell_size)
{
    for (auto& allocator : m_allocators) {
        if (allocator->cell_size() >= cell_size)
            return *allocator;
    }
    dbgln("Cannot get CellAllocator for cell size {}, largest available is {}!", cell_size, m_allocators.last()->cell_size());
    VERIFY_NOT_REACHED();
}
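
// NOTE: Allocating a cell may trigger a full collection first: always when
//       "collect on every allocation" is enabled, and otherwise once the number of
//       allocations since the last collection exceeds m_max_allocations_between_gc.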
Cell* Heap::allocate_cell(size_t size)
{
    if (should_collect_on_every_allocation()) {
        collect_garbage();
    } else if (m_allocations_since_last_gc > m_max_allocations_between_gc) {
        m_allocations_since_last_gc = 0;
        collect_garbage();
    } else {
        ++m_allocations_since_last_gc;
    }

    auto& allocator = allocator_for_size(size);
    return allocator.allocate_cell(*this);
}
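
// A collection runs in three phases: gather roots (including a conservative scan
// of the native stack), mark everything reachable from them, then finalize and
// sweep the cells that were never marked. CollectEverything skips the root and
// mark phases entirely, so every cell is treated as garbage.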
void Heap::collect_garbage(CollectionType collection_type, bool print_report)
{
    VERIFY(!m_collecting_garbage);
    TemporaryChange change(m_collecting_garbage, true);

#ifdef AK_OS_SERENITY
    static size_t global_gc_counter = 0;
    perf_event(PERF_EVENT_SIGNPOST, gc_perf_string_id, global_gc_counter++);
#endif

    Core::ElapsedTimer collection_measurement_timer;
    if (print_report)
        collection_measurement_timer.start();

    if (collection_type == CollectionType::CollectGarbage) {
        if (m_gc_deferrals) {
            m_should_gc_when_deferral_ends = true;
            return;
        }
        HashTable<Cell*> roots;
        gather_roots(roots);
        mark_live_cells(roots);
    }
    finalize_unmarked_cells();
    sweep_dead_cells(print_report, collection_measurement_timer);
}
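
// Roots come from four places: the VM itself, a conservative scan of the native
// stack and registers, live Handles, and any registered MarkedVector containers.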
void Heap::gather_roots(HashTable<Cell*>& roots)
{
    vm().gather_roots(roots);
    gather_conservative_roots(roots);

    for (auto& handle : m_handles)
        roots.set(handle.cell());

    for (auto& vector : m_marked_vectors)
        vector.gather_roots(roots);

    if constexpr (HEAP_DEBUG) {
        dbgln("gather_roots:");
        for (auto* root : roots)
            dbgln(" + {}", root);
    }
}
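
// NOTE: This is a conservative scan: registers are spilled onto the stack via
//       setjmp(), then every word in the jmp_buf, the machine stack, and any
//       registered custom ranges is treated as a potential pointer. Anything that
//       lands inside a live HeapBlock and resolves to a live cell becomes a root.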
__attribute__((no_sanitize("address"))) void Heap::gather_conservative_roots(HashTable<Cell*>& roots)
{
    FlatPtr dummy;

    dbgln_if(HEAP_DEBUG, "gather_conservative_roots:");

    jmp_buf buf;
    setjmp(buf);

    HashTable<FlatPtr> possible_pointers;

    auto* raw_jmp_buf = reinterpret_cast<FlatPtr const*>(buf);

    auto add_possible_value = [&](FlatPtr data) {
        if constexpr (sizeof(FlatPtr*) == sizeof(Value)) {
            // Because Value stores pointers in non-canonical form, we have to check whether
            // the top bytes match any pointer-backed tag. If they do, we extract the pointer
            // back into its canonical form and add that as a possible pointer.
            if ((data & SHIFTED_IS_CELL_PATTERN) == SHIFTED_IS_CELL_PATTERN)
                possible_pointers.set(Value::extract_pointer_bits(data));
            else
                possible_pointers.set(data);
        } else {
            static_assert((sizeof(Value) % sizeof(FlatPtr*)) == 0);
            // In the 32-bit case we look at the top and bottom halves of a Value separately,
            // so we simply add both of them as possible pointers.
            possible_pointers.set(data);
        }
    };

    // NOTE: The loop index counts FlatPtr-sized elements, so it must advance by one
    //       element at a time. (The previous version strode by sizeof(FlatPtr) and
    //       skipped most of the jmp_buf.)
    for (size_t i = 0; i < sizeof(buf) / sizeof(FlatPtr); ++i)
        add_possible_value(raw_jmp_buf[i]);

    auto stack_reference = bit_cast<FlatPtr>(&dummy);
    auto& stack_info = m_vm.stack_info();

    for (FlatPtr stack_address = stack_reference; stack_address < stack_info.top(); stack_address += sizeof(FlatPtr)) {
        auto data = *reinterpret_cast<FlatPtr*>(stack_address);
        add_possible_value(data);
    }

    // NOTE: If we have any custom ranges registered, scan those as well.
    //       This is where JS::SafeFunction closures get marked.
    if (s_custom_ranges_for_conservative_scan) {
        for (auto& custom_range : *s_custom_ranges_for_conservative_scan) {
            for (size_t i = 0; i < (custom_range.value / sizeof(FlatPtr)); ++i) {
                add_possible_value(custom_range.key[i]);
            }
        }
    }

    HashTable<HeapBlock*> all_live_heap_blocks;
    for_each_block([&](auto& block) {
        all_live_heap_blocks.set(&block);
        return IterationDecision::Continue;
    });

    for (auto possible_pointer : possible_pointers) {
        if (!possible_pointer)
            continue;
        dbgln_if(HEAP_DEBUG, " ? {}", (void const*)possible_pointer);
        auto* possible_heap_block = HeapBlock::from_cell(reinterpret_cast<Cell const*>(possible_pointer));
        if (all_live_heap_blocks.contains(possible_heap_block)) {
            if (auto* cell = possible_heap_block->cell_from_possible_pointer(possible_pointer)) {
                if (cell->state() == Cell::State::Live) {
                    dbgln_if(HEAP_DEBUG, " ?-> {}", (void const*)cell);
                    roots.set(cell);
                } else {
                    dbgln_if(HEAP_DEBUG, " #-> {}", (void const*)cell);
                }
            }
        }
    }
}
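
// Walks the live-object graph: visiting a cell marks it, then recurses into
// everything it references via visit_edges(). The is_marked() check keeps
// cycles from recursing forever.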
class MarkingVisitor final : public Cell::Visitor {
public:
    MarkingVisitor() = default;

    virtual void visit_impl(Cell& cell) override
    {
        if (cell.is_marked())
            return;
        dbgln_if(HEAP_DEBUG, " ! {}", &cell);
        cell.set_marked(true);
        cell.visit_edges(*this);
    }
};

void Heap::mark_live_cells(HashTable<Cell*> const& roots)
{
    dbgln_if(HEAP_DEBUG, "mark_live_cells:");

    MarkingVisitor visitor;

    for (auto* root : roots)
        visitor.visit(root);

    // NOTE: Uprooted cells are forcibly unmarked so the sweep phase collects them
    //       even if the marking pass above reached them.
    for (auto& inverse_root : m_uprooted_cells)
        inverse_root->set_marked(false);

    m_uprooted_cells.clear();
}

bool Heap::cell_must_survive_garbage_collection(Cell const& cell)
{
    if (!cell.overrides_must_survive_garbage_collection({}))
        return false;
    return cell.must_survive_garbage_collection();
}
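
// Runs finalizers on every cell that is about to be swept. This happens as a
// separate pass before sweep_dead_cells(), so finalizers can still safely
// access other cells that have not yet been deallocated.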
void Heap::finalize_unmarked_cells()
{
    for_each_block([&](auto& block) {
        block.template for_each_cell_in_state<Cell::State::Live>([](Cell* cell) {
            if (!cell->is_marked() && !cell_must_survive_garbage_collection(*cell))
                cell->finalize();
        });
        return IterationDecision::Continue;
    });
}
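
// Deallocates every unmarked cell, clears the mark bit on survivors, prunes dead
// cells from weak containers, and returns fully-empty blocks to their allocators.
// Blocks that were full but now have free cells are reported back so the
// allocator can hand out cells from them again.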
void Heap::sweep_dead_cells(bool print_report, Core::ElapsedTimer const& measurement_timer)
{
    dbgln_if(HEAP_DEBUG, "sweep_dead_cells:");
    Vector<HeapBlock*, 32> empty_blocks;
    Vector<HeapBlock*, 32> full_blocks_that_became_usable;

    size_t collected_cells = 0;
    size_t live_cells = 0;
    size_t collected_cell_bytes = 0;
    size_t live_cell_bytes = 0;

    for_each_block([&](auto& block) {
        bool block_has_live_cells = false;
        bool block_was_full = block.is_full();
        block.template for_each_cell_in_state<Cell::State::Live>([&](Cell* cell) {
            if (!cell->is_marked() && !cell_must_survive_garbage_collection(*cell)) {
                dbgln_if(HEAP_DEBUG, " ~ {}", cell);
                block.deallocate(cell);
                ++collected_cells;
                collected_cell_bytes += block.cell_size();
            } else {
                cell->set_marked(false);
                block_has_live_cells = true;
                ++live_cells;
                live_cell_bytes += block.cell_size();
            }
        });
        if (!block_has_live_cells)
            empty_blocks.append(&block);
        else if (block_was_full != block.is_full())
            full_blocks_that_became_usable.append(&block);
        return IterationDecision::Continue;
    });

    for (auto& weak_container : m_weak_containers)
        weak_container.remove_dead_cells({});

    for (auto* block : empty_blocks) {
        dbgln_if(HEAP_DEBUG, " - HeapBlock empty @ {}: cell_size={}", block, block->cell_size());
        allocator_for_size(block->cell_size()).block_did_become_empty({}, *block);
    }

    for (auto* block : full_blocks_that_became_usable) {
        dbgln_if(HEAP_DEBUG, " - HeapBlock usable again @ {}: cell_size={}", block, block->cell_size());
        allocator_for_size(block->cell_size()).block_did_become_usable({}, *block);
    }

    if constexpr (HEAP_DEBUG) {
        for_each_block([&](auto& block) {
            dbgln(" > Live HeapBlock @ {}: cell_size={}", &block, block.cell_size());
            return IterationDecision::Continue;
        });
    }

    if (print_report) {
        Time const time_spent = measurement_timer.elapsed_time();
        size_t live_block_count = 0;
        for_each_block([&](auto&) {
            ++live_block_count;
            return IterationDecision::Continue;
        });

        dbgln("Garbage collection report");
        dbgln("=============================================");
        dbgln("     Time spent: {} ms", time_spent.to_milliseconds());
        dbgln("     Live cells: {} ({} bytes)", live_cells, live_cell_bytes);
        dbgln("Collected cells: {} ({} bytes)", collected_cells, collected_cell_bytes);
        dbgln("    Live blocks: {} ({} bytes)", live_block_count, live_block_count * HeapBlock::block_size);
        dbgln("   Freed blocks: {} ({} bytes)", empty_blocks.size(), empty_blocks.size() * HeapBlock::block_size);
        dbgln("=============================================");
    }
}

void Heap::did_create_handle(Badge<HandleImpl>, HandleImpl& impl)
{
    VERIFY(!m_handles.contains(impl));
    m_handles.append(impl);
}

void Heap::did_destroy_handle(Badge<HandleImpl>, HandleImpl& impl)
{
    VERIFY(m_handles.contains(impl));
    m_handles.remove(impl);
}

void Heap::did_create_marked_vector(Badge<MarkedVectorBase>, MarkedVectorBase& vector)
{
    VERIFY(!m_marked_vectors.contains(vector));
    m_marked_vectors.append(vector);
}

void Heap::did_destroy_marked_vector(Badge<MarkedVectorBase>, MarkedVectorBase& vector)
{
    VERIFY(m_marked_vectors.contains(vector));
    m_marked_vectors.remove(vector);
}

void Heap::did_create_weak_container(Badge<WeakContainer>, WeakContainer& set)
{
    VERIFY(!m_weak_containers.contains(set));
    m_weak_containers.append(set);
}

void Heap::did_destroy_weak_container(Badge<WeakContainer>, WeakContainer& set)
{
    VERIFY(m_weak_containers.contains(set));
    m_weak_containers.remove(set);
}
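
// NOTE: Deferrals nest. While any DeferGC is alive, collect_garbage() only sets
//       a flag; the deferred collection runs when the last deferral ends.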
void Heap::defer_gc(Badge<DeferGC>)
{
    ++m_gc_deferrals;
}

void Heap::undefer_gc(Badge<DeferGC>)
{
    VERIFY(m_gc_deferrals > 0);
    --m_gc_deferrals;

    if (!m_gc_deferrals) {
        if (m_should_gc_when_deferral_ends)
            collect_garbage();
        m_should_gc_when_deferral_ends = false;
    }
}

void Heap::uproot_cell(Cell* cell)
{
    m_uprooted_cells.append(cell);
}
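
// SafeFunction closures live outside both the machine stack and the JS heap, so
// their captured ranges are registered here and picked up by the custom-range
// scan in gather_conservative_roots().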
void register_safe_function_closure(void* base, size_t size)
{
    if (!s_custom_ranges_for_conservative_scan) {
        // FIXME: This per-thread HashMap is currently leaked on thread exit.
        s_custom_ranges_for_conservative_scan = new HashMap<FlatPtr*, size_t>;
    }
    auto result = s_custom_ranges_for_conservative_scan->set(reinterpret_cast<FlatPtr*>(base), size);
    VERIFY(result == AK::HashSetResult::InsertedNewEntry);
}

void unregister_safe_function_closure(void* base, size_t)
{
    VERIFY(s_custom_ranges_for_conservative_scan);
    bool did_remove = s_custom_ranges_for_conservative_scan->remove(reinterpret_cast<FlatPtr*>(base));
    VERIFY(did_remove);
}

}