Heap.cpp

/*
 * Copyright (c) 2020-2022, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Badge.h>
#include <AK/Debug.h>
#include <AK/HashTable.h>
#include <AK/StackInfo.h>
#include <AK/TemporaryChange.h>
#include <LibCore/ElapsedTimer.h>
#include <LibJS/Heap/CellAllocator.h>
#include <LibJS/Heap/Handle.h>
#include <LibJS/Heap/Heap.h>
#include <LibJS/Heap/HeapBlock.h>
#include <LibJS/Interpreter.h>
#include <LibJS/Runtime/Object.h>
#include <LibJS/Runtime/WeakContainer.h>
#include <LibJS/SafeFunction.h>
#include <setjmp.h>

#ifdef AK_OS_SERENITY
#    include <serenity.h>
#endif

namespace JS {

#ifdef AK_OS_SERENITY
static int gc_perf_string_id;
#endif

// NOTE: We keep a per-thread list of custom ranges. This hinges on the assumption that there is one JS VM per thread.
static __thread HashMap<FlatPtr*, size_t>* s_custom_ranges_for_conservative_scan = nullptr;

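// NOTE: Cells are allocated from a fixed set of size-class allocators; a request is served by the
//       smallest class whose cell size fits it, so no per-type allocators are needed.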
Heap::Heap(VM& vm)
    : m_vm(vm)
{
#ifdef AK_OS_SERENITY
    auto gc_signpost_string = "Garbage collection"sv;
    gc_perf_string_id = perf_register_string(gc_signpost_string.characters_without_null_termination(), gc_signpost_string.length());
#endif

    if constexpr (HeapBlock::min_possible_cell_size <= 16) {
        m_allocators.append(make<CellAllocator>(16));
    }
    static_assert(HeapBlock::min_possible_cell_size <= 24, "Heap Cell tracking uses too much data!");
    m_allocators.append(make<CellAllocator>(32));
    m_allocators.append(make<CellAllocator>(64));
    m_allocators.append(make<CellAllocator>(96));
    m_allocators.append(make<CellAllocator>(128));
    m_allocators.append(make<CellAllocator>(256));
    m_allocators.append(make<CellAllocator>(512));
    m_allocators.append(make<CellAllocator>(1024));
    m_allocators.append(make<CellAllocator>(3072));
}

Heap::~Heap()
{
    vm().string_cache().clear();
    collect_garbage(CollectionType::CollectEverything);
}

ALWAYS_INLINE CellAllocator& Heap::allocator_for_size(size_t cell_size)
{
    for (auto& allocator : m_allocators) {
        if (allocator->cell_size() >= cell_size)
            return *allocator;
    }
    dbgln("Cannot get CellAllocator for cell size {}, largest available is {}!", cell_size, m_allocators.last()->cell_size());
    VERIFY_NOT_REACHED();
}

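// Allocating a cell may first trigger a collection: either on every allocation (a debug option) or
// once m_max_allocations_between_gc allocations have happened since the last collection.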
Cell* Heap::allocate_cell(size_t size)
{
    if (should_collect_on_every_allocation()) {
        collect_garbage();
    } else if (m_allocations_since_last_gc > m_max_allocations_between_gc) {
        m_allocations_since_last_gc = 0;
        collect_garbage();
    } else {
        ++m_allocations_since_last_gc;
    }

    auto& allocator = allocator_for_size(size);
    return allocator.allocate_cell(*this);
}

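// CollectGarbage runs a normal mark-and-sweep cycle and is postponed while GC is deferred.
// CollectEverything skips the marking phase entirely, so every cell gets swept; the Heap destructor
// uses this to tear down all remaining cells.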
void Heap::collect_garbage(CollectionType collection_type, bool print_report)
{
    VERIFY(!m_collecting_garbage);
    TemporaryChange change(m_collecting_garbage, true);

#ifdef AK_OS_SERENITY
    static size_t global_gc_counter = 0;
    perf_event(PERF_EVENT_SIGNPOST, gc_perf_string_id, global_gc_counter++);
#endif

    auto collection_measurement_timer = Core::ElapsedTimer::start_new();
    if (collection_type == CollectionType::CollectGarbage) {
        if (m_gc_deferrals) {
            m_should_gc_when_deferral_ends = true;
            return;
        }
        HashTable<Cell*> roots;
        gather_roots(roots);
        mark_live_cells(roots);
    }
    finalize_unmarked_cells();
    sweep_dead_cells(print_report, collection_measurement_timer);
}

void Heap::gather_roots(HashTable<Cell*>& roots)
{
    vm().gather_roots(roots);
    gather_conservative_roots(roots);

    for (auto& handle : m_handles)
        roots.set(handle.cell());

    for (auto& vector : m_marked_vectors)
        vector.gather_roots(roots);

    if constexpr (HEAP_DEBUG) {
        dbgln("gather_roots:");
        for (auto* root : roots)
            dbgln("  + {}", root);
    }
}

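// Conservative root gathering: spill the register state into a jmp_buf with setjmp(), then treat every
// FlatPtr-sized value in that buffer, on the machine stack, and in any registered custom ranges as a
// potential Cell pointer. Values that point at a live cell within a live HeapBlock become roots.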
__attribute__((no_sanitize("address"))) void Heap::gather_conservative_roots(HashTable<Cell*>& roots)
{
    FlatPtr dummy;

    dbgln_if(HEAP_DEBUG, "gather_conservative_roots:");

    jmp_buf buf;
    setjmp(buf);

    HashTable<FlatPtr> possible_pointers;

    auto* raw_jmp_buf = reinterpret_cast<FlatPtr const*>(buf);

    auto add_possible_value = [&](FlatPtr data) {
        if constexpr (sizeof(FlatPtr*) == sizeof(Value)) {
            // Because Value stores pointers in non-canonical form, we have to check whether the top bytes
            // match any pointer-backed tag. If they do, we extract the pointer back into its canonical
            // form and add that as a possible pointer.
            if ((data & SHIFTED_IS_CELL_PATTERN) == SHIFTED_IS_CELL_PATTERN)
                possible_pointers.set(Value::extract_pointer_bits(data));
            else
                possible_pointers.set(data);
        } else {
            static_assert((sizeof(Value) % sizeof(FlatPtr*)) == 0);
            // In the 32-bit case we look at the upper and lower halves of each Value separately, so we
            // simply add both halves as possible pointers.
            possible_pointers.set(data);
        }
    };

    // Scan the register state that setjmp() spilled into the jmp_buf, one FlatPtr at a time.
    for (size_t i = 0; i < sizeof(buf) / sizeof(FlatPtr); ++i)
        add_possible_value(raw_jmp_buf[i]);

    auto stack_reference = bit_cast<FlatPtr>(&dummy);
    auto& stack_info = m_vm.stack_info();

    for (FlatPtr stack_address = stack_reference; stack_address < stack_info.top(); stack_address += sizeof(FlatPtr)) {
        auto data = *reinterpret_cast<FlatPtr*>(stack_address);
        add_possible_value(data);
    }

    // NOTE: If we have any custom ranges registered, scan those as well.
    //       This is where JS::SafeFunction closures get marked.
    if (s_custom_ranges_for_conservative_scan) {
        for (auto& custom_range : *s_custom_ranges_for_conservative_scan) {
            for (size_t i = 0; i < (custom_range.value / sizeof(FlatPtr)); ++i) {
                add_possible_value(custom_range.key[i]);
            }
        }
    }

    HashTable<HeapBlock*> all_live_heap_blocks;
    for_each_block([&](auto& block) {
        all_live_heap_blocks.set(&block);
        return IterationDecision::Continue;
    });

    for (auto possible_pointer : possible_pointers) {
        if (!possible_pointer)
            continue;
        dbgln_if(HEAP_DEBUG, "  ? {}", (void const*)possible_pointer);
        auto* possible_heap_block = HeapBlock::from_cell(reinterpret_cast<Cell const*>(possible_pointer));
        if (all_live_heap_blocks.contains(possible_heap_block)) {
            if (auto* cell = possible_heap_block->cell_from_possible_pointer(possible_pointer)) {
                if (cell->state() == Cell::State::Live) {
                    dbgln_if(HEAP_DEBUG, "  ?-> {}", (void const*)cell);
                    roots.set(cell);
                } else {
                    dbgln_if(HEAP_DEBUG, "  #-> {}", (void const*)cell);
                }
            }
        }
    }
}

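// Mark phase: visit each root and transitively mark everything reachable from it via Cell::visit_edges().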
class MarkingVisitor final : public Cell::Visitor {
public:
    MarkingVisitor() = default;

    virtual void visit_impl(Cell& cell) override
    {
        if (cell.is_marked())
            return;
        dbgln_if(HEAP_DEBUG, "  ! {}", &cell);
        cell.set_marked(true);
        cell.visit_edges(*this);
    }
};

void Heap::mark_live_cells(HashTable<Cell*> const& roots)
{
    dbgln_if(HEAP_DEBUG, "mark_live_cells:");

    MarkingVisitor visitor;

    for (auto* root : roots)
        visitor.visit(root);

    for (auto& inverse_root : m_uprooted_cells)
        inverse_root->set_marked(false);

    m_uprooted_cells.clear();
}

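// Give unmarked (i.e. unreachable) cells a chance to run their finalizers before they are swept.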
void Heap::finalize_unmarked_cells()
{
    for_each_block([&](auto& block) {
        block.template for_each_cell_in_state<Cell::State::Live>([](Cell* cell) {
            if (!cell->is_marked())
                cell->finalize();
        });
        return IterationDecision::Continue;
    });
}

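// Sweep phase: deallocate cells that were not marked, clear the mark bit on survivors, prune dead cells
// from weak containers, and hand empty (or newly usable again) blocks back to their allocators.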
void Heap::sweep_dead_cells(bool print_report, Core::ElapsedTimer const& measurement_timer)
{
    dbgln_if(HEAP_DEBUG, "sweep_dead_cells:");

    Vector<HeapBlock*, 32> empty_blocks;
    Vector<HeapBlock*, 32> full_blocks_that_became_usable;

    size_t collected_cells = 0;
    size_t live_cells = 0;
    size_t collected_cell_bytes = 0;
    size_t live_cell_bytes = 0;

    for_each_block([&](auto& block) {
        bool block_has_live_cells = false;
        bool block_was_full = block.is_full();
        block.template for_each_cell_in_state<Cell::State::Live>([&](Cell* cell) {
            if (!cell->is_marked()) {
                dbgln_if(HEAP_DEBUG, "  ~ {}", cell);
                block.deallocate(cell);
                ++collected_cells;
                collected_cell_bytes += block.cell_size();
            } else {
                cell->set_marked(false);
                block_has_live_cells = true;
                ++live_cells;
                live_cell_bytes += block.cell_size();
            }
        });
        if (!block_has_live_cells)
            empty_blocks.append(&block);
        else if (block_was_full != block.is_full())
            full_blocks_that_became_usable.append(&block);
        return IterationDecision::Continue;
    });

    for (auto& weak_container : m_weak_containers)
        weak_container.remove_dead_cells({});

    for (auto* block : empty_blocks) {
        dbgln_if(HEAP_DEBUG, "  - HeapBlock empty @ {}: cell_size={}", block, block->cell_size());
        allocator_for_size(block->cell_size()).block_did_become_empty({}, *block);
    }

    for (auto* block : full_blocks_that_became_usable) {
        dbgln_if(HEAP_DEBUG, "  - HeapBlock usable again @ {}: cell_size={}", block, block->cell_size());
        allocator_for_size(block->cell_size()).block_did_become_usable({}, *block);
    }

    if constexpr (HEAP_DEBUG) {
        for_each_block([&](auto& block) {
            dbgln("  > Live HeapBlock @ {}: cell_size={}", &block, block.cell_size());
            return IterationDecision::Continue;
        });
    }

    int time_spent = measurement_timer.elapsed();

    if (print_report) {
        size_t live_block_count = 0;
        for_each_block([&](auto&) {
            ++live_block_count;
            return IterationDecision::Continue;
        });

        dbgln("Garbage collection report");
        dbgln("=============================================");
        dbgln("     Time spent: {} ms", time_spent);
        dbgln("     Live cells: {} ({} bytes)", live_cells, live_cell_bytes);
        dbgln("Collected cells: {} ({} bytes)", collected_cells, collected_cell_bytes);
        dbgln("    Live blocks: {} ({} bytes)", live_block_count, live_block_count * HeapBlock::block_size);
        dbgln("   Freed blocks: {} ({} bytes)", empty_blocks.size(), empty_blocks.size() * HeapBlock::block_size);
        dbgln("=============================================");
    }
}

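// Handles, MarkedVectors and WeakContainers register themselves with the heap so they can be treated
// as root sources (or, for weak containers, pruned) during collection.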
void Heap::did_create_handle(Badge<HandleImpl>, HandleImpl& impl)
{
    VERIFY(!m_handles.contains(impl));
    m_handles.append(impl);
}

void Heap::did_destroy_handle(Badge<HandleImpl>, HandleImpl& impl)
{
    VERIFY(m_handles.contains(impl));
    m_handles.remove(impl);
}

void Heap::did_create_marked_vector(Badge<MarkedVectorBase>, MarkedVectorBase& vector)
{
    VERIFY(!m_marked_vectors.contains(vector));
    m_marked_vectors.append(vector);
}

void Heap::did_destroy_marked_vector(Badge<MarkedVectorBase>, MarkedVectorBase& vector)
{
    VERIFY(m_marked_vectors.contains(vector));
    m_marked_vectors.remove(vector);
}

void Heap::did_create_weak_container(Badge<WeakContainer>, WeakContainer& set)
{
    VERIFY(!m_weak_containers.contains(set));
    m_weak_containers.append(set);
}

void Heap::did_destroy_weak_container(Badge<WeakContainer>, WeakContainer& set)
{
    VERIFY(m_weak_containers.contains(set));
    m_weak_containers.remove(set);
}

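// GC deferral is reference-counted and normally driven by the RAII DeferGC helper, e.g. something like:
//
//     {
//         DeferGC defer_gc(heap);
//         // ... allocate several cells without a collection happening in between ...
//     } // if a collection was requested meanwhile, it runs once the last deferral ends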
void Heap::defer_gc(Badge<DeferGC>)
{
    ++m_gc_deferrals;
}

void Heap::undefer_gc(Badge<DeferGC>)
{
    VERIFY(m_gc_deferrals > 0);
    --m_gc_deferrals;

    if (!m_gc_deferrals) {
        if (m_should_gc_when_deferral_ends)
            collect_garbage();
        m_should_gc_when_deferral_ends = false;
    }
}

void Heap::uproot_cell(Cell* cell)
{
    m_uprooted_cells.append(cell);
}

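// SafeFunction registers the storage of its captured state here so gather_conservative_roots() scans it;
// this is what keeps cells captured by a JS::SafeFunction closure alive.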
void register_safe_function_closure(void* base, size_t size)
{
    if (!s_custom_ranges_for_conservative_scan) {
        // FIXME: This per-thread HashMap is currently leaked on thread exit.
        s_custom_ranges_for_conservative_scan = new HashMap<FlatPtr*, size_t>;
    }
    auto result = s_custom_ranges_for_conservative_scan->set(reinterpret_cast<FlatPtr*>(base), size);
    VERIFY(result == AK::HashSetResult::InsertedNewEntry);
}

void unregister_safe_function_closure(void* base, size_t)
{
    VERIFY(s_custom_ranges_for_conservative_scan);
    bool did_remove = s_custom_ranges_for_conservative_scan->remove(reinterpret_cast<FlatPtr*>(base));
    VERIFY(did_remove);
}

}
  334. }