Heap.h

/*
 * Copyright (c) 2020, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Bitmap.h>
#include <AK/ScopeGuard.h>
#include <AK/TemporaryChange.h>
#include <AK/Vector.h>
#include <AK/kmalloc.h>

namespace Kernel {

template<size_t CHUNK_SIZE, unsigned HEAP_SCRUB_BYTE_ALLOC = 0, unsigned HEAP_SCRUB_BYTE_FREE = 0>
class Heap {
    AK_MAKE_NONCOPYABLE(Heap);
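
    // Every allocation is prefixed by an AllocationHeader recording its size
    // in chunks; data[] marks where the caller-visible memory begins.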
    struct AllocationHeader {
        size_t allocation_size_in_chunks;
#if ARCH(X86_64)
        // FIXME: Get rid of this somehow
        size_t alignment_dummy;
#endif
        u8 data[0];
    };

    static_assert(CHUNK_SIZE >= sizeof(AllocationHeader));

    ALWAYS_INLINE AllocationHeader* allocation_header(void* ptr)
    {
        return (AllocationHeader*)((((u8*)ptr) - sizeof(AllocationHeader)));
    }

    ALWAYS_INLINE const AllocationHeader* allocation_header(const void* ptr) const
    {
        return (const AllocationHeader*)((((const u8*)ptr) - sizeof(AllocationHeader)));
    }
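
    // Number of chunks that fit in memory_size bytes once bookkeeping space
    // is reserved; this conservatively sets aside a full byte per chunk even
    // though the bitmap only needs one bit per chunk.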
    static size_t calculate_chunks(size_t memory_size)
    {
        return (sizeof(u8) * memory_size) / (sizeof(u8) * CHUNK_SIZE + 1);
    }

public:
    Heap(u8* memory, size_t memory_size)
        : m_total_chunks(calculate_chunks(memory_size))
        , m_chunks(memory)
        , m_bitmap(memory + m_total_chunks * CHUNK_SIZE, m_total_chunks)
    {
        // To keep the alignment of the memory passed in, place the bitmap
        // at the end of the memory block.
        VERIFY(m_total_chunks * CHUNK_SIZE + (m_total_chunks + 7) / 8 <= memory_size);
    }

    ~Heap() = default;
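
    // How much backing memory a Heap needs so that a single allocation of
    // `bytes` can succeed, including header and bitmap overhead.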
    static size_t calculate_memory_for_bytes(size_t bytes)
    {
        size_t needed_chunks = (sizeof(AllocationHeader) + bytes + CHUNK_SIZE - 1) / CHUNK_SIZE;
        return needed_chunks * CHUNK_SIZE + (needed_chunks + 7) / 8;
    }
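
    // Returns a pointer to `size` usable bytes, or nullptr if no sufficiently
    // long run of free chunks exists.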
    void* allocate(size_t size)
    {
        // We need space for the AllocationHeader at the head of the block.
        size_t real_size = size + sizeof(AllocationHeader);
        size_t chunks_needed = (real_size + CHUNK_SIZE - 1) / CHUNK_SIZE;

        if (chunks_needed > free_chunks())
            return nullptr;

        Optional<size_t> first_chunk;

        // Choose the allocation policy: first fit is cheap and usually good
        // enough for small requests, while best fit keeps fragmentation down
        // for large ones.
        constexpr u32 best_fit_threshold = 128;
        if (chunks_needed < best_fit_threshold) {
            first_chunk = m_bitmap.find_first_fit(chunks_needed);
        } else {
            first_chunk = m_bitmap.find_best_fit(chunks_needed);
        }

        if (!first_chunk.has_value())
            return nullptr;

        auto* a = (AllocationHeader*)(m_chunks + (first_chunk.value() * CHUNK_SIZE));
        u8* ptr = a->data;
        a->allocation_size_in_chunks = chunks_needed;
        m_bitmap.set_range_and_verify_that_all_bits_flip(first_chunk.value(), chunks_needed, true);
        m_allocated_chunks += chunks_needed;
        if constexpr (HEAP_SCRUB_BYTE_ALLOC != 0) {
            __builtin_memset(ptr, HEAP_SCRUB_BYTE_ALLOC, (chunks_needed * CHUNK_SIZE) - sizeof(AllocationHeader));
        }
        return ptr;
    }
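
    // Frees a block previously returned by allocate(); null pointers are a no-op.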
    void deallocate(void* ptr)
    {
        if (!ptr)
            return;
        auto* a = allocation_header(ptr);
        VERIFY((u8*)a >= m_chunks && (u8*)ptr < m_chunks + m_total_chunks * CHUNK_SIZE);
        FlatPtr start = ((FlatPtr)a - (FlatPtr)m_chunks) / CHUNK_SIZE;

        // First, verify that the start of the allocation at `ptr` is actually allocated.
        VERIFY(m_bitmap.get(start));
        VERIFY((u8*)a + a->allocation_size_in_chunks * CHUNK_SIZE <= m_chunks + m_total_chunks * CHUNK_SIZE);

        m_bitmap.set_range_and_verify_that_all_bits_flip(start, a->allocation_size_in_chunks, false);

        VERIFY(m_allocated_chunks >= a->allocation_size_in_chunks);
        m_allocated_chunks -= a->allocation_size_in_chunks;

        if constexpr (HEAP_SCRUB_BYTE_FREE != 0) {
            __builtin_memset(a, HEAP_SCRUB_BYTE_FREE, a->allocation_size_in_chunks * CHUNK_SIZE);
        }
    }
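
    // Note: this only checks that ptr lies within this heap's chunk area;
    // it does not verify that ptr refers to a live allocation.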
    bool contains(const void* ptr) const
    {
        const auto* a = allocation_header(ptr);
        if ((const u8*)a < m_chunks)
            return false;
        if ((const u8*)ptr >= m_chunks + m_total_chunks * CHUNK_SIZE)
            return false;
        return true;
    }

    u8* memory() const { return m_chunks; }

    size_t total_chunks() const { return m_total_chunks; }
    size_t total_bytes() const { return m_total_chunks * CHUNK_SIZE; }
    size_t free_chunks() const { return m_total_chunks - m_allocated_chunks; }
    size_t free_bytes() const { return free_chunks() * CHUNK_SIZE; }
    size_t allocated_chunks() const { return m_allocated_chunks; }
    size_t allocated_bytes() const { return m_allocated_chunks * CHUNK_SIZE; }

private:
    size_t m_total_chunks { 0 };
    size_t m_allocated_chunks { 0 };
    u8* m_chunks { nullptr };
    Bitmap m_bitmap;
};
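
// A minimal usage sketch, assuming the caller provides the backing memory;
// the chunk size, buffer size, and request size below are illustrative:
//
//     alignas(16) static u8 backing[65536];
//     Heap<64> heap(backing, sizeof(backing));
//     if (void* p = heap.allocate(128))
//         heap.deallocate(p);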

template<typename ExpandHeap>
struct ExpandableHeapTraits {
    static bool add_memory(ExpandHeap& expand, size_t allocation_request)
    {
        return expand.add_memory(allocation_request);
    }

    static bool remove_memory(ExpandHeap& expand, void* memory)
    {
        return expand.remove_memory(memory);
    }
};
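
// The no-op expansion policy: a heap using DefaultExpandHeap simply fails
// to grow once its initial memory is exhausted.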
struct DefaultExpandHeap {
    bool add_memory(size_t)
    {
        // Requires explicit implementation
        return false;
    }

    bool remove_memory(void*)
    {
        return false;
    }
};

template<size_t CHUNK_SIZE, unsigned HEAP_SCRUB_BYTE_ALLOC = 0, unsigned HEAP_SCRUB_BYTE_FREE = 0, typename ExpandHeap = DefaultExpandHeap>
class ExpandableHeap {
    AK_MAKE_NONCOPYABLE(ExpandableHeap);
    AK_MAKE_NONMOVABLE(ExpandableHeap);

public:
    using ExpandHeapType = ExpandHeap;
    using HeapType = Heap<CHUNK_SIZE, HEAP_SCRUB_BYTE_ALLOC, HEAP_SCRUB_BYTE_FREE>;
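
    // Added blocks of memory each begin with a SubHeap node that links them
    // into a list; the initial heap's node lives in the ExpandableHeap itself.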
    struct SubHeap {
        HeapType heap;
        SubHeap* next { nullptr };
        size_t memory_size { 0 };

        template<typename... Args>
        SubHeap(size_t memory_size, Args&&... args)
            : heap(forward<Args>(args)...)
            , memory_size(memory_size)
        {
        }
    };

    ExpandableHeap(u8* memory, size_t memory_size, const ExpandHeapType& expand = ExpandHeapType())
        : m_heaps(memory_size, memory, memory_size)
        , m_expand(expand)
    {
    }

    ~ExpandableHeap()
    {
        // We don't own the main heap's memory, so only remove memory that we
        // added ourselves.
        SubHeap* next;
        for (auto* heap = m_heaps.next; heap; heap = next) {
            next = heap->next;
            heap->~SubHeap();
            ExpandableHeapTraits<ExpandHeap>::remove_memory(m_expand, (void*)heap);
        }
    }

    static size_t calculate_memory_for_bytes(size_t bytes)
    {
        return sizeof(SubHeap) + HeapType::calculate_memory_for_bytes(bytes);
    }

    bool expand_memory(size_t size)
    {
        if (m_expanding)
            return false;

        // Allocating more memory itself may trigger allocations and deallocations
        // on this heap. We need to prevent recursive expansion. We also disable
        // removing memory while trying to expand the heap.
        TemporaryChange change(m_expanding, true);
        return ExpandableHeapTraits<ExpandHeap>::add_memory(m_expand, size);
    }

    void* allocate(size_t size)
    {
        int attempt = 0;
        do {
            for (auto* subheap = &m_heaps; subheap; subheap = subheap->next) {
                if (void* ptr = subheap->heap.allocate(size))
                    return ptr;
            }

            // We need to loop because we won't know how much memory was added.
            // Even though we make a best guess at how much memory needs to be
            // added, there is no guarantee that enough will actually be
            // available afterwards. This is especially true for the kmalloc
            // heap, where adding memory requires several other objects to be
            // allocated just to be able to expand the heap. To avoid an
            // infinite expansion loop, limit this to two attempts.
            if (attempt++ >= 2)
                break;
        } while (expand_memory(size));
        return nullptr;
    }

    void deallocate(void* ptr)
    {
        if (!ptr)
            return;
        for (auto* subheap = &m_heaps; subheap; subheap = subheap->next) {
            if (subheap->heap.contains(ptr)) {
                subheap->heap.deallocate(ptr);
                if (subheap->heap.allocated_chunks() == 0 && subheap != &m_heaps && !m_expanding) {
                    // remove_memory expects the memory to be unused and may
                    // deallocate it, so we must first unlink the subheap and
                    // destroy it. If remove_memory ends up not removing the
                    // memory, we'll initialize a new subheap and re-add it.
                    // Unlinking before calling remove_memory also matters
                    // because remove_memory itself could trigger an allocation,
                    // which must not land in the subheap we're about to remove.
                    {
                        auto* subheap2 = m_heaps.next;
                        auto** subheap_link = &m_heaps.next;
                        while (subheap2 != subheap) {
                            subheap_link = &subheap2->next;
                            subheap2 = subheap2->next;
                        }
                        *subheap_link = subheap->next;
                    }
                    auto memory_size = subheap->memory_size;
                    subheap->~SubHeap();
                    if (!ExpandableHeapTraits<ExpandHeap>::remove_memory(m_expand, subheap)) {
                        // Removal of the subheap was rejected; re-add the block
                        // with a freshly initialized subheap. subheap->memory_size
                        // excludes the SubHeap header, so account for it here.
                        add_subheap(subheap, memory_size + sizeof(SubHeap));
                    }
                }
                return;
            }
        }
        VERIFY_NOT_REACHED();
    }
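
    // Takes over `memory` as an additional subheap: the SubHeap node is
    // placed at the start of the block and the list is kept sorted by
    // address, with the main heap always first.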
    HeapType& add_subheap(void* memory, size_t memory_size)
    {
        VERIFY(memory_size > sizeof(SubHeap));

        // Place the SubHeap structure at the beginning of the new memory block
        memory_size -= sizeof(SubHeap);
        SubHeap* new_heap = (SubHeap*)memory;
        new (new_heap) SubHeap(memory_size, (u8*)(new_heap + 1), memory_size);

        // Add the subheap to the list (but leave the main heap where it is)
        SubHeap* next_heap = m_heaps.next;
        SubHeap** next_heap_link = &m_heaps.next;
        while (next_heap) {
            if (new_heap->heap.memory() < next_heap->heap.memory())
                break;
            next_heap_link = &next_heap->next;
            next_heap = next_heap->next;
        }
        new_heap->next = *next_heap_link;
        *next_heap_link = new_heap;
        return new_heap->heap;
    }

    bool contains(const void* ptr) const
    {
        for (auto* subheap = &m_heaps; subheap; subheap = subheap->next) {
            if (subheap->heap.contains(ptr))
                return true;
        }
        return false;
    }

    size_t total_chunks() const
    {
        size_t total = 0;
        for (auto* subheap = &m_heaps; subheap; subheap = subheap->next)
            total += subheap->heap.total_chunks();
        return total;
    }
    size_t total_bytes() const { return total_chunks() * CHUNK_SIZE; }

    size_t free_chunks() const
    {
        size_t total = 0;
        for (auto* subheap = &m_heaps; subheap; subheap = subheap->next)
            total += subheap->heap.free_chunks();
        return total;
    }
    size_t free_bytes() const { return free_chunks() * CHUNK_SIZE; }

    size_t allocated_chunks() const
    {
        size_t total = 0;
        for (auto* subheap = &m_heaps; subheap; subheap = subheap->next)
            total += subheap->heap.allocated_chunks();
        return total;
    }
    size_t allocated_bytes() const { return allocated_chunks() * CHUNK_SIZE; }

private:
    SubHeap m_heaps;
    ExpandHeap m_expand;
    bool m_expanding { false };
};
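
// A minimal sketch of wiring up expansion, assuming a one-shot static pool.
// StaticPoolExpander, g_heap, and the sizes are illustrative assumptions,
// not part of this interface; the kernel's real expander is more involved.
//
//     struct StaticPoolExpander;
//     static ExpandableHeap<32, 0, 0, StaticPoolExpander>* g_heap;
//
//     struct StaticPoolExpander {
//         bool add_memory(size_t)
//         {
//             static u8 pool[64 * 1024];
//             static bool used = false;
//             if (used)
//                 return false;
//             used = true;
//             g_heap->add_subheap(pool, sizeof(pool));
//             return true;
//         }
//         bool remove_memory(void*) { return false; }
//     };
//
//     // After constructing the heap over its initial block, point g_heap at
//     // it so the expander can reach add_subheap():
//     //     g_heap = &heap;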

}