/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

/*
 * Really really *really* Q&D malloc() and free() implementations
 * just to get going. Don't ever let anyone see this shit. :^)
 */

#include <AK/Assertions.h>
#include <AK/NonnullOwnPtrVector.h>
#include <AK/ScopeGuard.h>
#include <AK/TemporaryChange.h>
#include <AK/Types.h>
#include <Kernel/Arch/x86/CPU.h>
#include <Kernel/Debug.h>
#include <Kernel/Heap/Heap.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/KSyms.h>
#include <Kernel/Panic.h>
#include <Kernel/Process.h>
#include <Kernel/Scheduler.h>
#include <Kernel/SpinLock.h>
#include <Kernel/StdLib.h>
#include <Kernel/VM/MemoryManager.h>

#define CHUNK_SIZE 32
#define POOL_SIZE (2 * MiB)
#define ETERNAL_RANGE_SIZE (2 * MiB)
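
// The global heap below hands out memory in CHUNK_SIZE-byte chunks. The
// initial pool and the kmalloc_eternal range are each 2 MiB and live in
// the dedicated .heap section declared further down.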

static RecursiveSpinLock s_lock; // needs to be recursive because of dump_backtrace()

static void kmalloc_allocate_backup_memory();
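
// KmallocGlobalHeap wraps an ExpandableHeap and teaches it how to grow:
// when the heap runs low, ExpandGlobalHeap::add_memory() donates the
// pre-allocated backup region (and, if needed, a freshly allocated kernel
// region) as a new subheap, and remove_memory() takes fully-freed subheaps
// back, recycling one of them as the next backup region.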
struct KmallocGlobalHeap {
    struct ExpandGlobalHeap {
        KmallocGlobalHeap& m_global_heap;

        ExpandGlobalHeap(KmallocGlobalHeap& global_heap)
            : m_global_heap(global_heap)
        {
        }

        bool m_adding { false };
        bool add_memory(size_t allocation_request)
        {
            if (!MemoryManager::is_initialized()) {
                if constexpr (KMALLOC_DEBUG) {
                    dmesgln("kmalloc: Cannot expand heap before MM is initialized!");
                }
                return false;
            }
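            // m_adding guards against re-entrancy: expanding the heap must not
            // recursively trigger another expansion.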
            VERIFY(!m_adding);
            TemporaryChange change(m_adding, true);
            // At this point we have very little memory left. Any attempt to
            // kmalloc() could fail, and we can't really reliably allocate even
            // a new region of memory. This is why we keep a backup region
            // around: use it first.
            auto region = move(m_global_heap.m_backup_memory);
            if (!region) {
                // Be careful to not log too much here. We don't want to trigger
                // any further calls to kmalloc(). We're already out of memory
                // and don't have any backup memory, either!
                if constexpr (KMALLOC_DEBUG) {
                    dmesgln("kmalloc: Cannot expand heap: no backup memory");
                }
                return false;
            }

            // At this point we should have at least enough memory from the
            // backup region to be able to log properly
            if constexpr (KMALLOC_DEBUG) {
                dmesgln("kmalloc: Adding memory to heap at {}, bytes: {}", region->vaddr(), region->size());
            }

            auto& subheap = m_global_heap.m_heap.add_subheap(region->vaddr().as_ptr(), region->size());
            m_global_heap.m_subheap_memory.append(region.release_nonnull());

            // Since we pulled in our backup heap, make sure we allocate another
            // backup heap before returning. Otherwise we potentially lose
            // the ability to expand the heap next time we get called.
            ScopeGuard guard([&]() {
                // We may need to defer allocating backup memory because the
                // heap expansion may have been triggered while holding some
                // other spinlock. If allocating the backup happened to need
                // that same spinlock, we would deadlock, so queue a deferred
                // call instead of allocating here.
                Processor::current().deferred_call_queue(kmalloc_allocate_backup_memory);
            });

            // Now that we added our backup memory, check if the backup heap
            // was big enough to likely satisfy the request
            if (subheap.free_bytes() < allocation_request) {
                // Looks like we probably need more
                size_t memory_size = page_round_up(decltype(m_global_heap.m_heap)::calculate_memory_for_bytes(allocation_request));
                // Add some more to the new heap. We're already using it for other
                // allocations besides the original allocation_request that
                // triggered the expansion, so allocate some slack on top of what
                // the request itself needs.
                memory_size += 1 * MiB;
                region = MM.allocate_kernel_region(memory_size, "kmalloc subheap", Region::Access::Read | Region::Access::Write, AllocationStrategy::AllocateNow);
                if (region) {
                    dbgln("kmalloc: Adding even more memory to heap at {}, bytes: {}", region->vaddr(), region->size());

                    m_global_heap.m_heap.add_subheap(region->vaddr().as_ptr(), region->size());
                    m_global_heap.m_subheap_memory.append(region.release_nonnull());
                } else {
                    dbgln("kmalloc: Could not expand heap to satisfy allocation of {} bytes", allocation_request);
                    return false;
                }
            }
            return true;
        }

        bool remove_memory(void* memory)
        {
            // This is actually relatively unlikely to happen, because it requires
            // that all allocated memory in a subheap has been freed. Only then can
            // the subheap be removed...
            for (size_t i = 0; i < m_global_heap.m_subheap_memory.size(); i++) {
                if (m_global_heap.m_subheap_memory[i].vaddr().as_ptr() == memory) {
                    auto region = m_global_heap.m_subheap_memory.take(i);
                    if (!m_global_heap.m_backup_memory) {
                        if constexpr (KMALLOC_DEBUG) {
                            dmesgln("kmalloc: Using removed memory as backup: {}, bytes: {}", region->vaddr(), region->size());
                        }
                        m_global_heap.m_backup_memory = move(region);
                    } else {
                        if constexpr (KMALLOC_DEBUG) {
                            dmesgln("kmalloc: Queue removing memory from heap at {}, bytes: {}", region->vaddr(), region->size());
                        }
                        Processor::deferred_call_queue([this, region = move(region)]() mutable {
                            // We need to defer freeing the region to prevent a potential
                            // deadlock, since we are still holding the kmalloc lock.
                            // We don't really need to do anything other than hold
                            // onto the region, unless we already used the backup
                            // memory, in which case we want to use this region as
                            // the new backup.
                            ScopedSpinLock lock(s_lock);
                            if (!m_global_heap.m_backup_memory) {
                                if constexpr (KMALLOC_DEBUG) {
                                    dmesgln("kmalloc: Queued memory region at {}, bytes: {} will be used as new backup", region->vaddr(), region->size());
                                }
                                m_global_heap.m_backup_memory = move(region);
                            } else {
                                if constexpr (KMALLOC_DEBUG) {
                                    dmesgln("kmalloc: Queued memory region at {}, bytes: {} will be freed now", region->vaddr(), region->size());
                                }
                            }
                        });
                    }
                    return true;
                }
            }

            if constexpr (KMALLOC_DEBUG) {
                dmesgln("kmalloc: Cannot remove memory from heap: {}", VirtualAddress(memory));
            }
            return false;
        }
    };
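
    // KMALLOC_SCRUB_BYTE and KFREE_SCRUB_BYTE are fill patterns the heap writes
    // into freshly allocated and freed chunks, which makes use of uninitialized
    // or already-freed kernel memory easier to spot.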
    typedef ExpandableHeap<CHUNK_SIZE, KMALLOC_SCRUB_BYTE, KFREE_SCRUB_BYTE, ExpandGlobalHeap> HeapType;

    HeapType m_heap;
    NonnullOwnPtrVector<Region> m_subheap_memory;
    OwnPtr<Region> m_backup_memory;

    KmallocGlobalHeap(u8* memory, size_t memory_size)
        : m_heap(memory, memory_size, ExpandGlobalHeap(*this))
    {
    }
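
    // Keep a 1 MiB region in reserve so ExpandGlobalHeap::add_memory() has
    // something to donate even when regular allocations are already failing.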
    void allocate_backup_memory()
    {
        if (m_backup_memory)
            return;
        m_backup_memory = MM.allocate_kernel_region(1 * MiB, "kmalloc subheap", Region::Access::Read | Region::Access::Write, AllocationStrategy::AllocateNow);
    }

    size_t backup_memory_bytes() const
    {
        return m_backup_memory ? m_backup_memory->size() : 0;
    }
};

READONLY_AFTER_INIT static KmallocGlobalHeap* g_kmalloc_global;
static u8 g_kmalloc_global_heap[sizeof(KmallocGlobalHeap)];

// Treat the heap as logically separate from .bss
__attribute__((section(".heap"))) static u8 kmalloc_eternal_heap[ETERNAL_RANGE_SIZE];
__attribute__((section(".heap"))) static u8 kmalloc_pool_heap[POOL_SIZE];

static size_t g_kmalloc_bytes_eternal = 0;
static size_t g_kmalloc_call_count;
static size_t g_kfree_call_count;
bool g_dump_kmalloc_stacks;

static u8* s_next_eternal_ptr;
READONLY_AFTER_INIT static u8* s_end_of_eternal_range;
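
// Deferred-call target queued by ExpandGlobalHeap::add_memory(): replenish the
// backup region once we are no longer holding whatever locks triggered the
// heap expansion.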
static void kmalloc_allocate_backup_memory()
{
    g_kmalloc_global->allocate_backup_memory();
}
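
// Allocating the first backup region is what arms heap expansion; this can
// only be done once the MemoryManager is able to hand out kernel regions.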
void kmalloc_enable_expand()
{
    g_kmalloc_global->allocate_backup_memory();
}

static inline void kmalloc_verify_nospinlock_held()
{
    // Catch bad callers allocating under spinlock.
    if constexpr (KMALLOC_VERIFY_NO_SPINLOCK_HELD) {
        VERIFY(!Processor::current().in_critical());
    }
}

UNMAP_AFTER_INIT void kmalloc_init()
{
    // Zero out heap since it's placed after end_of_kernel_bss.
    memset(kmalloc_eternal_heap, 0, sizeof(kmalloc_eternal_heap));
    memset(kmalloc_pool_heap, 0, sizeof(kmalloc_pool_heap));
    g_kmalloc_global = new (g_kmalloc_global_heap) KmallocGlobalHeap(kmalloc_pool_heap, sizeof(kmalloc_pool_heap));

    s_lock.initialize();

    s_next_eternal_ptr = kmalloc_eternal_heap;
    s_end_of_eternal_range = s_next_eternal_ptr + sizeof(kmalloc_eternal_heap);
}
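
// Bump allocator for allocations that live for the lifetime of the kernel;
// memory returned by kmalloc_eternal() is never freed.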
void* kmalloc_eternal(size_t size)
{
    kmalloc_verify_nospinlock_held();

    size = round_up_to_power_of_two(size, sizeof(void*));

    ScopedSpinLock lock(s_lock);
    void* ptr = s_next_eternal_ptr;
    s_next_eternal_ptr += size;
    VERIFY(s_next_eternal_ptr < s_end_of_eternal_range);
    g_kmalloc_bytes_eternal += size;
    return ptr;
}
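
// General-purpose kernel allocator. Allocation failure is fatal: if the heap
// cannot satisfy the request even after expanding, we panic rather than
// return nullptr.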
void* kmalloc(size_t size)
{
    kmalloc_verify_nospinlock_held();
    ScopedSpinLock lock(s_lock);
    ++g_kmalloc_call_count;

    if (g_dump_kmalloc_stacks && Kernel::g_kernel_symbols_available) {
        dbgln("kmalloc({})", size);
        Kernel::dump_backtrace();
    }

    void* ptr = g_kmalloc_global->m_heap.allocate(size);
    if (!ptr) {
        PANIC("kmalloc: Out of memory (requested size: {})", size);
    }

    return ptr;
}

void kfree(void* ptr)
{
    if (!ptr)
        return;

    kmalloc_verify_nospinlock_held();
    ScopedSpinLock lock(s_lock);
    ++g_kfree_call_count;

    g_kmalloc_global->m_heap.deallocate(ptr);
}

void* krealloc(void* ptr, size_t new_size)
{
    kmalloc_verify_nospinlock_held();
    ScopedSpinLock lock(s_lock);
    return g_kmalloc_global->m_heap.reallocate(ptr, new_size);
}
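
// Route the global C++ allocation operators through kmalloc/kfree. The sized
// delete overloads ignore the size argument.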
void* operator new(size_t size) noexcept
{
    return kmalloc(size);
}

void* operator new[](size_t size) noexcept
{
    return kmalloc(size);
}

void operator delete(void* ptr) noexcept
{
    return kfree(ptr);
}

void operator delete(void* ptr, size_t) noexcept
{
    return kfree(ptr);
}

void operator delete[](void* ptr) noexcept
{
    return kfree(ptr);
}

void operator delete[](void* ptr, size_t) noexcept
{
    return kfree(ptr);
}
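
// Snapshot of the allocator counters; the backup region is reported as free
// since it will be folded into the heap on demand.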
void get_kmalloc_stats(kmalloc_stats& stats)
{
    ScopedSpinLock lock(s_lock);
    stats.bytes_allocated = g_kmalloc_global->m_heap.allocated_bytes();
    stats.bytes_free = g_kmalloc_global->m_heap.free_bytes() + g_kmalloc_global->backup_memory_bytes();
    stats.bytes_eternal = g_kmalloc_bytes_eternal;
    stats.kmalloc_call_count = g_kmalloc_call_count;
    stats.kfree_call_count = g_kfree_call_count;
}