kmalloc.cpp

/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

/*
 * Really really *really* Q&D malloc() and free() implementations
 * just to get going. Don't ever let anyone see this shit. :^)
 */

#include <AK/Assertions.h>
#include <AK/NonnullOwnPtrVector.h>
#include <AK/Types.h>
#include <Kernel/Debug.h>
#include <Kernel/Heap/Heap.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/KSyms.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Panic.h>
#include <Kernel/PerformanceManager.h>
#include <Kernel/Sections.h>
#include <Kernel/StdLib.h>

#define CHUNK_SIZE 32
#define POOL_SIZE (2 * MiB)
#define ETERNAL_RANGE_SIZE (4 * MiB)

namespace std {
const nothrow_t nothrow;
}

static RecursiveSpinlock s_lock; // needs to be recursive because of dump_backtrace()

static void kmalloc_allocate_backup_memory();
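
// The global kmalloc heap grows on demand: when an allocation cannot be
// satisfied, ExpandGlobalHeap::add_memory() is asked to provide another
// subheap. Because allocating a new kernel region may itself fail (or recurse
// into kmalloc()) at that point, a spare 1 MiB backup region is kept around at
// all times and is handed to the heap first; a replacement backup is then
// allocated through a deferred call once it is safe to do so.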
struct KmallocGlobalHeap {
    struct ExpandGlobalHeap {
        KmallocGlobalHeap& m_global_heap;

        ExpandGlobalHeap(KmallocGlobalHeap& global_heap)
            : m_global_heap(global_heap)
        {
        }

        bool m_adding { false };
        bool add_memory(size_t allocation_request)
        {
            if (!Memory::MemoryManager::is_initialized()) {
                if constexpr (KMALLOC_DEBUG) {
                    dmesgln("kmalloc: Cannot expand heap before MM is initialized!");
                }
                return false;
            }
            VERIFY(!m_adding);
            TemporaryChange change(m_adding, true);
            // At this point we have very little memory left. Any attempt to
            // kmalloc() could fail, and we can't really reliably allocate even
            // a new region of memory. This is why we keep a backup region,
            // which we can hand to the heap now without allocating anything.
            auto region = move(m_global_heap.m_backup_memory);
            if (!region) {
                // Be careful to not log too much here. We don't want to trigger
                // any further calls to kmalloc(). We're already out of memory
                // and don't have any backup memory, either!
                if constexpr (KMALLOC_DEBUG) {
                    dmesgln("kmalloc: Cannot expand heap: no backup memory");
                }
                return false;
            }

            // At this point we should have at least enough memory from the
            // backup region to be able to log properly
            if constexpr (KMALLOC_DEBUG) {
                dmesgln("kmalloc: Adding memory to heap at {}, bytes: {}", region->vaddr(), region->size());
            }

            auto& subheap = m_global_heap.m_heap.add_subheap(region->vaddr().as_ptr(), region->size());
            m_global_heap.m_subheap_memory.append(region.release_nonnull());

            // Since we pulled in our backup heap, make sure we allocate another
            // backup heap before returning. Otherwise we potentially lose
            // the ability to expand the heap next time we get called.
            ScopeGuard guard([&]() {
                // We may need to defer allocating new backup memory because the
                // heap expansion may have been triggered while holding some
                // other spinlock. If allocating the backup happened to need the
                // same spinlock, we would deadlock, so always defer it.
                Processor::current().deferred_call_queue(kmalloc_allocate_backup_memory);
            });

            // Now that we added our backup memory, check if the backup heap
            // was big enough to likely satisfy the request
            if (subheap.free_bytes() < allocation_request) {
                // Looks like we probably need more
                size_t memory_size = Memory::page_round_up(decltype(m_global_heap.m_heap)::calculate_memory_for_bytes(allocation_request));
                // Add some more to the new heap. We're already using it for
                // other allocations, not including the original
                // allocation_request that triggered the heap expansion, so a
                // bit of extra headroom keeps us from expanding again right away.
                memory_size += 1 * MiB;
                region = MM.allocate_kernel_region(memory_size, "kmalloc subheap", Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow);
                if (region) {
                    dbgln("kmalloc: Adding even more memory to heap at {}, bytes: {}", region->vaddr(), region->size());

                    m_global_heap.m_heap.add_subheap(region->vaddr().as_ptr(), region->size());
                    m_global_heap.m_subheap_memory.append(region.release_nonnull());
                } else {
                    dbgln("kmalloc: Could not expand heap to satisfy allocation of {} bytes", allocation_request);
                    return false;
                }
            }
            return true;
        }
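
        // Invoked when a subheap has become completely empty and its backing
        // region can be taken back. The region either becomes the new backup
        // (if we currently have none) or is released via a deferred call,
        // since freeing it while still holding the kmalloc lock could deadlock.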
        bool remove_memory(void* memory)
        {
            // This is actually relatively unlikely to happen, because it requires
            // all allocated memory in a subheap to have been freed. Only then can
            // the subheap be removed...
            for (size_t i = 0; i < m_global_heap.m_subheap_memory.size(); i++) {
                if (m_global_heap.m_subheap_memory[i].vaddr().as_ptr() == memory) {
                    auto region = m_global_heap.m_subheap_memory.take(i);
                    if (!m_global_heap.m_backup_memory) {
                        if constexpr (KMALLOC_DEBUG) {
                            dmesgln("kmalloc: Using removed memory as backup: {}, bytes: {}", region->vaddr(), region->size());
                        }
                        m_global_heap.m_backup_memory = move(region);
                    } else {
                        if constexpr (KMALLOC_DEBUG) {
                            dmesgln("kmalloc: Queue removing memory from heap at {}, bytes: {}", region->vaddr(), region->size());
                        }
                        Processor::deferred_call_queue([this, region = move(region)]() mutable {
                            // We need to defer freeing the region to prevent a potential
                            // deadlock, since we are still holding the kmalloc lock.
                            // We don't really need to do anything other than hold
                            // onto the region. Unless we already used the backup
                            // memory, in which case we want to use the region as the
                            // new backup.
                            SpinlockLocker lock(s_lock);
                            if (!m_global_heap.m_backup_memory) {
                                if constexpr (KMALLOC_DEBUG) {
                                    dmesgln("kmalloc: Queued memory region at {}, bytes: {} will be used as new backup", region->vaddr(), region->size());
                                }
                                m_global_heap.m_backup_memory = move(region);
                            } else {
                                if constexpr (KMALLOC_DEBUG) {
                                    dmesgln("kmalloc: Queued memory region at {}, bytes: {} will be freed now", region->vaddr(), region->size());
                                }
                            }
                        });
                    }
                    return true;
                }
            }

            if constexpr (KMALLOC_DEBUG) {
                dmesgln("kmalloc: Cannot remove memory from heap: {}", VirtualAddress(memory));
            }
            return false;
        }
    };

    typedef ExpandableHeap<CHUNK_SIZE, KMALLOC_SCRUB_BYTE, KFREE_SCRUB_BYTE, ExpandGlobalHeap> HeapType;

    HeapType m_heap;
    NonnullOwnPtrVector<Memory::Region> m_subheap_memory;
    OwnPtr<Memory::Region> m_backup_memory;

    KmallocGlobalHeap(u8* memory, size_t memory_size)
        : m_heap(memory, memory_size, ExpandGlobalHeap(*this))
    {
    }

    void allocate_backup_memory()
    {
        if (m_backup_memory)
            return;
        m_backup_memory = MM.allocate_kernel_region(1 * MiB, "kmalloc subheap", Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow);
    }

    size_t backup_memory_bytes() const
    {
        return m_backup_memory ? m_backup_memory->size() : 0;
    }
};
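
// The global heap object lives in a statically allocated, suitably aligned
// buffer and is constructed via placement new in kmalloc_init(), so bringing
// the allocator up does not itself require any dynamic allocation.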
READONLY_AFTER_INIT static KmallocGlobalHeap* g_kmalloc_global;
alignas(KmallocGlobalHeap) static u8 g_kmalloc_global_heap[sizeof(KmallocGlobalHeap)];

// Treat the heap as logically separate from .bss
__attribute__((section(".heap"))) static u8 kmalloc_eternal_heap[ETERNAL_RANGE_SIZE];
__attribute__((section(".heap"))) static u8 kmalloc_pool_heap[POOL_SIZE];

static size_t g_kmalloc_bytes_eternal = 0;
static size_t g_kmalloc_call_count;
static size_t g_kfree_call_count;
static size_t g_nested_kfree_calls;

bool g_dump_kmalloc_stacks;

static u8* s_next_eternal_ptr;
READONLY_AFTER_INIT static u8* s_end_of_eternal_range;

static void kmalloc_allocate_backup_memory()
{
    g_kmalloc_global->allocate_backup_memory();
}

void kmalloc_enable_expand()
{
    g_kmalloc_global->allocate_backup_memory();
}

static inline void kmalloc_verify_nospinlock_held()
{
    // Catch bad callers allocating under spinlock.
    if constexpr (KMALLOC_VERIFY_NO_SPINLOCK_HELD) {
        VERIFY(!Processor::in_critical());
    }
}

UNMAP_AFTER_INIT void kmalloc_init()
{
    // Zero out heap since it's placed after end_of_kernel_bss.
    memset(kmalloc_eternal_heap, 0, sizeof(kmalloc_eternal_heap));
    memset(kmalloc_pool_heap, 0, sizeof(kmalloc_pool_heap));
    g_kmalloc_global = new (g_kmalloc_global_heap) KmallocGlobalHeap(kmalloc_pool_heap, sizeof(kmalloc_pool_heap));

    s_lock.initialize();

    s_next_eternal_ptr = kmalloc_eternal_heap;
    s_end_of_eternal_range = s_next_eternal_ptr + sizeof(kmalloc_eternal_heap);
}
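
// kmalloc_eternal() is a simple bump allocator over a dedicated range, meant
// for allocations that are never freed: it only advances s_next_eternal_ptr
// and verifies that the eternal range has not been exhausted.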
void* kmalloc_eternal(size_t size)
{
    kmalloc_verify_nospinlock_held();

    size = round_up_to_power_of_two(size, sizeof(void*));

    SpinlockLocker lock(s_lock);
    void* ptr = s_next_eternal_ptr;
    s_next_eternal_ptr += size;
    VERIFY(s_next_eternal_ptr < s_end_of_eternal_range);
    g_kmalloc_bytes_eternal += size;
    return ptr;
}

void* kmalloc(size_t size)
{
    kmalloc_verify_nospinlock_held();
    SpinlockLocker lock(s_lock);
    ++g_kmalloc_call_count;

    if (g_dump_kmalloc_stacks && Kernel::g_kernel_symbols_available) {
        dbgln("kmalloc({})", size);
        Kernel::dump_backtrace();
    }

    void* ptr = g_kmalloc_global->m_heap.allocate(size);

    Thread* current_thread = Thread::current();
    if (!current_thread)
        current_thread = Processor::idle_thread();
    if (current_thread)
        PerformanceManager::add_kmalloc_perf_event(*current_thread, size, (FlatPtr)ptr);

    return ptr;
}

void kfree_sized(void* ptr, size_t size)
{
    (void)size;
    return kfree(ptr);
}
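
// g_nested_kfree_calls ensures that only the outermost kfree() call records a
// perf event; nested frees (e.g. any triggered while recording the event
// itself) skip the recording step.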
void kfree(void* ptr)
{
    if (!ptr)
        return;

    kmalloc_verify_nospinlock_held();
    SpinlockLocker lock(s_lock);
    ++g_kfree_call_count;
    ++g_nested_kfree_calls;

    if (g_nested_kfree_calls == 1) {
        Thread* current_thread = Thread::current();
        if (!current_thread)
            current_thread = Processor::idle_thread();
        if (current_thread)
            PerformanceManager::add_kfree_perf_event(*current_thread, 0, (FlatPtr)ptr);
    }

    g_kmalloc_global->m_heap.deallocate(ptr);
    --g_nested_kfree_calls;
}

size_t kmalloc_good_size(size_t size)
{
    return size;
}
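
// Aligned allocations are layered on top of plain kmalloc(): over-allocate by
// alignment + sizeof(ptrdiff_t), round the pointer up to the requested
// alignment, and stash the offset back to the original allocation in the word
// just before the aligned address so that kfree_aligned() can recover it.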
[[gnu::malloc, gnu::alloc_size(1), gnu::alloc_align(2)]] static void* kmalloc_aligned_cxx(size_t size, size_t alignment)
{
    VERIFY(alignment <= 4096);
    void* ptr = kmalloc(size + alignment + sizeof(ptrdiff_t));
    if (ptr == nullptr)
        return nullptr;
    size_t max_addr = (size_t)ptr + alignment;
    void* aligned_ptr = (void*)(max_addr - (max_addr % alignment));
    ((ptrdiff_t*)aligned_ptr)[-1] = (ptrdiff_t)((u8*)aligned_ptr - (u8*)ptr);
    return aligned_ptr;
}
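
// The throwing operator new variants VERIFY the result rather than throwing
// (kernel code does not use exceptions); the nothrow variants simply pass a
// null return through to the caller.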
void* operator new(size_t size)
{
    void* ptr = kmalloc(size);
    VERIFY(ptr);
    return ptr;
}

void* operator new(size_t size, const std::nothrow_t&) noexcept
{
    return kmalloc(size);
}

void* operator new(size_t size, std::align_val_t al)
{
    void* ptr = kmalloc_aligned_cxx(size, (size_t)al);
    VERIFY(ptr);
    return ptr;
}

void* operator new(size_t size, std::align_val_t al, const std::nothrow_t&) noexcept
{
    return kmalloc_aligned_cxx(size, (size_t)al);
}

void* operator new[](size_t size)
{
    void* ptr = kmalloc(size);
    VERIFY(ptr);
    return ptr;
}

void* operator new[](size_t size, const std::nothrow_t&) noexcept
{
    return kmalloc(size);
}

void operator delete(void*) noexcept
{
    // All deletes in kernel code should have a known size.
    VERIFY_NOT_REACHED();
}

void operator delete(void* ptr, size_t size) noexcept
{
    return kfree_sized(ptr, size);
}

void operator delete(void* ptr, size_t, std::align_val_t) noexcept
{
    return kfree_aligned(ptr);
}

void operator delete[](void*) noexcept
{
    // All deletes in kernel code should have a known size.
    VERIFY_NOT_REACHED();
}

void operator delete[](void* ptr, size_t size) noexcept
{
    return kfree_sized(ptr, size);
}
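
// Note that backup memory is counted as free: it has not been added to the
// heap yet, but it will be the moment an expansion needs it.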
void get_kmalloc_stats(kmalloc_stats& stats)
{
    SpinlockLocker lock(s_lock);
    stats.bytes_allocated = g_kmalloc_global->m_heap.allocated_bytes();
    stats.bytes_free = g_kmalloc_global->m_heap.free_bytes() + g_kmalloc_global->backup_memory_bytes();
    stats.bytes_eternal = g_kmalloc_bytes_eternal;
    stats.kmalloc_call_count = g_kmalloc_call_count;
    stats.kfree_call_count = g_kfree_call_count;
}