kmalloc.cpp

/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

/*
 * Really really *really* Q&D malloc() and free() implementations
 * just to get going. Don't ever let anyone see this shit. :^)
 */

#include <AK/Assertions.h>
#include <AK/Types.h>
#include <Kernel/Debug.h>
#include <Kernel/Heap/Heap.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/KSyms.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Panic.h>
#include <Kernel/PerformanceManager.h>
#include <Kernel/Sections.h>
#include <Kernel/StdLib.h>

#if ARCH(I386)
static constexpr size_t CHUNK_SIZE = 32;
#else
static constexpr size_t CHUNK_SIZE = 64;
#endif

#define POOL_SIZE (2 * MiB)
#define ETERNAL_RANGE_SIZE (4 * MiB)

namespace std {
const nothrow_t nothrow;
}

static RecursiveSpinlock s_lock; // needs to be recursive because of dump_backtrace()
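
// A KmallocSubheap is one contiguous region of memory handed to the kmalloc heap.
// The first page of the region holds this header; the rest is managed by the
// chunked Heap allocator.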
struct KmallocSubheap {
    KmallocSubheap(u8* base, size_t size)
        : allocator(base, size)
    {
    }

    IntrusiveListNode<KmallocSubheap> list_node;
    Heap<CHUNK_SIZE, KMALLOC_SCRUB_BYTE, KFREE_SCRUB_BYTE> allocator;
};
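
// KmallocGlobalData owns every subheap and knows how to grow the kmalloc heap on
// demand by mapping freshly committed physical pages into a reserved kernel virtual
// range (see try_expand() below).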
struct KmallocGlobalData {
    static constexpr size_t minimum_subheap_size = 1 * MiB;

    KmallocGlobalData(u8* initial_heap, size_t initial_heap_size)
    {
        add_subheap(initial_heap, initial_heap_size);
    }

    void add_subheap(u8* storage, size_t storage_size)
    {
        dbgln("Adding kmalloc subheap @ {} with size {}", storage, storage_size);
        static_assert(sizeof(KmallocSubheap) <= PAGE_SIZE);
        auto* subheap = new (storage) KmallocSubheap(storage + PAGE_SIZE, storage_size - PAGE_SIZE);
        subheaps.append(*subheap);
    }
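
    // Try each existing subheap in turn; if none can satisfy the request, grow the
    // heap via try_expand() and retry. Allocation failure currently panics instead
    // of returning null.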
    void* allocate(size_t size)
    {
        VERIFY(!expansion_in_progress);

        for (auto& subheap : subheaps) {
            if (auto* ptr = subheap.allocator.allocate(size))
                return ptr;
        }

        if (!try_expand(size)) {
            PANIC("OOM when trying to expand kmalloc heap.");
        }

        return allocate(size);
    }
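
    // Find the subheap that owns the pointer and return the memory to it.
    // A pointer that belongs to no subheap is a caller bug, so we panic.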
    void deallocate(void* ptr)
    {
        VERIFY(!expansion_in_progress);

        for (auto& subheap : subheaps) {
            if (subheap.allocator.contains(ptr)) {
                subheap.allocator.deallocate(ptr);
                return;
            }
        }

        PANIC("Bogus pointer {:p} passed to kfree()", ptr);
    }

    size_t allocated_bytes() const
    {
        size_t total = 0;
        for (auto const& subheap : subheaps)
            total += subheap.allocator.allocated_bytes();
        return total;
    }

    size_t free_bytes() const
    {
        size_t total = 0;
        for (auto const& subheap : subheaps)
            total += subheap.allocator.free_bytes();
        return total;
    }
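
    // Grow the heap by carving a new subheap out of the reserved expansion range.
    // The new subheap is at least minimum_subheap_size, or twice the failed allocation
    // request plus one page for the subheap header, whichever is larger. It is backed
    // by freshly committed physical pages mapped into the kernel page directory.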
    bool try_expand(size_t allocation_request)
    {
        VERIFY(!expansion_in_progress);
        TemporaryChange change(expansion_in_progress, true);

        auto new_subheap_base = expansion_data->next_virtual_address;
        Checked<size_t> padded_allocation_request = allocation_request;
        padded_allocation_request *= 2;
        padded_allocation_request += PAGE_SIZE;
        if (padded_allocation_request.has_overflow()) {
            PANIC("Integer overflow during kmalloc heap expansion");
        }
        size_t new_subheap_size = max(minimum_subheap_size, Memory::page_round_up(padded_allocation_request.value()));

        dbgln("Unable to allocate {}, expanding kmalloc heap", allocation_request);

        if (!expansion_data->virtual_range.contains(new_subheap_base, new_subheap_size)) {
            // FIXME: Dare to return false and allow kmalloc() to fail!
            PANIC("Out of address space when expanding kmalloc heap.");
        }

        auto physical_pages_or_error = MM.commit_user_physical_pages(new_subheap_size / PAGE_SIZE);
        if (physical_pages_or_error.is_error()) {
            // FIXME: Dare to return false!
            PANIC("Out of physical pages when expanding kmalloc heap.");
        }
        auto physical_pages = physical_pages_or_error.release_value();

        expansion_data->next_virtual_address = expansion_data->next_virtual_address.offset(new_subheap_size);

        auto cpu_supports_nx = Processor::current().has_feature(CPUFeature::NX);

        SpinlockLocker mm_locker(Memory::s_mm_lock);
        SpinlockLocker pd_locker(MM.kernel_page_directory().get_lock());

        for (auto vaddr = new_subheap_base; !physical_pages.is_empty(); vaddr = vaddr.offset(PAGE_SIZE)) {
            // FIXME: We currently leak physical memory when mapping it into the kmalloc heap.
            auto& page = physical_pages.take_one().leak_ref();
            auto* pte = MM.pte(MM.kernel_page_directory(), vaddr);
            VERIFY(pte);
            pte->set_physical_page_base(page.paddr().get());
            pte->set_global(true);
            pte->set_user_allowed(false);
            pte->set_writable(true);
            if (cpu_supports_nx)
                pte->set_execute_disabled(true);
            pte->set_present(true);
        }

        MM.flush_tlb(&MM.kernel_page_directory(), new_subheap_base, new_subheap_size / PAGE_SIZE);

        add_subheap(new_subheap_base.as_ptr(), new_subheap_size);
        return true;
    }
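
    // Reserve the virtual range used for future heap growth and pre-allocate its page
    // tables, so try_expand() never has to allocate page-table memory while the heap
    // is already under pressure.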
    void enable_expansion()
    {
        // FIXME: This range can be much bigger on 64-bit, but we need to figure something out for 32-bit.
        auto virtual_range = MM.kernel_page_directory().range_allocator().try_allocate_anywhere(64 * MiB, 1 * MiB);

        expansion_data = KmallocGlobalData::ExpansionData {
            .virtual_range = virtual_range.value(),
            .next_virtual_address = virtual_range.value().base(),
        };

        // Make sure the entire kmalloc VM range is backed by page tables.
        // This avoids having to deal with lazy page table allocation during heap expansion.
        SpinlockLocker mm_locker(Memory::s_mm_lock);
        SpinlockLocker pd_locker(MM.kernel_page_directory().get_lock());
        for (auto vaddr = virtual_range.value().base(); vaddr < virtual_range.value().end(); vaddr = vaddr.offset(PAGE_SIZE)) {
            MM.ensure_pte(MM.kernel_page_directory(), vaddr);
        }
    }

    struct ExpansionData {
        Memory::VirtualRange virtual_range;
        VirtualAddress next_virtual_address;
    };
    Optional<ExpansionData> expansion_data;

    IntrusiveList<&KmallocSubheap::list_node> subheaps;

    bool expansion_in_progress { false };
};
READONLY_AFTER_INIT static KmallocGlobalData* g_kmalloc_global;
alignas(KmallocGlobalData) static u8 g_kmalloc_global_heap[sizeof(KmallocGlobalData)];

// Treat the heap as logically separate from .bss
__attribute__((section(".heap"))) static u8 kmalloc_eternal_heap[ETERNAL_RANGE_SIZE];
__attribute__((section(".heap"))) static u8 kmalloc_pool_heap[POOL_SIZE];

static size_t g_kmalloc_bytes_eternal = 0;
static size_t g_kmalloc_call_count;
static size_t g_kfree_call_count;
static size_t g_nested_kfree_calls;

bool g_dump_kmalloc_stacks;

static u8* s_next_eternal_ptr;
READONLY_AFTER_INIT static u8* s_end_of_eternal_range;

void kmalloc_enable_expand()
{
    g_kmalloc_global->enable_expansion();
}

static inline void kmalloc_verify_nospinlock_held()
{
    // Catch bad callers allocating under spinlock.
    if constexpr (KMALLOC_VERIFY_NO_SPINLOCK_HELD) {
        VERIFY(!Processor::in_critical());
    }
}
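
// Set up the initial kmalloc state: zero the statically reserved heap storage,
// construct the global heap object in place, and initialize the eternal bump pointer.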
UNMAP_AFTER_INIT void kmalloc_init()
{
    // Zero out heap since it's placed after end_of_kernel_bss.
    memset(kmalloc_eternal_heap, 0, sizeof(kmalloc_eternal_heap));
    memset(kmalloc_pool_heap, 0, sizeof(kmalloc_pool_heap));

    g_kmalloc_global = new (g_kmalloc_global_heap) KmallocGlobalData(kmalloc_pool_heap, sizeof(kmalloc_pool_heap));

    s_lock.initialize();

    s_next_eternal_ptr = kmalloc_eternal_heap;
    s_end_of_eternal_range = s_next_eternal_ptr + sizeof(kmalloc_eternal_heap);
}
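
// Bump-pointer allocation for objects that live for the lifetime of the kernel.
// There is no corresponding free; the pointer only moves forward through a fixed
// ETERNAL_RANGE_SIZE region, and we verify it never runs past the end.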
void* kmalloc_eternal(size_t size)
{
    kmalloc_verify_nospinlock_held();

    size = round_up_to_power_of_two(size, sizeof(void*));

    SpinlockLocker lock(s_lock);
    void* ptr = s_next_eternal_ptr;
    s_next_eternal_ptr += size;
    VERIFY(s_next_eternal_ptr < s_end_of_eternal_range);
    g_kmalloc_bytes_eternal += size;
    return ptr;
}
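
// The main kernel heap allocation entry point. Takes the global kmalloc spinlock,
// optionally dumps a backtrace for debugging, delegates to the global heap, and
// records a performance event attributed to the current (or idle) thread.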
void* kmalloc(size_t size)
{
    kmalloc_verify_nospinlock_held();
    SpinlockLocker lock(s_lock);
    ++g_kmalloc_call_count;

    if (g_dump_kmalloc_stacks && Kernel::g_kernel_symbols_available) {
        dbgln("kmalloc({})", size);
        Kernel::dump_backtrace();
    }

    void* ptr = g_kmalloc_global->allocate(size);

    Thread* current_thread = Thread::current();
    if (!current_thread)
        current_thread = Processor::idle_thread();
    if (current_thread)
        PerformanceManager::add_kmalloc_perf_event(*current_thread, size, (FlatPtr)ptr);

    return ptr;
}
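
// The sized free variant currently ignores the size hint and forwards to kfree().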
void kfree_sized(void* ptr, size_t size)
{
    (void)size;
    return kfree(ptr);
}
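
// Free a pointer previously returned by kmalloc(). g_nested_kfree_calls guards
// against recursion: only the outermost call in a nested sequence records a
// performance event.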
void kfree(void* ptr)
{
    if (!ptr)
        return;

    kmalloc_verify_nospinlock_held();
    SpinlockLocker lock(s_lock);
    ++g_kfree_call_count;
    ++g_nested_kfree_calls;

    if (g_nested_kfree_calls == 1) {
        Thread* current_thread = Thread::current();
        if (!current_thread)
            current_thread = Processor::idle_thread();
        if (current_thread)
            PerformanceManager::add_kfree_perf_event(*current_thread, 0, (FlatPtr)ptr);
    }

    g_kmalloc_global->deallocate(ptr);
    --g_nested_kfree_calls;
}

size_t kmalloc_good_size(size_t size)
{
    return size;
}
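
// Aligned allocation is layered on top of kmalloc(): we over-allocate by the requested
// alignment plus room for two bookkeeping words, then stash the offset back to the real
// allocation at aligned_ptr[-1] and the real allocation size at aligned_ptr[-2] so that
// kfree_aligned() can recover the original pointer and size.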
void* kmalloc_aligned(size_t size, size_t alignment)
{
    VERIFY(alignment <= 4096);
    Checked<size_t> real_allocation_size = size;
    real_allocation_size += alignment;
    real_allocation_size += sizeof(ptrdiff_t) + sizeof(size_t);
    void* ptr = kmalloc(real_allocation_size.value());
    if (ptr == nullptr)
        return nullptr;
    size_t max_addr = (size_t)ptr + alignment;
    void* aligned_ptr = (void*)(max_addr - (max_addr % alignment));
    ((ptrdiff_t*)aligned_ptr)[-1] = (ptrdiff_t)((u8*)aligned_ptr - (u8*)ptr);
    ((size_t*)aligned_ptr)[-2] = real_allocation_size.value();
    return aligned_ptr;
}
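
// C++ allocation operators route through kmalloc()/kfree_sized(). The "throwing"
// forms VERIFY the result instead of throwing, while the nothrow forms may return
// null. The unsized delete operators are not expected to be reached, since kernel
// code should always know the size of what it deletes.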
void* operator new(size_t size)
{
    void* ptr = kmalloc(size);
    VERIFY(ptr);
    return ptr;
}

void* operator new(size_t size, const std::nothrow_t&) noexcept
{
    return kmalloc(size);
}

void* operator new(size_t size, std::align_val_t al)
{
    void* ptr = kmalloc_aligned(size, (size_t)al);
    VERIFY(ptr);
    return ptr;
}

void* operator new(size_t size, std::align_val_t al, const std::nothrow_t&) noexcept
{
    return kmalloc_aligned(size, (size_t)al);
}

void* operator new[](size_t size)
{
    void* ptr = kmalloc(size);
    VERIFY(ptr);
    return ptr;
}

void* operator new[](size_t size, const std::nothrow_t&) noexcept
{
    return kmalloc(size);
}

void operator delete(void*) noexcept
{
    // All deletes in kernel code should have a known size.
    VERIFY_NOT_REACHED();
}

void operator delete(void* ptr, size_t size) noexcept
{
    return kfree_sized(ptr, size);
}

void operator delete(void* ptr, size_t, std::align_val_t) noexcept
{
    return kfree_aligned(ptr);
}

void operator delete[](void*) noexcept
{
    // All deletes in kernel code should have a known size.
    VERIFY_NOT_REACHED();
}

void operator delete[](void* ptr, size_t size) noexcept
{
    return kfree_sized(ptr, size);
}
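
// Snapshot the current heap counters under the kmalloc lock.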
void get_kmalloc_stats(kmalloc_stats& stats)
{
    SpinlockLocker lock(s_lock);
    stats.bytes_allocated = g_kmalloc_global->allocated_bytes();
    stats.bytes_free = g_kmalloc_global->free_bytes();
    stats.bytes_eternal = g_kmalloc_bytes_eternal;
    stats.kmalloc_call_count = g_kmalloc_call_count;
    stats.kfree_call_count = g_kfree_call_count;
}