kmalloc.cpp

/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Assertions.h>
#include <AK/Types.h>
#include <Kernel/Arch/PageDirectory.h>
#include <Kernel/Debug.h>
#include <Kernel/Heap/Heap.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/KSyms.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Panic.h>
#include <Kernel/PerformanceManager.h>
#include <Kernel/Sections.h>
#include <Kernel/StdLib.h>

#if ARCH(I386)
static constexpr size_t CHUNK_SIZE = 32;
#else
static constexpr size_t CHUNK_SIZE = 64;
#endif
static_assert(is_power_of_two(CHUNK_SIZE));

static constexpr size_t INITIAL_KMALLOC_MEMORY_SIZE = 2 * MiB;

// Treat the heap as logically separate from .bss
__attribute__((section(".heap"))) static u8 initial_kmalloc_memory[INITIAL_KMALLOC_MEMORY_SIZE];

namespace std {
const nothrow_t nothrow;
}

static RecursiveSpinlock s_lock; // needs to be recursive because of dump_backtrace()

struct KmallocSubheap {
    KmallocSubheap(u8* base, size_t size)
        : allocator(base, size)
    {
    }

    IntrusiveListNode<KmallocSubheap> list_node;
    using List = IntrusiveList<&KmallocSubheap::list_node>;
    Heap<CHUNK_SIZE, KMALLOC_SCRUB_BYTE, KFREE_SCRUB_BYTE> allocator;
};
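
// A KmallocSlabBlock is a 64 KiB (block_size) chunk of memory carved into
// fixed-size slabs. Free slabs are threaded onto an intrusive freelist that
// lives inside the slabs themselves. Blocks are allocated with block_size
// alignment, so the block owning any given slab can be recovered by masking
// the slab pointer with block_mask (see KmallocSlabheap::deallocate()).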
class KmallocSlabBlock {
public:
    static constexpr size_t block_size = 64 * KiB;
    static constexpr FlatPtr block_mask = ~(block_size - 1);

    KmallocSlabBlock(size_t slab_size)
        : m_slab_size(slab_size)
        , m_slab_count((block_size - sizeof(KmallocSlabBlock)) / slab_size)
    {
        for (size_t i = 0; i < m_slab_count; ++i) {
            auto* freelist_entry = (FreelistEntry*)(void*)(&m_data[i * slab_size]);
            freelist_entry->next = m_freelist;
            m_freelist = freelist_entry;
        }
    }

    void* allocate()
    {
        VERIFY(m_freelist);
        ++m_allocated_slabs;
        return exchange(m_freelist, m_freelist->next);
    }

    void deallocate(void* ptr)
    {
        VERIFY(ptr >= &m_data && ptr < ((u8*)this + block_size));
        --m_allocated_slabs;
        auto* freelist_entry = (FreelistEntry*)ptr;
        freelist_entry->next = m_freelist;
        m_freelist = freelist_entry;
    }

    bool is_full() const
    {
        return m_freelist == nullptr;
    }

    size_t allocated_bytes() const
    {
        return m_allocated_slabs * m_slab_size;
    }

    size_t free_bytes() const
    {
        return (m_slab_count - m_allocated_slabs) * m_slab_size;
    }

    IntrusiveListNode<KmallocSlabBlock> list_node;
    using List = IntrusiveList<&KmallocSlabBlock::list_node>;

private:
    struct FreelistEntry {
        FreelistEntry* next;
    };

    FreelistEntry* m_freelist { nullptr };

    size_t m_slab_size { 0 };
    size_t m_slab_count { 0 };
    size_t m_allocated_slabs { 0 };

    [[gnu::aligned(16)]] u8 m_data[];
};
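
// A KmallocSlabheap manages all slab blocks for a single allocation size class.
// Blocks that still have free slabs live on m_usable_blocks; completely full
// blocks are parked on m_full_blocks until one of their slabs is freed again.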
class KmallocSlabheap {
public:
    KmallocSlabheap(size_t slab_size)
        : m_slab_size(slab_size)
    {
    }

    size_t slab_size() const { return m_slab_size; }

    void* allocate()
    {
        if (m_usable_blocks.is_empty()) {
            // FIXME: This allocation wastes `block_size` bytes due to the implementation of kmalloc_aligned().
            //        Handle this with a custom VM+page allocator instead of using kmalloc_aligned().
            auto* slot = kmalloc_aligned(KmallocSlabBlock::block_size, KmallocSlabBlock::block_size);
            if (!slot) {
                // FIXME: Dare to return nullptr!
                PANIC("OOM while growing slabheap ({})", m_slab_size);
            }
            auto* block = new (slot) KmallocSlabBlock(m_slab_size);
            m_usable_blocks.append(*block);
        }
        auto* block = m_usable_blocks.first();
        auto* ptr = block->allocate();
        if (block->is_full())
            m_full_blocks.append(*block);

        memset(ptr, KMALLOC_SCRUB_BYTE, m_slab_size);
        return ptr;
    }

    void deallocate(void* ptr)
    {
        memset(ptr, KFREE_SCRUB_BYTE, m_slab_size);

        auto* block = (KmallocSlabBlock*)((FlatPtr)ptr & KmallocSlabBlock::block_mask);
        bool block_was_full = block->is_full();
        block->deallocate(ptr);
        if (block_was_full)
            m_usable_blocks.append(*block);
    }

    size_t allocated_bytes() const
    {
        size_t total = m_full_blocks.size_slow() * KmallocSlabBlock::block_size;
        for (auto const& slab_block : m_usable_blocks)
            total += slab_block.allocated_bytes();
        return total;
    }

    size_t free_bytes() const
    {
        size_t total = 0;
        for (auto const& slab_block : m_usable_blocks)
            total += slab_block.free_bytes();
        return total;
    }

    bool try_purge()
    {
        bool did_purge = false;

        // Note: We cannot remove children from the list when using a structured loop,
        //       because we need to advance the iterator before we delete the underlying
        //       value, so we have to iterate manually
        auto block = m_usable_blocks.begin();
        while (block != m_usable_blocks.end()) {
            if (block->allocated_bytes() != 0) {
                ++block;
                continue;
            }
            auto& block_to_remove = *block;
            ++block;
            block_to_remove.list_node.remove();
            block_to_remove.~KmallocSlabBlock();
            kfree_aligned(&block_to_remove);

            did_purge = true;
        }
        return did_purge;
    }

private:
    size_t m_slab_size { 0 };

    KmallocSlabBlock::List m_usable_blocks;
    KmallocSlabBlock::List m_full_blocks;
};
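
// KmallocGlobalData is the top-level allocator state: requests of up to 512 bytes
// are served from the fixed set of slab heaps, everything larger from the general
// subheaps. When no subheap can satisfy a request, the heap is expanded by
// committing fresh physical pages and mapping them into a reserved virtual range.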
struct KmallocGlobalData {
    static constexpr size_t minimum_subheap_size = 1 * MiB;

    KmallocGlobalData(u8* initial_heap, size_t initial_heap_size)
    {
        add_subheap(initial_heap, initial_heap_size);
    }

    void add_subheap(u8* storage, size_t storage_size)
    {
        dbgln_if(KMALLOC_DEBUG, "Adding kmalloc subheap @ {} with size {}", storage, storage_size);
        static_assert(sizeof(KmallocSubheap) <= PAGE_SIZE);
        auto* subheap = new (storage) KmallocSubheap(storage + PAGE_SIZE, storage_size - PAGE_SIZE);
        subheaps.append(*subheap);
    }
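
    // Allocation strategy: serve the request from the matching slab heap if it is
    // small enough, otherwise from the first general subheap that can fit it. If
    // nothing fits and freeing an empty slab block could release enough memory to
    // the subheaps, purge and retry; as a last resort the heap is expanded
    // (currently panicking on OOM rather than returning nullptr).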
    void* allocate(size_t size)
    {
        VERIFY(!expansion_in_progress);

        for (auto& slabheap : slabheaps) {
            if (size <= slabheap.slab_size())
                return slabheap.allocate();
        }

        for (auto& subheap : subheaps) {
            if (auto* ptr = subheap.allocator.allocate(size))
                return ptr;
        }

        // NOTE: This size calculation is a mirror of kmalloc_aligned(KmallocSlabBlock)
        if (size <= KmallocSlabBlock::block_size * 2 + sizeof(ptrdiff_t) + sizeof(size_t)) {
            // FIXME: We should propagate a freed pointer, to find the specific subheap it belonged to
            //        This would save us iterating over them in the next step and remove a recursion
            bool did_purge = false;
            for (auto& slabheap : slabheaps) {
                if (slabheap.try_purge()) {
                    dbgln_if(KMALLOC_DEBUG, "Kmalloc purged block(s) from slabheap of size {} to avoid expansion", slabheap.slab_size());
                    did_purge = true;
                    break;
                }
            }
            if (did_purge)
                return allocate(size);
        }

        if (!try_expand(size)) {
            PANIC("OOM when trying to expand kmalloc heap.");
        }

        return allocate(size);
    }

    void deallocate(void* ptr, size_t size)
    {
        VERIFY(!expansion_in_progress);
        VERIFY(is_valid_kmalloc_address(VirtualAddress { ptr }));

        for (auto& slabheap : slabheaps) {
            if (size <= slabheap.slab_size())
                return slabheap.deallocate(ptr);
        }

        for (auto& subheap : subheaps) {
            if (subheap.allocator.contains(ptr)) {
                subheap.allocator.deallocate(ptr);
                return;
            }
        }

        PANIC("Bogus pointer passed to kfree_sized({:p}, {})", ptr, size);
    }

    size_t allocated_bytes() const
    {
        size_t total = 0;
        for (auto const& subheap : subheaps)
            total += subheap.allocator.allocated_bytes();
        for (auto const& slabheap : slabheaps)
            total += slabheap.allocated_bytes();
        return total;
    }

    size_t free_bytes() const
    {
        size_t total = 0;
        for (auto const& subheap : subheaps)
            total += subheap.allocator.free_bytes();
        for (auto const& slabheap : slabheaps)
            total += slabheap.free_bytes();
        return total;
    }
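
    // try_expand() grows the heap by at least minimum_subheap_size: it commits
    // enough physical pages for the (padded) failed allocation, maps them into
    // the reserved kmalloc virtual range, and registers the new memory as
    // another subheap.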
    bool try_expand(size_t allocation_request)
    {
        VERIFY(!expansion_in_progress);
        TemporaryChange change(expansion_in_progress, true);

        auto new_subheap_base = expansion_data->next_virtual_address;
        Checked<size_t> padded_allocation_request = allocation_request;
        padded_allocation_request *= 2;
        padded_allocation_request += PAGE_SIZE;
        if (padded_allocation_request.has_overflow()) {
            PANIC("Integer overflow during kmalloc heap expansion");
        }
        auto rounded_allocation_request = Memory::page_round_up(padded_allocation_request.value());
        if (rounded_allocation_request.is_error()) {
            PANIC("Integer overflow computing pages for kmalloc heap expansion");
        }
        size_t new_subheap_size = max(minimum_subheap_size, rounded_allocation_request.value());

        dbgln_if(KMALLOC_DEBUG, "Unable to allocate {}, expanding kmalloc heap", allocation_request);

        if (!expansion_data->virtual_range.contains(new_subheap_base, new_subheap_size)) {
            // FIXME: Dare to return false and allow kmalloc() to fail!
            PANIC("Out of address space when expanding kmalloc heap.");
        }

        auto physical_pages_or_error = MM.commit_physical_pages(new_subheap_size / PAGE_SIZE);
        if (physical_pages_or_error.is_error()) {
            // FIXME: Dare to return false!
            PANIC("Out of physical pages when expanding kmalloc heap.");
        }
        auto physical_pages = physical_pages_or_error.release_value();

        expansion_data->next_virtual_address = expansion_data->next_virtual_address.offset(new_subheap_size);

        auto cpu_supports_nx = Processor::current().has_nx();

        SpinlockLocker mm_locker(Memory::s_mm_lock);
        SpinlockLocker pd_locker(MM.kernel_page_directory().get_lock());

        for (auto vaddr = new_subheap_base; !physical_pages.is_empty(); vaddr = vaddr.offset(PAGE_SIZE)) {
            // FIXME: We currently leak physical memory when mapping it into the kmalloc heap.
            auto& page = physical_pages.take_one().leak_ref();
            auto* pte = MM.pte(MM.kernel_page_directory(), vaddr);
            VERIFY(pte);
            pte->set_physical_page_base(page.paddr().get());
            pte->set_global(true);
            pte->set_user_allowed(false);
            pte->set_writable(true);
            if (cpu_supports_nx)
                pte->set_execute_disabled(true);
            pte->set_present(true);
        }

        add_subheap(new_subheap_base.as_ptr(), new_subheap_size);
        return true;
    }

    void enable_expansion()
    {
        // FIXME: This range can be much bigger on 64-bit, but we need to figure something out for 32-bit.
        auto reserved_region = MUST(MM.allocate_unbacked_region_anywhere(64 * MiB, 1 * MiB));

        expansion_data = KmallocGlobalData::ExpansionData {
            .virtual_range = reserved_region->range(),
            .next_virtual_address = reserved_region->range().base(),
        };

        // Make sure the entire kmalloc VM range is backed by page tables.
        // This avoids having to deal with lazy page table allocation during heap expansion.
        SpinlockLocker mm_locker(Memory::s_mm_lock);
        SpinlockLocker pd_locker(MM.kernel_page_directory().get_lock());

        for (auto vaddr = reserved_region->range().base(); vaddr < reserved_region->range().end(); vaddr = vaddr.offset(PAGE_SIZE)) {
            MM.ensure_pte(MM.kernel_page_directory(), vaddr);
        }

        (void)reserved_region.leak_ptr();
    }

    struct ExpansionData {
        Memory::VirtualRange virtual_range;
        VirtualAddress next_virtual_address;
    };
    Optional<ExpansionData> expansion_data;

    bool is_valid_kmalloc_address(VirtualAddress vaddr) const
    {
        if (vaddr.as_ptr() >= initial_kmalloc_memory && vaddr.as_ptr() < (initial_kmalloc_memory + INITIAL_KMALLOC_MEMORY_SIZE))
            return true;

        if (!expansion_data.has_value())
            return false;

        return expansion_data->virtual_range.contains(vaddr);
    }

    KmallocSubheap::List subheaps;

    KmallocSlabheap slabheaps[6] = { 16, 32, 64, 128, 256, 512 };

    bool expansion_in_progress { false };
};

READONLY_AFTER_INIT static KmallocGlobalData* g_kmalloc_global;
alignas(KmallocGlobalData) static u8 g_kmalloc_global_heap[sizeof(KmallocGlobalData)];

static size_t g_kmalloc_call_count;
static size_t g_kfree_call_count;
static size_t g_nested_kfree_calls;

bool g_dump_kmalloc_stacks;

void kmalloc_enable_expand()
{
    g_kmalloc_global->enable_expansion();
}

static inline void kmalloc_verify_nospinlock_held()
{
    // Catch bad callers allocating under spinlock.
    if constexpr (KMALLOC_VERIFY_NO_SPINLOCK_HELD) {
        VERIFY(!Processor::in_critical());
    }
}

UNMAP_AFTER_INIT void kmalloc_init()
{
    // Zero out heap since it's placed after end_of_kernel_bss.
    memset(initial_kmalloc_memory, 0, sizeof(initial_kmalloc_memory));
    g_kmalloc_global = new (g_kmalloc_global_heap) KmallocGlobalData(initial_kmalloc_memory, sizeof(initial_kmalloc_memory));

    s_lock.initialize();
}

void* kmalloc(size_t size)
{
    kmalloc_verify_nospinlock_held();
    SpinlockLocker lock(s_lock);
    ++g_kmalloc_call_count;

    if (g_dump_kmalloc_stacks && Kernel::g_kernel_symbols_available) {
        dbgln("kmalloc({})", size);
        Kernel::dump_backtrace();
    }

    void* ptr = g_kmalloc_global->allocate(size);

    Thread* current_thread = Thread::current();
    if (!current_thread)
        current_thread = Processor::idle_thread();
    if (current_thread) {
        // FIXME: By the time we check this, we have already allocated above.
        //        This means that in the case of an infinite recursion, we can't catch it this way.
        VERIFY(current_thread->is_allocation_enabled());
        PerformanceManager::add_kmalloc_perf_event(*current_thread, size, (FlatPtr)ptr);
    }

    return ptr;
}

void* kcalloc(size_t count, size_t size)
{
    if (Checked<size_t>::multiplication_would_overflow(count, size))
        return nullptr;
    size_t new_size = count * size;
    auto* ptr = kmalloc(new_size);
    // FIXME: Avoid redundantly scrubbing the memory in kmalloc()
    if (ptr)
        memset(ptr, 0, new_size);
    return ptr;
}

void kfree_sized(void* ptr, size_t size)
{
    if (!ptr)
        return;

    VERIFY(size > 0);

    kmalloc_verify_nospinlock_held();
    SpinlockLocker lock(s_lock);
    ++g_kfree_call_count;
    ++g_nested_kfree_calls;

    if (g_nested_kfree_calls == 1) {
        Thread* current_thread = Thread::current();
        if (!current_thread)
            current_thread = Processor::idle_thread();
        if (current_thread) {
            VERIFY(current_thread->is_allocation_enabled());
            PerformanceManager::add_kfree_perf_event(*current_thread, 0, (FlatPtr)ptr);
        }
    }

    g_kmalloc_global->deallocate(ptr, size);
    --g_nested_kfree_calls;
}
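
// Returns the usable size that kmalloc() would actually hand out for a request
// of `size` bytes: the matching slab size for small requests, otherwise the
// request rounded up to the heap's chunk granularity (minus the allocation
// header the general heap stores in front of each allocation).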
size_t kmalloc_good_size(size_t size)
{
    VERIFY(size > 0);
    // NOTE: There's no need to take the kmalloc lock, as the kmalloc slab-heaps (and their sizes) are constant
    for (auto const& slabheap : g_kmalloc_global->slabheaps) {
        if (size <= slabheap.slab_size())
            return slabheap.slab_size();
    }
    return round_up_to_power_of_two(size + Heap<CHUNK_SIZE>::AllocationHeaderSize, CHUNK_SIZE) - Heap<CHUNK_SIZE>::AllocationHeaderSize;
}
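
// kmalloc_aligned() over-allocates by `alignment` plus room for two bookkeeping
// fields, then rounds the returned pointer up to the requested alignment.
// The offset back to the original kmalloc() pointer is stashed at index [-1]
// and the real allocation size at index [-2], so kfree_aligned() can recover
// the original pointer and size when freeing.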
void* kmalloc_aligned(size_t size, size_t alignment)
{
    Checked<size_t> real_allocation_size = size;
    real_allocation_size += alignment;
    real_allocation_size += sizeof(ptrdiff_t) + sizeof(size_t);
    void* ptr = kmalloc(real_allocation_size.value());
    if (ptr == nullptr)
        return nullptr;
    size_t max_addr = (size_t)ptr + alignment;
    void* aligned_ptr = (void*)(max_addr - (max_addr % alignment));
    ((ptrdiff_t*)aligned_ptr)[-1] = (ptrdiff_t)((u8*)aligned_ptr - (u8*)ptr);
    ((size_t*)aligned_ptr)[-2] = real_allocation_size.value();
    return aligned_ptr;
}
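
// The C++ allocation operators below route straight to kmalloc()/kfree_sized().
// The plain forms VERIFY() that allocation succeeded, while the nothrow forms
// return nullptr on failure. Deletes without a known size are not expected in
// kernel code, hence the VERIFY_NOT_REACHED() in the sizeless overloads.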
void* operator new(size_t size)
{
    void* ptr = kmalloc(size);
    VERIFY(ptr);
    return ptr;
}

void* operator new(size_t size, std::nothrow_t const&) noexcept
{
    return kmalloc(size);
}

void* operator new(size_t size, std::align_val_t al)
{
    void* ptr = kmalloc_aligned(size, (size_t)al);
    VERIFY(ptr);
    return ptr;
}

void* operator new(size_t size, std::align_val_t al, std::nothrow_t const&) noexcept
{
    return kmalloc_aligned(size, (size_t)al);
}

void* operator new[](size_t size)
{
    void* ptr = kmalloc(size);
    VERIFY(ptr);
    return ptr;
}

void* operator new[](size_t size, std::nothrow_t const&) noexcept
{
    return kmalloc(size);
}

void operator delete(void*) noexcept
{
    // All deletes in kernel code should have a known size.
    VERIFY_NOT_REACHED();
}

void operator delete(void* ptr, size_t size) noexcept
{
    return kfree_sized(ptr, size);
}

void operator delete(void* ptr, size_t, std::align_val_t) noexcept
{
    return kfree_aligned(ptr);
}

void operator delete[](void*) noexcept
{
    // All deletes in kernel code should have a known size.
    VERIFY_NOT_REACHED();
}

void operator delete[](void* ptr, size_t size) noexcept
{
    return kfree_sized(ptr, size);
}

void get_kmalloc_stats(kmalloc_stats& stats)
{
    SpinlockLocker lock(s_lock);
    stats.bytes_allocated = g_kmalloc_global->allocated_bytes();
    stats.bytes_free = g_kmalloc_global->free_bytes();
    stats.kmalloc_call_count = g_kmalloc_call_count;
    stats.kfree_call_count = g_kfree_call_count;
}