kmalloc.cpp

/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Assertions.h>
#include <AK/Types.h>
#include <Kernel/Arch/PageDirectory.h>
#include <Kernel/Debug.h>
#include <Kernel/Heap/Heap.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/KSyms.h>
#include <Kernel/Library/Panic.h>
#include <Kernel/Library/StdLib.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Sections.h>
#include <Kernel/Security/AddressSanitizer.h>
#include <Kernel/Tasks/PerformanceManager.h>

#if ARCH(X86_64) || ARCH(AARCH64) || ARCH(RISCV64)
static constexpr size_t CHUNK_SIZE = 64;
#else
#    error Unknown architecture
#endif
static_assert(is_power_of_two(CHUNK_SIZE));

static constexpr size_t INITIAL_KMALLOC_MEMORY_SIZE = 2 * MiB;
static constexpr size_t KMALLOC_DEFAULT_ALIGNMENT = 16;

// Treat the heap as logically separate from .bss
__attribute__((section(".heap"))) static u8 initial_kmalloc_memory[INITIAL_KMALLOC_MEMORY_SIZE];

namespace std {
nothrow_t const nothrow;
}

// FIXME: Figure out whether this can be MemoryManager.
static RecursiveSpinlock<LockRank::None> s_lock {}; // needs to be recursive because of dump_backtrace()
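
// A KmallocSubheap is one contiguous chunk of kernel memory managed by the general-purpose
// Heap allocator. Subheaps serve requests that are too large (or too strictly aligned) for
// the fixed-size slab heaps below. The first PAGE_SIZE bytes of each subheap's storage hold
// the KmallocSubheap object itself (see KmallocGlobalData::add_subheap()).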
struct KmallocSubheap {
    KmallocSubheap(u8* base, size_t size)
        : allocator(base, size)
    {
    }

    IntrusiveListNode<KmallocSubheap> list_node;
    using List = IntrusiveList<&KmallocSubheap::list_node>;
    Heap<CHUNK_SIZE, KMALLOC_SCRUB_BYTE, KFREE_SCRUB_BYTE> allocator;
};
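
// A KmallocSlabBlock is a 64 KiB, block-aligned region carved into fixed-size slabs.
// Free slabs are threaded into an intrusive freelist stored inside the slab memory itself,
// so allocate() and deallocate() are O(1) pointer swaps. Block alignment lets deallocation
// recover the owning block from a pointer with a simple mask (see block_mask).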
class KmallocSlabBlock {
public:
    static constexpr size_t block_size = 64 * KiB;
    static constexpr FlatPtr block_mask = ~(block_size - 1);

    KmallocSlabBlock(size_t slab_size)
        : m_slab_size(slab_size)
        , m_slab_count((block_size - sizeof(KmallocSlabBlock)) / slab_size)
    {
        for (size_t i = 0; i < m_slab_count; ++i) {
            auto* freelist_entry = (FreelistEntry*)(void*)(&m_data[i * slab_size]);
            freelist_entry->next = m_freelist;
            m_freelist = freelist_entry;
        }
    }

    void* allocate([[maybe_unused]] size_t requested_size)
    {
        VERIFY(m_freelist);
        ++m_allocated_slabs;
#ifdef HAS_ADDRESS_SANITIZER
        AddressSanitizer::fill_shadow((FlatPtr)m_freelist, sizeof(FreelistEntry::next), Kernel::AddressSanitizer::ShadowType::Unpoisoned8Bytes);
#endif
        auto* ptr = exchange(m_freelist, m_freelist->next);
#ifdef HAS_ADDRESS_SANITIZER
        AddressSanitizer::mark_region((FlatPtr)ptr, requested_size, m_slab_size, AddressSanitizer::ShadowType::Malloc);
#endif
        return ptr;
    }

    void deallocate(void* ptr)
    {
        VERIFY(ptr >= &m_data && ptr < ((u8*)this + block_size));
        --m_allocated_slabs;
        auto* freelist_entry = (FreelistEntry*)ptr;
#ifdef HAS_ADDRESS_SANITIZER
        AddressSanitizer::fill_shadow((FlatPtr)freelist_entry, sizeof(FreelistEntry::next), Kernel::AddressSanitizer::ShadowType::Unpoisoned8Bytes);
#endif
        freelist_entry->next = m_freelist;
#ifdef HAS_ADDRESS_SANITIZER
        AddressSanitizer::fill_shadow((FlatPtr)freelist_entry, m_slab_size, AddressSanitizer::ShadowType::Free);
#endif
        m_freelist = freelist_entry;
    }

    bool is_full() const
    {
        return m_freelist == nullptr;
    }

    size_t allocated_bytes() const
    {
        return m_allocated_slabs * m_slab_size;
    }

    size_t free_bytes() const
    {
        return (m_slab_count - m_allocated_slabs) * m_slab_size;
    }

    IntrusiveListNode<KmallocSlabBlock> list_node;
    using List = IntrusiveList<&KmallocSlabBlock::list_node>;

private:
    struct FreelistEntry {
        FreelistEntry* next;
    };

    FreelistEntry* m_freelist { nullptr };

    size_t m_slab_size { 0 };
    size_t m_slab_count { 0 };
    size_t m_allocated_slabs { 0 };

    [[gnu::aligned(16)]] u8 m_data[];
};
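
// A KmallocSlabheap owns all slab blocks of a single slab size. Blocks with at least one
// free slab sit on m_usable_blocks; a block moves to m_full_blocks when its freelist runs
// dry, and back to m_usable_blocks on the first deallocation into it. try_purge() returns
// completely empty blocks to the general-purpose heap.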
class KmallocSlabheap {
public:
    KmallocSlabheap(size_t slab_size)
        : m_slab_size(slab_size)
    {
    }

    size_t slab_size() const { return m_slab_size; }

    void* allocate(size_t requested_size, [[maybe_unused]] CallerWillInitializeMemory caller_will_initialize_memory)
    {
        if (m_usable_blocks.is_empty()) {
            // FIXME: This allocation wastes `block_size` bytes due to the implementation of kmalloc_aligned().
            //        Handle this with a custom VM+page allocator instead of using kmalloc_aligned().
            auto* slot = kmalloc_aligned(KmallocSlabBlock::block_size, KmallocSlabBlock::block_size);
            if (!slot) {
                dbgln_if(KMALLOC_DEBUG, "OOM while growing slabheap ({})", m_slab_size);
                return nullptr;
            }
            auto* block = new (slot) KmallocSlabBlock(m_slab_size);
            m_usable_blocks.append(*block);
        }
        auto* block = m_usable_blocks.first();
        auto* ptr = block->allocate(requested_size);
        if (block->is_full())
            m_full_blocks.append(*block);

#ifndef HAS_ADDRESS_SANITIZER
        if (caller_will_initialize_memory == CallerWillInitializeMemory::No) {
            memset(ptr, KMALLOC_SCRUB_BYTE, m_slab_size);
        }
#endif
        return ptr;
    }

    void deallocate(void* ptr)
    {
#ifndef HAS_ADDRESS_SANITIZER
        memset(ptr, KFREE_SCRUB_BYTE, m_slab_size);
#endif
        auto* block = (KmallocSlabBlock*)((FlatPtr)ptr & KmallocSlabBlock::block_mask);
        bool block_was_full = block->is_full();
        block->deallocate(ptr);
        if (block_was_full)
            m_usable_blocks.append(*block);
    }

    size_t allocated_bytes() const
    {
        size_t total = m_full_blocks.size_slow() * KmallocSlabBlock::block_size;
        for (auto const& slab_block : m_usable_blocks)
            total += slab_block.allocated_bytes();
        return total;
    }

    size_t free_bytes() const
    {
        size_t total = 0;
        for (auto const& slab_block : m_usable_blocks)
            total += slab_block.free_bytes();
        return total;
    }

    bool try_purge()
    {
        bool did_purge = false;

        // Note: We cannot remove children from the list when using a structured loop,
        //       because we need to advance the iterator before we delete the underlying
        //       value, so we have to iterate manually.
        auto block = m_usable_blocks.begin();
        while (block != m_usable_blocks.end()) {
            if (block->allocated_bytes() != 0) {
                ++block;
                continue;
            }
            auto& block_to_remove = *block;
            ++block;
            block_to_remove.list_node.remove();
            block_to_remove.~KmallocSlabBlock();
            kfree_sized(&block_to_remove, KmallocSlabBlock::block_size);

            did_purge = true;
        }
        return did_purge;
    }

private:
    size_t m_slab_size { 0 };

    KmallocSlabBlock::List m_usable_blocks;
    KmallocSlabBlock::List m_full_blocks;
};
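
// KmallocGlobalData ties the allocator together: requests of up to 512 bytes (with small
// enough alignment) are served by one of the fixed-size slab heaps, everything else by the
// general-purpose subheaps. If both fail, it first tries to purge empty slab blocks and,
// failing that, expands the heap with freshly committed physical pages.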
struct KmallocGlobalData {
    static constexpr size_t minimum_subheap_size = 1 * MiB;

    KmallocGlobalData(u8* initial_heap, size_t initial_heap_size)
    {
        add_subheap(initial_heap, initial_heap_size);
    }

    void add_subheap(u8* storage, size_t storage_size)
    {
        dbgln_if(KMALLOC_DEBUG, "Adding kmalloc subheap @ {} with size {}", storage, storage_size);
        static_assert(sizeof(KmallocSubheap) <= PAGE_SIZE);
        auto* subheap = new (storage) KmallocSubheap(storage + PAGE_SIZE, storage_size - PAGE_SIZE);
        subheaps.append(*subheap);
    }

    void* allocate(size_t size, size_t alignment, CallerWillInitializeMemory caller_will_initialize_memory)
    {
        VERIFY(!expansion_in_progress);

        for (auto& slabheap : slabheaps) {
            if (size <= slabheap.slab_size() && alignment <= slabheap.slab_size())
                return slabheap.allocate(size, caller_will_initialize_memory);
        }

        for (auto& subheap : subheaps) {
            if (auto* ptr = subheap.allocator.allocate(size, alignment, caller_will_initialize_memory))
                return ptr;
        }

        // NOTE: This size calculation is a mirror of kmalloc_aligned(KmallocSlabBlock)
        if (size <= KmallocSlabBlock::block_size * 2 + sizeof(ptrdiff_t) + sizeof(size_t)) {
            // FIXME: We should propagate a freed pointer, to find the specific subheap it belonged to.
            //        This would save us iterating over them in the next step and remove a recursion.
            bool did_purge = false;
            for (auto& slabheap : slabheaps) {
                if (slabheap.try_purge()) {
                    dbgln_if(KMALLOC_DEBUG, "Kmalloc purged block(s) from slabheap of size {} to avoid expansion", slabheap.slab_size());
                    did_purge = true;
                    break;
                }
            }
            if (did_purge)
                return allocate(size, alignment, caller_will_initialize_memory);
        }

        if (!try_expand(size)) {
            dbgln_if(KMALLOC_DEBUG, "OOM when trying to expand kmalloc heap");
            return nullptr;
        }

        return allocate(size, alignment, caller_will_initialize_memory);
    }

    void deallocate(void* ptr, size_t size)
    {
        VERIFY(!expansion_in_progress);
        VERIFY(is_valid_kmalloc_address(VirtualAddress { ptr }));

        for (auto& slabheap : slabheaps) {
            if (size <= slabheap.slab_size())
                return slabheap.deallocate(ptr);
        }

        for (auto& subheap : subheaps) {
            if (subheap.allocator.contains(ptr)) {
                subheap.allocator.deallocate(ptr);
                return;
            }
        }

        PANIC("Bogus pointer passed to kfree_sized({:p}, {})", ptr, size);
    }

    size_t allocated_bytes() const
    {
        size_t total = 0;
        for (auto const& subheap : subheaps)
            total += subheap.allocator.allocated_bytes();
        for (auto const& slabheap : slabheaps)
            total += slabheap.allocated_bytes();
        return total;
    }

    size_t free_bytes() const
    {
        size_t total = 0;
        for (auto const& subheap : subheaps)
            total += subheap.allocator.free_bytes();
        for (auto const& slabheap : slabheaps)
            total += slabheap.free_bytes();
        return total;
    }
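
    // Grows the heap by mapping a new subheap into the reserved expansion range. The request
    // is doubled and padded by a page (headroom for the subheap's header page, allocation
    // bookkeeping, and worst-case aligned allocations), and the heap never grows by less
    // than minimum_subheap_size.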
    bool try_expand(size_t allocation_request)
    {
        VERIFY(!expansion_in_progress);
        TemporaryChange change(expansion_in_progress, true);

        auto new_subheap_base = expansion_data->next_virtual_address;
        Checked<size_t> padded_allocation_request = allocation_request;
        padded_allocation_request *= 2;
        padded_allocation_request += PAGE_SIZE;
        if (padded_allocation_request.has_overflow()) {
            PANIC("Integer overflow during kmalloc heap expansion");
        }
        auto rounded_allocation_request = Memory::page_round_up(padded_allocation_request.value());
        if (rounded_allocation_request.is_error()) {
            PANIC("Integer overflow computing pages for kmalloc heap expansion");
        }
        size_t new_subheap_size = max(minimum_subheap_size, rounded_allocation_request.value());

        dbgln_if(KMALLOC_DEBUG, "Unable to allocate {}, expanding kmalloc heap", allocation_request);

        if (!expansion_data->virtual_range.contains(new_subheap_base, new_subheap_size)) {
            dbgln_if(KMALLOC_DEBUG, "Out of address space when expanding kmalloc heap");
            return false;
        }

        auto physical_pages_or_error = MM.commit_physical_pages(new_subheap_size / PAGE_SIZE);
        if (physical_pages_or_error.is_error()) {
            dbgln_if(KMALLOC_DEBUG, "Out of physical pages when expanding kmalloc heap");
            return false;
        }
        auto physical_pages = physical_pages_or_error.release_value();

        expansion_data->next_virtual_address = expansion_data->next_virtual_address.offset(new_subheap_size);

        auto cpu_supports_nx = Processor::current().has_nx();

        SpinlockLocker pd_locker(MM.kernel_page_directory().get_lock());

        for (auto vaddr = new_subheap_base; !physical_pages.is_empty(); vaddr = vaddr.offset(PAGE_SIZE)) {
            // FIXME: We currently leak physical memory when mapping it into the kmalloc heap.
            auto& page = physical_pages.take_one().leak_ref();
            auto* pte = MM.pte(MM.kernel_page_directory(), vaddr);
            VERIFY(pte);
            pte->set_physical_page_base(page.paddr().get());
            pte->set_global(true);
            pte->set_user_allowed(false);
            pte->set_writable(true);
            if (cpu_supports_nx)
                pte->set_execute_disabled(true);
            pte->set_present(true);
        }

        add_subheap(new_subheap_base.as_ptr(), new_subheap_size);
        return true;
    }
    void enable_expansion()
    {
        // FIXME: This range can be much bigger on 64-bit, but we need to figure something out for 32-bit.
        auto reserved_region = MUST(MM.allocate_unbacked_region_anywhere(64 * MiB, 1 * MiB));

        expansion_data = KmallocGlobalData::ExpansionData {
            .virtual_range = reserved_region->range(),
            .next_virtual_address = reserved_region->range().base(),
        };

        // Make sure the entire kmalloc VM range is backed by page tables.
        // This avoids having to deal with lazy page table allocation during heap expansion.
        SpinlockLocker pd_locker(MM.kernel_page_directory().get_lock());
        for (auto vaddr = reserved_region->range().base(); vaddr < reserved_region->range().end(); vaddr = vaddr.offset(PAGE_SIZE)) {
            MM.ensure_pte(MM.kernel_page_directory(), vaddr);
        }

        (void)reserved_region.leak_ptr();
    }

    struct ExpansionData {
        Memory::VirtualRange virtual_range;
        VirtualAddress next_virtual_address;
    };
    Optional<ExpansionData> expansion_data;

    bool is_valid_kmalloc_address(VirtualAddress vaddr) const
    {
        if (vaddr.as_ptr() >= initial_kmalloc_memory && vaddr.as_ptr() < (initial_kmalloc_memory + INITIAL_KMALLOC_MEMORY_SIZE))
            return true;

        if (!expansion_data.has_value())
            return false;

        return expansion_data->virtual_range.contains(vaddr);
    }

    KmallocSubheap::List subheaps;

    KmallocSlabheap slabheaps[6] = { 16, 32, 64, 128, 256, 512 };

    bool expansion_in_progress { false };
};
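
// The global allocator state lives in statically reserved storage and is constructed with
// placement new in kmalloc_init(), since the allocator cannot allocate itself.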
READONLY_AFTER_INIT static KmallocGlobalData* g_kmalloc_global;
alignas(KmallocGlobalData) static u8 g_kmalloc_global_heap[sizeof(KmallocGlobalData)];

static size_t g_kmalloc_call_count;
static size_t g_kfree_call_count;
static size_t g_nested_kfree_calls;

bool g_dump_kmalloc_stacks;

void kmalloc_enable_expand()
{
    g_kmalloc_global->enable_expansion();
}

UNMAP_AFTER_INIT void kmalloc_init()
{
    // Zero out heap since it's placed after end_of_kernel_bss.
    memset(initial_kmalloc_memory, 0, sizeof(initial_kmalloc_memory));
    g_kmalloc_global = new (g_kmalloc_global_heap) KmallocGlobalData(initial_kmalloc_memory, sizeof(initial_kmalloc_memory));

    s_lock.initialize();
}
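
// Common allocation path: take the global kmalloc spinlock, bump the stats counters, ask
// KmallocGlobalData for memory, and record a performance event for the current thread.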
static void* kmalloc_impl(size_t size, size_t alignment, CallerWillInitializeMemory caller_will_initialize_memory)
{
    // Catch bad callers allocating under spinlock.
    if constexpr (KMALLOC_VERIFY_NO_SPINLOCK_HELD) {
        Processor::verify_no_spinlocks_held();
    }

    // Alignment must be a power of two.
    VERIFY(is_power_of_two(alignment));

    SpinlockLocker lock(s_lock);
    ++g_kmalloc_call_count;

    if (g_dump_kmalloc_stacks && Kernel::g_kernel_symbols_available) {
        dbgln("kmalloc({})", size);
        Kernel::dump_backtrace();
    }

    void* ptr = g_kmalloc_global->allocate(size, alignment, caller_will_initialize_memory);

    Thread* current_thread = Thread::current();
    if (!current_thread)
        current_thread = Processor::idle_thread();
    if (current_thread) {
        // FIXME: By the time we check this, we have already allocated above.
        //        This means that in the case of an infinite recursion, we can't catch it this way.
        VERIFY(current_thread->is_allocation_enabled());
        PerformanceManager::add_kmalloc_perf_event(*current_thread, size, (FlatPtr)ptr);
    }

    return ptr;
}

void* kmalloc(size_t size)
{
    return kmalloc_impl(size, KMALLOC_DEFAULT_ALIGNMENT, CallerWillInitializeMemory::No);
}
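
// kcalloc() rejects count * size overflow up front and passes CallerWillInitializeMemory::Yes,
// so the allocator skips its scrub-byte memset; the zero-fill below is the only initialization.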
void* kcalloc(size_t count, size_t size)
{
    if (Checked<size_t>::multiplication_would_overflow(count, size))
        return nullptr;
    size_t new_size = count * size;
    auto* ptr = kmalloc_impl(new_size, KMALLOC_DEFAULT_ALIGNMENT, CallerWillInitializeMemory::Yes);
    if (ptr)
        memset(ptr, 0, new_size);
    return ptr;
}

void kfree_sized(void* ptr, size_t size)
{
    if (!ptr)
        return;

    VERIFY(size > 0);

    // Catch bad callers allocating under spinlock.
    if constexpr (KMALLOC_VERIFY_NO_SPINLOCK_HELD) {
        Processor::verify_no_spinlocks_held();
    }

    SpinlockLocker lock(s_lock);
    ++g_kfree_call_count;
    ++g_nested_kfree_calls;

    if (g_nested_kfree_calls == 1) {
        Thread* current_thread = Thread::current();
        if (!current_thread)
            current_thread = Processor::idle_thread();
        if (current_thread) {
            VERIFY(current_thread->is_allocation_enabled());
            PerformanceManager::add_kfree_perf_event(*current_thread, 0, (FlatPtr)ptr);
        }
    }

    g_kmalloc_global->deallocate(ptr, size);

    --g_nested_kfree_calls;
}
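
// Reports the usable size for a request: the matching slab size for small requests (e.g. a
// 100-byte request reports 128), otherwise the request plus the Heap allocation header rounded
// up to a whole number of chunks, minus the header again.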
size_t kmalloc_good_size(size_t size)
{
    VERIFY(size > 0);
    // NOTE: There's no need to take the kmalloc lock, as the kmalloc slab-heaps (and their sizes) are constant
    for (auto const& slabheap : g_kmalloc_global->slabheaps) {
        if (size <= slabheap.slab_size())
            return slabheap.slab_size();
    }
    return round_up_to_power_of_two(size + Heap<CHUNK_SIZE>::AllocationHeaderSize, CHUNK_SIZE) - Heap<CHUNK_SIZE>::AllocationHeaderSize;
}

void* kmalloc_aligned(size_t size, size_t alignment)
{
    return kmalloc_impl(size, alignment, CallerWillInitializeMemory::No);
}
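
// Kernel global operator new/delete route through kmalloc()/kfree_sized(). The kernel has no
// exceptions, so the "throwing" forms VERIFY the result instead of throwing, while the nothrow
// forms may return nullptr to callers prepared to handle failure, e.g. (Foo is a placeholder):
//
//     auto* maybe = new (std::nothrow) Foo; // nullptr on allocation failure
//     auto* always = new Foo;               // VERIFY()s on allocation failure
//
// Sized deletes forward to kfree_sized(); the unsized forms are VERIFY_NOT_REACHED() because
// every delete in kernel code is expected to have a known size.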
void* operator new(size_t size)
{
    void* ptr = kmalloc(size);
    VERIFY(ptr);
    return ptr;
}

void* operator new(size_t size, std::nothrow_t const&) noexcept
{
    return kmalloc(size);
}

void* operator new(size_t size, std::align_val_t al)
{
    void* ptr = kmalloc_aligned(size, (size_t)al);
    VERIFY(ptr);
    return ptr;
}

void* operator new(size_t size, std::align_val_t al, std::nothrow_t const&) noexcept
{
    return kmalloc_aligned(size, (size_t)al);
}

void* operator new[](size_t size)
{
    void* ptr = kmalloc(size);
    VERIFY(ptr);
    return ptr;
}

void* operator new[](size_t size, std::nothrow_t const&) noexcept
{
    return kmalloc(size);
}

void operator delete(void*) noexcept
{
    // All deletes in kernel code should have a known size.
    VERIFY_NOT_REACHED();
}

void operator delete(void* ptr, size_t size) noexcept
{
    return kfree_sized(ptr, size);
}

void operator delete(void* ptr, size_t size, std::align_val_t) noexcept
{
    return kfree_sized(ptr, size);
}

void operator delete[](void*) noexcept
{
    // All deletes in kernel code should have a known size.
    VERIFY_NOT_REACHED();
}

void operator delete[](void* ptr, size_t size) noexcept
{
    return kfree_sized(ptr, size);
}

void get_kmalloc_stats(kmalloc_stats& stats)
{
    SpinlockLocker lock(s_lock);
    stats.bytes_allocated = g_kmalloc_global->allocated_bytes();
    stats.bytes_free = g_kmalloc_global->free_bytes();
    stats.kmalloc_call_count = g_kmalloc_call_count;
    stats.kfree_call_count = g_kfree_call_count;
}