/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Really really *really* Q&D malloc() and free() implementations
 * just to get going. Don't ever let anyone see this shit. :^)
 */
#include <AK/Assertions.h>
#include <AK/NonnullOwnPtrVector.h>
#include <AK/Types.h>
#include <Kernel/Arch/x86/CPU.h>
#include <Kernel/Debug.h>
#include <Kernel/Heap/Heap.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/KSyms.h>
#include <Kernel/Panic.h>
#include <Kernel/Process.h>
#include <Kernel/Scheduler.h>
#include <Kernel/SpinLock.h>
#include <Kernel/StdLib.h>
#include <Kernel/VM/MemoryManager.h>
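
// CHUNK_SIZE is the allocation granularity of the expandable heap. POOL_SIZE
// sizes the statically reserved initial kmalloc pool, and ETERNAL_RANGE_SIZE
// sizes the bump-allocated kmalloc_eternal() range below.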
#define CHUNK_SIZE 32
#define POOL_SIZE (2 * MiB)
#define ETERNAL_RANGE_SIZE (2 * MiB)

static RecursiveSpinLock s_lock; // needs to be recursive because of dump_backtrace()

static void kmalloc_allocate_backup_memory();
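
// KmallocGlobalHeap wraps an ExpandableHeap together with the hooks the heap
// needs to grow and shrink at runtime. ExpandGlobalHeap is that hook object:
// its add_memory() is invoked when the heap runs out of space, and its
// remove_memory() when an entire subheap has become free again. A pre-allocated
// backup region (m_backup_memory) is kept around so expansion still works when
// memory is too low to allocate a fresh kernel region on the spot.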
struct KmallocGlobalHeap {
    struct ExpandGlobalHeap {
        KmallocGlobalHeap& m_global_heap;

        ExpandGlobalHeap(KmallocGlobalHeap& global_heap)
            : m_global_heap(global_heap)
        {
        }

        bool m_adding { false };
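        // Called by the ExpandableHeap when an allocation cannot be satisfied
        // from the existing subheaps. Returns true if enough memory was added
        // to (likely) satisfy the request.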
        bool add_memory(size_t allocation_request)
        {
            if (!MemoryManager::is_initialized()) {
                if constexpr (KMALLOC_DEBUG) {
                    dmesgln("kmalloc: Cannot expand heap before MM is initialized!");
                }
                return false;
            }
            VERIFY(!m_adding);
            TemporaryChange change(m_adding, true);
            // At this point we have very little memory left, so we can't
            // reliably allocate even a new region of memory; any attempt to
            // kmalloc() could fail. This is why we keep a backup region
            // around: use it first.
            auto region = move(m_global_heap.m_backup_memory);
            if (!region) {
                // Be careful to not log too much here. We don't want to trigger
                // any further calls to kmalloc(). We're already out of memory
                // and don't have any backup memory, either!
                if constexpr (KMALLOC_DEBUG) {
                    dmesgln("kmalloc: Cannot expand heap: no backup memory");
                }
                return false;
            }
            // At this point we should have at least enough memory from the
            // backup region to be able to log properly.
            if constexpr (KMALLOC_DEBUG) {
                dmesgln("kmalloc: Adding memory to heap at {}, bytes: {}", region->vaddr(), region->size());
            }
            auto& subheap = m_global_heap.m_heap.add_subheap(region->vaddr().as_ptr(), region->size());
            m_global_heap.m_subheap_memory.append(region.release_nonnull());
            // Since we pulled in our backup heap, make sure we allocate another
            // backup heap before returning. Otherwise we potentially lose
            // the ability to expand the heap next time we get called.
            ScopeGuard guard([&]() {
                // We may need to defer allocating backup memory because the
                // heap expansion may have been triggered while holding some
                // other spinlock. If the expansion happens to need the same
                // spinlock we would deadlock, so always defer the allocation.
                Processor::current().deferred_call_queue(kmalloc_allocate_backup_memory);
            });
            // Now that we added our backup memory, check if the backup heap
            // was big enough to likely satisfy the request.
            if (subheap.free_bytes() < allocation_request) {
                // Looks like we probably need more.
                size_t memory_size = page_round_up(decltype(m_global_heap.m_heap)::calculate_memory_for_bytes(allocation_request));
                // Add some slack to the new subheap. We're already using it for
                // other allocations besides the original allocation_request
                // that triggered the expansion, so without the slack we might
                // have to expand again almost immediately.
                memory_size += 1 * MiB;
                region = MM.allocate_kernel_region(memory_size, "kmalloc subheap", Region::Access::Read | Region::Access::Write, AllocationStrategy::AllocateNow);
                if (region) {
                    dbgln("kmalloc: Adding even more memory to heap at {}, bytes: {}", region->vaddr(), region->size());
                    m_global_heap.m_heap.add_subheap(region->vaddr().as_ptr(), region->size());
                    m_global_heap.m_subheap_memory.append(region.release_nonnull());
                } else {
                    dbgln("kmalloc: Could not expand heap to satisfy allocation of {} bytes", allocation_request);
                    return false;
                }
            }
            return true;
        }
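        // Called by the ExpandableHeap when all allocations in a subheap have
        // been freed and the subheap's backing memory can be given back.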
        bool remove_memory(void* memory)
        {
            // This is actually relatively unlikely to happen, because it
            // requires all allocated memory in a subheap to be freed. Only
            // then can the subheap be removed...
            for (size_t i = 0; i < m_global_heap.m_subheap_memory.size(); i++) {
                if (m_global_heap.m_subheap_memory[i].vaddr().as_ptr() == memory) {
                    auto region = m_global_heap.m_subheap_memory.take(i);
                    if (!m_global_heap.m_backup_memory) {
                        if constexpr (KMALLOC_DEBUG) {
                            dmesgln("kmalloc: Using removed memory as backup: {}, bytes: {}", region->vaddr(), region->size());
                        }
                        m_global_heap.m_backup_memory = move(region);
                    } else {
                        if constexpr (KMALLOC_DEBUG) {
                            dmesgln("kmalloc: Queue removing memory from heap at {}, bytes: {}", region->vaddr(), region->size());
                        }
                        Processor::deferred_call_queue([this, region = move(region)]() mutable {
                            // We need to defer freeing the region to prevent a potential
                            // deadlock, since we are still holding the kmalloc lock.
                            // We don't really need to do anything other than hold
                            // onto the region, unless we already used the backup
                            // memory, in which case we want to use this region as
                            // the new backup.
                            ScopedSpinLock lock(s_lock);
                            if (!m_global_heap.m_backup_memory) {
                                if constexpr (KMALLOC_DEBUG) {
                                    dmesgln("kmalloc: Queued memory region at {}, bytes: {} will be used as new backup", region->vaddr(), region->size());
                                }
                                m_global_heap.m_backup_memory = move(region);
                            } else {
                                if constexpr (KMALLOC_DEBUG) {
                                    dmesgln("kmalloc: Queued memory region at {}, bytes: {} will be freed now", region->vaddr(), region->size());
                                }
                            }
                        });
                    }
                    return true;
                }
            }
            if constexpr (KMALLOC_DEBUG) {
                dmesgln("kmalloc: Cannot remove memory from heap: {}", VirtualAddress(memory));
            }
            return false;
        }
    };
    typedef ExpandableHeap<CHUNK_SIZE, KMALLOC_SCRUB_BYTE, KFREE_SCRUB_BYTE, ExpandGlobalHeap> HeapType;

    HeapType m_heap;
    NonnullOwnPtrVector<Region> m_subheap_memory;
    OwnPtr<Region> m_backup_memory;

    KmallocGlobalHeap(u8* memory, size_t memory_size)
        : m_heap(memory, memory_size, ExpandGlobalHeap(*this))
    {
    }

    void allocate_backup_memory()
    {
        if (m_backup_memory)
            return;
        m_backup_memory = MM.allocate_kernel_region(1 * MiB, "kmalloc subheap", Region::Access::Read | Region::Access::Write, AllocationStrategy::AllocateNow);
    }

    size_t backup_memory_bytes() const
    {
        return m_backup_memory ? m_backup_memory->size() : 0;
    }
};
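
// The global heap object lives in a static buffer and is constructed with
// placement new in kmalloc_init(), so no working allocator is needed to
// bring it up.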
READONLY_AFTER_INIT static KmallocGlobalHeap* g_kmalloc_global;
static u8 g_kmalloc_global_heap[sizeof(KmallocGlobalHeap)];

// Treat the heap as logically separate from .bss
__attribute__((section(".heap"))) static u8 kmalloc_eternal_heap[ETERNAL_RANGE_SIZE];
__attribute__((section(".heap"))) static u8 kmalloc_pool_heap[POOL_SIZE];

static size_t g_kmalloc_bytes_eternal = 0;
static size_t g_kmalloc_call_count;
static size_t g_kfree_call_count;
bool g_dump_kmalloc_stacks;

static u8* s_next_eternal_ptr;
READONLY_AFTER_INIT static u8* s_end_of_eternal_range;
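
// Deferred-call target queued by ExpandGlobalHeap::add_memory(); replenishes
// the backup region once it's safe to allocate again.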
static void kmalloc_allocate_backup_memory()
{
    g_kmalloc_global->allocate_backup_memory();
}
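
// Called once the MemoryManager is up; allocating the first backup region is
// what makes heap expansion possible.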
void kmalloc_enable_expand()
{
    g_kmalloc_global->allocate_backup_memory();
}

UNMAP_AFTER_INIT void kmalloc_init()
{
    // Zero out heap since it's placed after end_of_kernel_bss.
    memset(kmalloc_eternal_heap, 0, sizeof(kmalloc_eternal_heap));
    memset(kmalloc_pool_heap, 0, sizeof(kmalloc_pool_heap));
    g_kmalloc_global = new (g_kmalloc_global_heap) KmallocGlobalHeap(kmalloc_pool_heap, sizeof(kmalloc_pool_heap));

    s_lock.initialize();

    s_next_eternal_ptr = kmalloc_eternal_heap;
    s_end_of_eternal_range = s_next_eternal_ptr + sizeof(kmalloc_eternal_heap);
}
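
// Bump allocator for allocations that live for the entire lifetime of the
// kernel. There is no way to free this memory; it is only accounted for via
// g_kmalloc_bytes_eternal.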
void* kmalloc_eternal(size_t size)
{
    size = round_up_to_power_of_two(size, sizeof(void*));

    ScopedSpinLock lock(s_lock);
    void* ptr = s_next_eternal_ptr;
    s_next_eternal_ptr += size;
    VERIFY(s_next_eternal_ptr < s_end_of_eternal_range);
    g_kmalloc_bytes_eternal += size;
    return ptr;
}
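
// Note that kmalloc() never returns nullptr: if the heap cannot be expanded
// to satisfy a request, the kernel panics.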
void* kmalloc(size_t size)
{
    ScopedSpinLock lock(s_lock);
    ++g_kmalloc_call_count;

    if (g_dump_kmalloc_stacks && Kernel::g_kernel_symbols_available) {
        dbgln("kmalloc({})", size);
        Kernel::dump_backtrace();
    }

    void* ptr = g_kmalloc_global->m_heap.allocate(size);
    if (!ptr) {
        PANIC("kmalloc: Out of memory (requested size: {})", size);
    }
    return ptr;
}

void kfree(void* ptr)
{
    if (!ptr)
        return;

    ScopedSpinLock lock(s_lock);
    ++g_kfree_call_count;

    g_kmalloc_global->m_heap.deallocate(ptr);
}

void* krealloc(void* ptr, size_t new_size)
{
    ScopedSpinLock lock(s_lock);
    return g_kmalloc_global->m_heap.reallocate(ptr, new_size);
}

void* operator new(size_t size)
{
    return kmalloc(size);
}

void* operator new[](size_t size)
{
    return kmalloc(size);
}

void get_kmalloc_stats(kmalloc_stats& stats)
{
    ScopedSpinLock lock(s_lock);
    stats.bytes_allocated = g_kmalloc_global->m_heap.allocated_bytes();
    stats.bytes_free = g_kmalloc_global->m_heap.free_bytes() + g_kmalloc_global->backup_memory_bytes();
    stats.bytes_eternal = g_kmalloc_bytes_eternal;
    stats.kmalloc_call_count = g_kmalloc_call_count;
    stats.kfree_call_count = g_kfree_call_count;
}
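
// A minimal usage sketch for get_kmalloc_stats() (hypothetical caller; the
// real consumers live elsewhere in the kernel):
//
//     kmalloc_stats stats;
//     get_kmalloc_stats(stats);
//     dmesgln("kmalloc: {} bytes allocated, {} bytes free, {} bytes eternal",
//         stats.bytes_allocated, stats.bytes_free, stats.bytes_eternal);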