kmalloc.cpp

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Really really *really* Q&D malloc() and free() implementations
 * just to get going. Don't ever let anyone see this shit. :^)
 */

#include <AK/Assertions.h>
#include <AK/NonnullOwnPtrVector.h>
#include <AK/Optional.h>
#include <AK/StringView.h>
#include <AK/Types.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Heap/Heap.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/KSyms.h>
#include <Kernel/Process.h>
#include <Kernel/Scheduler.h>
#include <Kernel/SpinLock.h>
#include <Kernel/StdLib.h>
#include <Kernel/VM/MemoryManager.h>

#define SANITIZE_KMALLOC

#define CHUNK_SIZE 32
#define POOL_SIZE (2 * MiB)
#define ETERNAL_RANGE_SIZE (2 * MiB)
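
// Global kmalloc heap state: the expandable heap itself, the Regions backing
// any subheaps added after boot, and a pre-allocated backup Region so the
// heap can still grow when memory is already tight.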
struct KmallocGlobalHeap {
    struct ExpandGlobalHeap {
        KmallocGlobalHeap& m_global_heap;

        ExpandGlobalHeap(KmallocGlobalHeap& global_heap)
            : m_global_heap(global_heap)
        {
        }
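
        // Expansion hook; presumably invoked by ExpandableHeap when an
        // allocation of `allocation_request` bytes cannot be satisfied by
        // the existing subheaps.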
        bool add_memory(size_t allocation_request)
        {
            if (!MemoryManager::is_initialized()) {
                klog() << "kmalloc(): Cannot expand heap before MM is initialized!";
                return false;
            }
            // At this point we have very little memory left, so any attempt
            // to allocate a fresh region could itself fail. This is why we
            // keep a backup region around: pull it in first and use it to
            // expand the heap.
            auto region = move(m_global_heap.m_backup_memory);
            if (!region) {
                klog() << "kmalloc(): Cannot expand heap: no backup memory";
                return false;
            }
            klog() << "kmalloc(): Adding memory to heap at " << region->vaddr() << ", bytes: " << region->size();

            auto& subheap = m_global_heap.m_heap.add_subheap(region->vaddr().as_ptr(), region->size());
            m_global_heap.m_subheap_memory.append(region.release_nonnull());

            // Since we pulled in our backup heap, make sure we allocate another
            // backup heap before returning. Otherwise we potentially lose
            // the ability to expand the heap next time we get called.
            ScopeGuard guard([&]() {
                m_global_heap.allocate_backup_memory();
            });

            // Now that we added our backup memory, check if the backup heap
            // was big enough to likely satisfy the request
            if (subheap.free_bytes() < allocation_request) {
                // Looks like we probably need more
                size_t memory_size = max(decltype(m_global_heap.m_heap)::calculate_memory_for_bytes(allocation_request), (size_t)(1 * MiB));
                region = MM.allocate_kernel_region(memory_size, "kmalloc subheap", Region::Access::Read | Region::Access::Write);
                if (region) {
                    klog() << "kmalloc(): Adding even more memory to heap at " << region->vaddr() << ", bytes: " << region->size();

                    m_global_heap.m_heap.add_subheap(region->vaddr().as_ptr(), region->size());
                    m_global_heap.m_subheap_memory.append(region.release_nonnull());
                } else {
                    klog() << "kmalloc(): Could not expand heap to satisfy allocation of " << allocation_request << " bytes";
                    return false;
                }
            }
            return true;
        }

        bool remove_memory(void* memory)
        {
            // This is actually relatively unlikely to happen, because it requires
            // that all allocated memory in a subheap has been freed; only then
            // can the subheap be removed...
            for (size_t i = 0; i < m_global_heap.m_subheap_memory.size(); i++) {
                if (m_global_heap.m_subheap_memory[i].vaddr().as_ptr() == memory) {
                    auto region = m_global_heap.m_subheap_memory.take(i);
                    klog() << "kmalloc(): Removing memory from heap at " << region->vaddr() << ", bytes: " << region->size();
                    if (!m_global_heap.m_backup_memory) {
                        klog() << "kmalloc(): Using removed memory as backup: " << region->vaddr() << ", bytes: " << region->size();
                        m_global_heap.m_backup_memory = move(region);
                    }
                    return true;
                }
            }

            klog() << "kmalloc(): Cannot remove memory from heap: " << VirtualAddress(memory);
            return false;
        }
    };

    typedef ExpandableHeap<CHUNK_SIZE, KMALLOC_SCRUB_BYTE, KFREE_SCRUB_BYTE, ExpandGlobalHeap> HeapType;

    HeapType m_heap;
    NonnullOwnPtrVector<Region> m_subheap_memory;
    OwnPtr<Region> m_backup_memory;

    KmallocGlobalHeap(u8* memory, size_t memory_size)
        : m_heap(memory, memory_size, ExpandGlobalHeap(*this))
    {
    }
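
    // Pre-allocates the 1 MiB backup Region (if none is held) so that a later
    // heap expansion does not itself have to allocate under memory pressure.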
    void allocate_backup_memory()
    {
        if (m_backup_memory)
            return;
        m_backup_memory = MM.allocate_kernel_region(1 * MiB, "kmalloc subheap", Region::Access::Read | Region::Access::Write);
    }

    size_t backup_memory_bytes() const
    {
        return m_backup_memory ? m_backup_memory->size() : 0;
    }
};

static KmallocGlobalHeap* g_kmalloc_global;

// We need to make sure to not stomp on global variables or other parts
// of the kernel image!
extern u32 end_of_kernel_image;
u8* const kmalloc_start = (u8*)PAGE_ROUND_UP(&end_of_kernel_image);
u8* const kmalloc_end = kmalloc_start + (ETERNAL_RANGE_SIZE + POOL_SIZE) + sizeof(KmallocGlobalHeap);
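
// Layout of the fixed kmalloc range, starting at the first page boundary past
// the kernel image: the KmallocGlobalHeap object itself, then the 2 MiB
// "eternal" range, then the initial 2 MiB pool.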
#define ETERNAL_BASE (kmalloc_start + sizeof(KmallocGlobalHeap))
#define KMALLOC_BASE (ETERNAL_BASE + ETERNAL_RANGE_SIZE)

static size_t g_kmalloc_bytes_eternal = 0;
static size_t g_kmalloc_call_count;
static size_t g_kfree_call_count;
bool g_dump_kmalloc_stacks;

static u8* s_next_eternal_ptr;
static u8* s_end_of_eternal_range;

static RecursiveSpinLock s_lock; // needs to be recursive because of dump_backtrace()

void kmalloc_enable_expand()
{
    g_kmalloc_global->allocate_backup_memory();
}

void kmalloc_init()
{
    memset((void*)KMALLOC_BASE, 0, POOL_SIZE);
    g_kmalloc_global = new (kmalloc_start) KmallocGlobalHeap(KMALLOC_BASE, POOL_SIZE); // Place heap at kmalloc_start

    s_lock.initialize();

    s_next_eternal_ptr = (u8*)ETERNAL_BASE;
    s_end_of_eternal_range = s_next_eternal_ptr + ETERNAL_RANGE_SIZE;
}
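
// Bump allocator for allocations that are never freed: hand out the next
// chunk of the eternal range and only assert that we haven't run past it.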
void* kmalloc_eternal(size_t size)
{
    ScopedSpinLock lock(s_lock);
    void* ptr = s_next_eternal_ptr;
    s_next_eternal_ptr += size;
    ASSERT(s_next_eternal_ptr < s_end_of_eternal_range);
    g_kmalloc_bytes_eternal += size;
    return ptr;
}
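
// Main allocation path; takes the global kmalloc spinlock. There is no
// graceful failure mode here: if the heap (even after expansion) cannot
// satisfy the request, the kernel halts.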
void* kmalloc_impl(size_t size)
{
    ScopedSpinLock lock(s_lock);
    ++g_kmalloc_call_count;

    if (g_dump_kmalloc_stacks && Kernel::g_kernel_symbols_available) {
        dbg() << "kmalloc(" << size << ")";
        Kernel::dump_backtrace();
    }

    void* ptr = g_kmalloc_global->m_heap.allocate(size);
    if (!ptr) {
        klog() << "kmalloc(): PANIC! Out of memory (no suitable block for size " << size << ")";
        Kernel::dump_backtrace();
        Processor::halt();
    }
    return ptr;
}

void kfree(void* ptr)
{
    if (!ptr)
        return;

    ScopedSpinLock lock(s_lock);
    ++g_kfree_call_count;

    g_kmalloc_global->m_heap.deallocate(ptr);
}

void* krealloc(void* ptr, size_t new_size)
{
    ScopedSpinLock lock(s_lock);
    return g_kmalloc_global->m_heap.reallocate(ptr, new_size);
}
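
// The kernel's global operator new and operator new[] forward directly to
// kmalloc(); note that no matching operator delete overloads appear in this
// file.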
void* operator new(size_t size)
{
    return kmalloc(size);
}

void* operator new[](size_t size)
{
    return kmalloc(size);
}

void get_kmalloc_stats(kmalloc_stats& stats)
{
    ScopedSpinLock lock(s_lock);
    stats.bytes_allocated = g_kmalloc_global->m_heap.allocated_bytes();
    stats.bytes_free = g_kmalloc_global->m_heap.free_bytes() + g_kmalloc_global->backup_memory_bytes();
    stats.bytes_eternal = g_kmalloc_bytes_eternal;
    stats.kmalloc_call_count = g_kmalloc_call_count;
    stats.kfree_call_count = g_kfree_call_count;
}