kmalloc.cpp

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Really really *really* Q&D malloc() and free() implementations
 * just to get going. Don't ever let anyone see this shit. :^)
 */
#include <AK/Assertions.h>
#include <AK/Bitmap.h>
#include <AK/Optional.h>
#include <AK/Types.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/KSyms.h>
#include <Kernel/Process.h>
#include <Kernel/Scheduler.h>
#include <Kernel/SpinLock.h>
#include <Kernel/StdLib.h>

#define SANITIZE_KMALLOC
struct AllocationHeader {
    size_t allocation_size_in_chunks;
    u8 data[0];
};
#define BASE_PHYSICAL (0xc0000000 + (4 * MB))
#define CHUNK_SIZE 32
#define POOL_SIZE (3 * MB)

#define ETERNAL_BASE_PHYSICAL (0xc0000000 + (2 * MB))
#define ETERNAL_RANGE_SIZE (2 * MB)
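
// Heap layout (values follow from the constants above): the "eternal" region
// occupies 0xc0200000..0xc0400000 (2 MB) and is handed out bump-pointer style,
// never to be freed. The general-purpose pool starts right after it at
// 0xc0400000 and spans 3 MB, carved into 98304 chunks of 32 bytes each. One
// bit per chunk tracks occupancy, so alloc_map below is 98304 / 8 = 12288 bytes.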
static u8 alloc_map[POOL_SIZE / CHUNK_SIZE / 8];

size_t g_kmalloc_bytes_allocated = 0;
size_t g_kmalloc_bytes_free = POOL_SIZE;
size_t g_kmalloc_bytes_eternal = 0;
size_t g_kmalloc_call_count;
size_t g_kfree_call_count;
bool g_dump_kmalloc_stacks;

static u8* s_next_eternal_ptr;
static u8* s_end_of_eternal_range;

static RecursiveSpinLock s_lock; // needs to be recursive because of dump_backtrace()
void kmalloc_init()
{
    memset(&alloc_map, 0, sizeof(alloc_map));
    memset((void*)BASE_PHYSICAL, 0, POOL_SIZE);
    s_lock.initialize();

    g_kmalloc_bytes_eternal = 0;
    g_kmalloc_bytes_allocated = 0;
    g_kmalloc_bytes_free = POOL_SIZE;

    s_next_eternal_ptr = (u8*)ETERNAL_BASE_PHYSICAL;
    s_end_of_eternal_range = s_next_eternal_ptr + ETERNAL_RANGE_SIZE;
}
void* kmalloc_eternal(size_t size)
{
    ScopedSpinLock lock(s_lock);
    void* ptr = s_next_eternal_ptr;
    s_next_eternal_ptr += size;
    ASSERT(s_next_eternal_ptr < s_end_of_eternal_range);
    g_kmalloc_bytes_eternal += size;
    return ptr;
}
void* kmalloc_aligned(size_t size, size_t alignment)
{
    void* ptr = kmalloc(size + alignment + sizeof(void*));
    size_t max_addr = (size_t)ptr + alignment;
    void* aligned_ptr = (void*)(max_addr - (max_addr % alignment));
    ((void**)aligned_ptr)[-1] = ptr;
    return aligned_ptr;
}
void kfree_aligned(void* ptr)
{
    kfree(((void**)ptr)[-1]);
}

void* kmalloc_page_aligned(size_t size)
{
    void* ptr = kmalloc_aligned(size, PAGE_SIZE);
    size_t d = (size_t)ptr;
    ASSERT((d & PAGE_MASK) == d);
    return ptr;
}
inline void* kmalloc_allocate(size_t first_chunk, size_t chunks_needed)
{
    auto* a = (AllocationHeader*)(BASE_PHYSICAL + (first_chunk * CHUNK_SIZE));
    u8* ptr = a->data;
    a->allocation_size_in_chunks = chunks_needed;

    Bitmap bitmap_wrapper = Bitmap::wrap(alloc_map, POOL_SIZE / CHUNK_SIZE);
    bitmap_wrapper.set_range(first_chunk, chunks_needed, true);

    g_kmalloc_bytes_allocated += a->allocation_size_in_chunks * CHUNK_SIZE;
    g_kmalloc_bytes_free -= a->allocation_size_in_chunks * CHUNK_SIZE;

#ifdef SANITIZE_KMALLOC
    memset(ptr, KMALLOC_SCRUB_BYTE, (a->allocation_size_in_chunks * CHUNK_SIZE) - sizeof(AllocationHeader));
#endif

    return ptr;
}
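
// kmalloc_impl() is the real allocator: it scans the chunk bitmap for a run of
// free chunks big enough for the request plus its AllocationHeader. Requests
// under 128 chunks (i.e. under 4 KB) take the first fit found, favoring speed;
// larger requests take the best fit, trading scan time for less fragmentation.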
void* kmalloc_impl(size_t size)
{
    ScopedSpinLock lock(s_lock);
    ++g_kmalloc_call_count;

    if (g_dump_kmalloc_stacks && Kernel::g_kernel_symbols_available) {
        dbg() << "kmalloc(" << size << ")";
        Kernel::dump_backtrace();
    }

    // We need space for the AllocationHeader at the head of the block.
    size_t real_size = size + sizeof(AllocationHeader);

    if (g_kmalloc_bytes_free < real_size) {
        Kernel::dump_backtrace();
        klog() << "kmalloc(): PANIC! Out of memory\nsum_free=" << g_kmalloc_bytes_free << ", real_size=" << real_size;
        Kernel::hang();
    }

    size_t chunks_needed = (real_size + CHUNK_SIZE - 1) / CHUNK_SIZE;

    Bitmap bitmap_wrapper = Bitmap::wrap(alloc_map, POOL_SIZE / CHUNK_SIZE);
    Optional<size_t> first_chunk;

    // Choose the right allocation policy for the request size.
    constexpr u32 best_fit_threshold = 128;
    if (chunks_needed < best_fit_threshold) {
        first_chunk = bitmap_wrapper.find_first_fit(chunks_needed);
    } else {
        first_chunk = bitmap_wrapper.find_best_fit(chunks_needed);
    }

    if (!first_chunk.has_value()) {
        klog() << "kmalloc(): PANIC! Out of memory (no suitable block for size " << size << ")";
        Kernel::dump_backtrace();
        Kernel::hang();
    }

    return kmalloc_allocate(first_chunk.value(), chunks_needed);
}
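
// kfree_impl() recovers the AllocationHeader that sits just below the caller's
// pointer, clears the matching bits in the chunk bitmap, and (with
// SANITIZE_KMALLOC enabled) scrubs the whole block with KFREE_SCRUB_BYTE so
// that use-after-free bugs surface quickly.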
static inline void kfree_impl(void* ptr)
{
    ++g_kfree_call_count;

    auto* a = (AllocationHeader*)((((u8*)ptr) - sizeof(AllocationHeader)));
    FlatPtr start = ((FlatPtr)a - (FlatPtr)BASE_PHYSICAL) / CHUNK_SIZE;
    Bitmap bitmap_wrapper = Bitmap::wrap(alloc_map, POOL_SIZE / CHUNK_SIZE);
    bitmap_wrapper.set_range(start, a->allocation_size_in_chunks, false);

    g_kmalloc_bytes_allocated -= a->allocation_size_in_chunks * CHUNK_SIZE;
    g_kmalloc_bytes_free += a->allocation_size_in_chunks * CHUNK_SIZE;

#ifdef SANITIZE_KMALLOC
    memset(a, KFREE_SCRUB_BYTE, a->allocation_size_in_chunks * CHUNK_SIZE);
#endif
}
void kfree(void* ptr)
{
    if (!ptr)
        return;

    ScopedSpinLock lock(s_lock);
    kfree_impl(ptr);
}
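
// Note that old_size below is the chunk-rounded size of the existing block, so
// the early return fires only when new_size happens to equal that rounded size
// exactly; in every other case krealloc() allocates a fresh block, copies
// min(old_size, new_size) bytes, and frees the old one.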
void* krealloc(void* ptr, size_t new_size)
{
    if (!ptr)
        return kmalloc(new_size);

    ScopedSpinLock lock(s_lock);

    auto* a = (AllocationHeader*)((((u8*)ptr) - sizeof(AllocationHeader)));
    size_t old_size = a->allocation_size_in_chunks * CHUNK_SIZE;

    if (old_size == new_size)
        return ptr;

    auto* new_ptr = kmalloc(new_size);
    memcpy(new_ptr, ptr, min(old_size, new_size));
    kfree_impl(ptr);
    return new_ptr;
}
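
// The kernel's global operator new and new[] simply forward to kmalloc().
// There is no std::bad_alloc path here: allocation failure panics inside
// kmalloc_impl() instead of throwing, since the kernel is built without
// exception support.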
void* operator new(size_t size)
{
    return kmalloc(size);
}

void* operator new[](size_t size)
{
    return kmalloc(size);
}