kmalloc.cpp

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Really really *really* Q&D malloc() and free() implementations
 * just to get going. Don't ever let anyone see this shit. :^)
 */

#include <AK/Assertions.h>
#include <AK/Types.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/KSyms.h>
#include <Kernel/Process.h>
#include <Kernel/Scheduler.h>
#include <Kernel/StdLib.h>
#include <Kernel/Heap/kmalloc.h>

#define SANITIZE_KMALLOC
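
// Every allocation from the pool is prefixed with this header, which records the
// index of its first chunk and the number of chunks it occupies.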
struct [[gnu::packed]] allocation_t
{
    size_t start;
    size_t nchunk;
};

#define BASE_PHYSICAL (0xc0000000 + (4 * MB))
#define CHUNK_SIZE 8
#define POOL_SIZE (3 * MB)

#define ETERNAL_BASE_PHYSICAL (0xc0000000 + (2 * MB))
#define ETERNAL_RANGE_SIZE (2 * MB)
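
// One bit per CHUNK_SIZE-byte chunk of the pool; a set bit means the chunk is in use.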
static u8 alloc_map[POOL_SIZE / CHUNK_SIZE / 8];

volatile size_t sum_alloc = 0;
volatile size_t sum_free = POOL_SIZE;
volatile size_t kmalloc_sum_eternal = 0;

u32 g_kmalloc_call_count;
u32 g_kfree_call_count;
bool g_dump_kmalloc_stacks;

static u8* s_next_eternal_ptr;
static u8* s_end_of_eternal_range;
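
// Returns true if ptr points into either the eternal range handed out so far
// or the main kmalloc pool.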
bool is_kmalloc_address(const void* ptr)
{
    if (ptr >= (u8*)ETERNAL_BASE_PHYSICAL && ptr < s_next_eternal_ptr)
        return true;
    return (size_t)ptr >= BASE_PHYSICAL && (size_t)ptr <= (BASE_PHYSICAL + POOL_SIZE);
}
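
// Clear the allocation bitmap and zero the pool, then reset the statistics counters
// and the eternal-range pointers.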
void kmalloc_init()
{
    memset(&alloc_map, 0, sizeof(alloc_map));
    memset((void*)BASE_PHYSICAL, 0, POOL_SIZE);

    kmalloc_sum_eternal = 0;
    sum_alloc = 0;
    sum_free = POOL_SIZE;

    s_next_eternal_ptr = (u8*)ETERNAL_BASE_PHYSICAL;
    s_end_of_eternal_range = s_next_eternal_ptr + ETERNAL_RANGE_SIZE;
}
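
// Bump allocator for allocations that live for the lifetime of the kernel: hands out
// the next part of the eternal range and asserts if the range is exhausted. There is
// no way to free an eternal allocation.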
void* kmalloc_eternal(size_t size)
{
    void* ptr = s_next_eternal_ptr;
    s_next_eternal_ptr += size;
    ASSERT(s_next_eternal_ptr < s_end_of_eternal_range);
    kmalloc_sum_eternal += size;
    return ptr;
}
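
// Over-allocate by (alignment + sizeof(void*)), round up to the requested alignment,
// and stash the original kmalloc() pointer just before the aligned address so that
// kfree_aligned() can recover it.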
void* kmalloc_aligned(size_t size, size_t alignment)
{
    void* ptr = kmalloc(size + alignment + sizeof(void*));
    size_t max_addr = (size_t)ptr + alignment;
    void* aligned_ptr = (void*)(max_addr - (max_addr % alignment));
    ((void**)aligned_ptr)[-1] = ptr;
    return aligned_ptr;
}
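
// Recover the original pointer stored by kmalloc_aligned() and free it.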
void kfree_aligned(void* ptr)
{
    kfree(((void**)ptr)[-1]);
}

void* kmalloc_page_aligned(size_t size)
{
    void* ptr = kmalloc_aligned(size, PAGE_SIZE);
    size_t d = (size_t)ptr;
    ASSERT((d & PAGE_MASK) == d);
    return ptr;
}
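
// First-fit allocator: scan the bitmap for a run of free chunks large enough to hold
// the allocation_t header plus the requested size, mark those chunks as used, and
// return a pointer just past the header. Hangs with a panic message if no suitable
// run of chunks exists.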
void* kmalloc_impl(size_t size)
{
    InterruptDisabler disabler;
    ++g_kmalloc_call_count;

    if (g_dump_kmalloc_stacks && ksyms_ready) {
        dbgprintf("kmalloc(%u)\n", size);
        dump_backtrace();
    }

    // We need space for the allocation_t structure at the head of the block.
    size_t real_size = size + sizeof(allocation_t);

    if (sum_free < real_size) {
        dump_backtrace();
        kprintf("%s(%u) kmalloc(): PANIC! Out of memory (sucks, dude)\nsum_free=%u, real_size=%u\n", current->process().name().characters(), current->pid(), sum_free, real_size);
        hang();
    }

    size_t chunks_needed = real_size / CHUNK_SIZE;
    if (real_size % CHUNK_SIZE)
        ++chunks_needed;

    size_t chunks_here = 0;
    size_t first_chunk = 0;

    for (size_t i = 0; i < (POOL_SIZE / CHUNK_SIZE / 8); ++i) {
        if (alloc_map[i] == 0xff) {
            // Skip over completely full bucket.
            chunks_here = 0;
            continue;
        }
        // FIXME: This scan can be optimized further with LZCNT.
        for (size_t j = 0; j < 8; ++j) {
            if (!(alloc_map[i] & (1 << j))) {
                if (chunks_here == 0) {
                    // Mark where potential allocation starts.
                    first_chunk = i * 8 + j;
                }

                ++chunks_here;

                if (chunks_here == chunks_needed) {
                    auto* a = (allocation_t*)(BASE_PHYSICAL + (first_chunk * CHUNK_SIZE));
                    u8* ptr = (u8*)a;
                    ptr += sizeof(allocation_t);
                    a->nchunk = chunks_needed;
                    a->start = first_chunk;

                    for (size_t k = first_chunk; k < (first_chunk + chunks_needed); ++k) {
                        alloc_map[k / 8] |= 1 << (k % 8);
                    }

                    sum_alloc += a->nchunk * CHUNK_SIZE;
                    sum_free -= a->nchunk * CHUNK_SIZE;
#ifdef SANITIZE_KMALLOC
                    memset(ptr, 0xbb, (a->nchunk * CHUNK_SIZE) - sizeof(allocation_t));
#endif
                    return ptr;
                }
            } else {
                // This is in use, so restart chunks_here counter.
                chunks_here = 0;
            }
        }
    }

    kprintf("%s(%u) kmalloc(): PANIC! Out of memory (no suitable block for size %u)\n", current->process().name().characters(), current->pid(), size);
    dump_backtrace();
    hang();
}
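
// Clear the bitmap bits for every chunk of the allocation and update the statistics.
// With SANITIZE_KMALLOC enabled, the freed block (including its header) is filled
// with 0xaa to help catch use-after-free bugs.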
void kfree(void* ptr)
{
    if (!ptr)
        return;

    InterruptDisabler disabler;
    ++g_kfree_call_count;

    auto* a = (allocation_t*)((((u8*)ptr) - sizeof(allocation_t)));

    for (size_t k = a->start; k < (a->start + a->nchunk); ++k)
        alloc_map[k / 8] &= ~(1 << (k % 8));

    sum_alloc -= a->nchunk * CHUNK_SIZE;
    sum_free += a->nchunk * CHUNK_SIZE;

#ifdef SANITIZE_KMALLOC
    memset(a, 0xaa, a->nchunk * CHUNK_SIZE);
#endif
}
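
// Naive realloc: if the chunk-rounded size of the existing block already equals the
// requested size, reuse it; otherwise allocate a new block, copy min(old, new) bytes,
// and free the old block.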
void* krealloc(void* ptr, size_t new_size)
{
    if (!ptr)
        return kmalloc(new_size);

    InterruptDisabler disabler;

    auto* a = (allocation_t*)((((u8*)ptr) - sizeof(allocation_t)));
    size_t old_size = a->nchunk * CHUNK_SIZE;

    if (old_size == new_size)
        return ptr;

    auto* new_ptr = kmalloc(new_size);
    memcpy(new_ptr, ptr, min(old_size, new_size));
    kfree(ptr);
    return new_ptr;
}

void* operator new(size_t size)
{
    return kmalloc(size);
}

void* operator new[](size_t size)
{
    return kmalloc(size);
}