/*
 * Really really *really* Q&D malloc() and free() implementations
 * just to get going. Don't ever let anyone see this shit. :^)
 */
  5. #include "types.h"
  6. #include "kmalloc.h"
  7. #include "StdLib.h"
  8. #include "i386.h"
  9. #include "system.h"
  10. #include <AK/Assertions.h>
  11. #define SANITIZE_KMALLOC
// Header written at the start of every kmalloc() block, immediately before
// the pointer handed back to the caller. Records which chunk the allocation
// begins at and how many chunks it spans, so kfree() can release it.
typedef struct
{
    DWORD start;   // index of the first chunk of this allocation
    DWORD nchunk;  // number of CHUNK_SIZE-byte chunks covered (header included)
} PACKED allocation_t;

#define CHUNK_SIZE 128                       // granularity of the pool allocator
#define POOL_SIZE (1024 * 1024)              // total bytes managed by kmalloc()
#define PAGE_ALIGNED_BASE_PHYSICAL 0x300000  // start of the page-aligned bump region
#define ETERNAL_BASE_PHYSICAL 0x200000       // start of the never-freed bump region
#define BASE_PHYS 0x100000                   // start of the chunked kmalloc pool
#define RANGE_SIZE 0x100000                  // size of each bump region
// One bit per CHUNK_SIZE-byte chunk of the pool: set = in use, clear = free.
static byte alloc_map[POOL_SIZE / CHUNK_SIZE / 8];

// Running byte totals for the chunk pool and the two bump regions.
// NOTE(review): declared volatile, presumably so they can be observed
// outside the InterruptDisabler-protected sections — confirm.
volatile DWORD sum_alloc = 0;
volatile DWORD sum_free = POOL_SIZE;
volatile size_t kmalloc_sum_eternal = 0;
volatile size_t kmalloc_sum_page_aligned = 0;

// Bump pointers and end-of-range limits for the eternal and page-aligned regions.
static byte* s_next_eternal_ptr;
static byte* s_next_page_aligned_ptr;
static byte* s_end_of_eternal_range;
static byte* s_end_of_page_aligned_range;
  32. bool is_kmalloc_address(void* ptr)
  33. {
  34. if (ptr >= (byte*)ETERNAL_BASE_PHYSICAL && ptr < s_next_eternal_ptr)
  35. return true;
  36. if (ptr >= (byte*)PAGE_ALIGNED_BASE_PHYSICAL && ptr < s_next_page_aligned_ptr)
  37. return true;
  38. return (dword)ptr >= BASE_PHYS && (dword)ptr <= (BASE_PHYS + POOL_SIZE);
  39. }
  40. void kmalloc_init()
  41. {
  42. memset( &alloc_map, 0, sizeof(alloc_map) );
  43. memset( (void *)BASE_PHYS, 0, POOL_SIZE );
  44. kmalloc_sum_eternal = 0;
  45. kmalloc_sum_page_aligned = 0;
  46. sum_alloc = 0;
  47. sum_free = POOL_SIZE;
  48. s_next_eternal_ptr = (byte*)ETERNAL_BASE_PHYSICAL;
  49. s_next_page_aligned_ptr = (byte*)PAGE_ALIGNED_BASE_PHYSICAL;
  50. s_end_of_eternal_range = s_next_eternal_ptr + RANGE_SIZE;
  51. s_end_of_page_aligned_range = s_next_page_aligned_ptr + RANGE_SIZE;
  52. }
  53. void* kmalloc_eternal(size_t size)
  54. {
  55. void* ptr = s_next_eternal_ptr;
  56. s_next_eternal_ptr += size;
  57. ASSERT(s_next_eternal_ptr < s_end_of_eternal_range);
  58. kmalloc_sum_eternal += size;
  59. return ptr;
  60. }
  61. void* kmalloc_page_aligned(size_t size)
  62. {
  63. ASSERT((size % PAGE_SIZE) == 0);
  64. void* ptr = s_next_page_aligned_ptr;
  65. s_next_page_aligned_ptr += size;
  66. ASSERT(s_next_page_aligned_ptr < s_end_of_page_aligned_range);
  67. kmalloc_sum_page_aligned += size;
  68. return ptr;
  69. }
// Allocate `size` bytes from the chunk pool using a first-fit scan of the
// bitmap. The returned pointer sits just past an allocation_t header; hangs
// the machine on exhaustion or fragmentation (it never returns failure).
void* kmalloc(dword size)
{
    // Keep interrupts off for the whole scan/commit — the bitmap and the
    // accounting counters are shared state.
    InterruptDisabler disabler;
    DWORD chunks_needed, chunks_here, first_chunk;
    DWORD real_size;
    DWORD i, j, k;

    /* We need space for the allocation_t structure at the head of the block. */
    real_size = size + sizeof(allocation_t);

    // Quick reject: not enough free bytes in total.
    if (sum_free < real_size) {
        kprintf("kmalloc(): PANIC! Out of memory (sucks, dude)\nsum_free=%u, real_size=%x\n", sum_free, real_size);
        HANG;
        return 0L; // unreachable after HANG; keeps the compiler quiet
    }

    // Round the request up to whole chunks.
    chunks_needed = real_size / CHUNK_SIZE;
    if( real_size % CHUNK_SIZE )
        chunks_needed++;

    chunks_here = 0;
    first_chunk = 0;

    // First-fit: walk the bitmap byte by byte, bit by bit, counting the
    // current run of free chunks (chunk index = i * 8 + j, so runs are
    // contiguous across byte boundaries).
    for( i = 0; i < (POOL_SIZE / CHUNK_SIZE / 8); ++i )
    {
        for( j = 0; j < 8; ++j )
        {
            if( !(alloc_map[i] & (1<<j)) )
            {
                if( chunks_here == 0 )
                {
                    /* Mark where potential allocation starts. */
                    first_chunk = i * 8 + j;
                }
                chunks_here++;
                if( chunks_here == chunks_needed )
                {
                    // Found a big-enough run. Place the header at the start
                    // of the run; the caller's pointer follows the header.
                    auto* a = (allocation_t *)(BASE_PHYS + (first_chunk * CHUNK_SIZE));
                    BYTE *ptr = (BYTE *)a;
                    ptr += sizeof(allocation_t);
                    a->nchunk = chunks_needed;
                    a->start = first_chunk;

                    // Mark every chunk of the run as in use.
                    for( k = first_chunk; k < (first_chunk + chunks_needed); ++k )
                    {
                        alloc_map[k / 8] |= 1 << (k % 8);
                    }

                    sum_alloc += a->nchunk * CHUNK_SIZE;
                    sum_free -= a->nchunk * CHUNK_SIZE;
#ifdef SANITIZE_KMALLOC
                    // Poison the new allocation so reads of uninitialized
                    // memory stand out (0xbb pattern).
                    memset(ptr, 0xbb, (a->nchunk * CHUNK_SIZE) - sizeof(allocation_t));
#endif
                    return ptr;
                }
            }
            else
            {
                /* This is in use, so restart chunks_here counter. */
                chunks_here = 0;
            }
        }
    }

    // Enough free bytes existed overall but no contiguous run was big
    // enough — the pool is fragmented.
    kprintf("kmalloc(): PANIC! Out of memory (no suitable block for size %u)\n", size);
    HANG;
    return nullptr;
}
  130. void kfree(void *ptr)
  131. {
  132. if( !ptr )
  133. return;
  134. InterruptDisabler disabler;
  135. allocation_t *a = (allocation_t *)((((BYTE *)ptr) - sizeof(allocation_t)));
  136. #if 0
  137. DWORD hdr = (DWORD)a;
  138. DWORD mhdr = hdr & ~0x7;
  139. kprintf("hdr / mhdr %p / %p\n", hdr, mhdr);
  140. ASSERT(hdr == mhdr);
  141. #endif
  142. for (DWORD k = a->start; k < (a->start + a->nchunk); ++k) {
  143. alloc_map[k / 8] &= ~(1 << (k % 8));
  144. }
  145. sum_alloc -= a->nchunk * CHUNK_SIZE;
  146. sum_free += a->nchunk * CHUNK_SIZE;
  147. #ifdef SANITIZE_KMALLOC
  148. memset(a, 0xaa, a->nchunk * CHUNK_SIZE);
  149. #endif
  150. }
// Global C++ allocation operators, routed through the kernel heap so that
// `new`/`delete` anywhere in the kernel use kmalloc()/kfree().
void* operator new(unsigned int size)
{
    return kmalloc(size);
}

void* operator new[](unsigned int size)
{
    return kmalloc(size);
}

void operator delete(void* ptr)
{
    return kfree(ptr);
}

void operator delete[](void* ptr)
{
    return kfree(ptr);
}

// Sized-deallocation forms; the size argument is ignored because the
// allocation_t header already records the block's extent.
void operator delete(void* ptr, unsigned int)
{
    return kfree(ptr);
}

void operator delete[](void* ptr, unsigned int)
{
    return kfree(ptr);
}