// kmalloc.cpp
  1. /*
  2. * Really really *really* Q&D malloc() and free() implementations
  3. * just to get going. Don't ever let anyone see this shit. :^)
  4. */
  5. #include "types.h"
  6. #include "kmalloc.h"
  7. #include "StdLib.h"
  8. #include "i386.h"
  9. #include "VGA.h"
  10. #include "system.h"
  11. #include "Assertions.h"
  12. #define SANITIZE_KMALLOC
// Header written at the start of every chunked-pool allocation.
// kmalloc() stores it immediately before the pointer it returns, and
// kfree() steps back over it to find which alloc_map bits to clear.
typedef struct
{
    DWORD start;  // index of the first chunk of this allocation
    DWORD nchunk; // number of CHUNK_SIZE chunks spanned (header included)
} PACKED allocation_t;

#define CHUNK_SIZE 128                      // allocation granularity of the pool, in bytes
#define POOL_SIZE (1024 * 1024)             // total size of the chunked kmalloc pool
#define PAGE_ALIGNED_BASE_PHYSICAL 0x300000 // base of the page-aligned bump region
#define ETERNAL_BASE_PHYSICAL 0x200000      // base of the never-freed bump region
#define BASE_PHYS 0x100000                  // base of the chunked pool
// One bit per CHUNK_SIZE chunk of the pool: 1 = in use, 0 = free.
PRIVATE BYTE alloc_map[POOL_SIZE / CHUNK_SIZE / 8];

// Running byte totals for the chunked pool; together they always sum to POOL_SIZE.
volatile DWORD sum_alloc = 0;
volatile DWORD sum_free = POOL_SIZE;

// Total bytes ever handed out by the two bump allocators below.
volatile size_t kmalloc_sum_eternal = 0;
volatile size_t kmalloc_sum_page_aligned = 0;

// Bump pointers for kmalloc_eternal() / kmalloc_page_aligned();
// initialized by kmalloc_init().
static byte* s_next_eternal_ptr;
static byte* s_next_page_aligned_ptr;
  30. bool is_kmalloc_address(void* ptr)
  31. {
  32. if (ptr >= (byte*)ETERNAL_BASE_PHYSICAL && ptr < s_next_eternal_ptr)
  33. return true;
  34. if (ptr >= (byte*)PAGE_ALIGNED_BASE_PHYSICAL && ptr < s_next_page_aligned_ptr)
  35. return true;
  36. return ptr >= (void*)BASE_PHYS && ptr <= ((void*)BASE_PHYS + POOL_SIZE);
  37. }
  38. PUBLIC void
  39. kmalloc_init()
  40. {
  41. memset( &alloc_map, 0, sizeof(alloc_map) );
  42. memset( (void *)BASE_PHYS, 0, POOL_SIZE );
  43. kmalloc_sum_eternal = 0;
  44. kmalloc_sum_page_aligned = 0;
  45. sum_alloc = 0;
  46. sum_free = POOL_SIZE;
  47. s_next_eternal_ptr = (byte*)ETERNAL_BASE_PHYSICAL;
  48. s_next_page_aligned_ptr = (byte*)PAGE_ALIGNED_BASE_PHYSICAL;
  49. }
  50. void* kmalloc_eternal(size_t size)
  51. {
  52. void* ptr = s_next_eternal_ptr;
  53. s_next_eternal_ptr += size;
  54. kmalloc_sum_eternal += size;
  55. return ptr;
  56. }
  57. void* kmalloc_page_aligned(size_t size)
  58. {
  59. ASSERT((size % 4096) == 0);
  60. void* ptr = s_next_page_aligned_ptr;
  61. s_next_page_aligned_ptr += size;
  62. kmalloc_sum_page_aligned += size;
  63. return ptr;
  64. }
  65. PUBLIC void *
  66. kmalloc( DWORD size )
  67. {
  68. InterruptDisabler disabler;
  69. DWORD chunks_needed, chunks_here, first_chunk;
  70. DWORD real_size;
  71. DWORD i, j, k;
  72. /* We need space for the allocation_t structure at the head of the block. */
  73. real_size = size + sizeof(allocation_t);
  74. if (sum_free < real_size) {
  75. kprintf("kmalloc(): PANIC! Out of memory (sucks, dude)\nsum_free=%u, real_size=%x\n", sum_free, real_size);
  76. HANG;
  77. return 0L;
  78. }
  79. chunks_needed = real_size / CHUNK_SIZE;
  80. if( real_size % CHUNK_SIZE )
  81. chunks_needed++;
  82. chunks_here = 0;
  83. first_chunk = 0;
  84. for( i = 0; i < (POOL_SIZE / CHUNK_SIZE / 8); ++i )
  85. {
  86. for( j = 0; j < 8; ++j )
  87. {
  88. if( !(alloc_map[i] & (1<<j)) )
  89. {
  90. if( chunks_here == 0 )
  91. {
  92. /* Mark where potential allocation starts. */
  93. first_chunk = i * 8 + j;
  94. }
  95. chunks_here++;
  96. if( chunks_here == chunks_needed )
  97. {
  98. auto* a = (allocation_t *)(BASE_PHYS + (first_chunk * CHUNK_SIZE));
  99. BYTE *ptr = (BYTE *)a;
  100. ptr += sizeof(allocation_t);
  101. a->nchunk = chunks_needed;
  102. a->start = first_chunk;
  103. for( k = first_chunk; k < (first_chunk + chunks_needed); ++k )
  104. {
  105. alloc_map[k / 8] |= 1 << (k % 8);
  106. }
  107. sum_alloc += a->nchunk * CHUNK_SIZE;
  108. sum_free -= a->nchunk * CHUNK_SIZE;
  109. #ifdef SANITIZE_KMALLOC
  110. memset(ptr, 0xbb, (a->nchunk * CHUNK_SIZE) - sizeof(allocation_t));
  111. #endif
  112. return ptr;
  113. }
  114. }
  115. else
  116. {
  117. /* This is in use, so restart chunks_here counter. */
  118. chunks_here = 0;
  119. }
  120. }
  121. }
  122. kprintf("kmalloc(): PANIC! Out of memory (no suitable block for size %u)\n", size);
  123. HANG;
  124. return nullptr;
  125. }
  126. PUBLIC void
  127. kfree( void *ptr )
  128. {
  129. if( !ptr )
  130. return;
  131. InterruptDisabler disabler;
  132. allocation_t *a = (allocation_t *)((((BYTE *)ptr) - sizeof(allocation_t)));
  133. #if 0
  134. DWORD hdr = (DWORD)a;
  135. DWORD mhdr = hdr & ~0x7;
  136. kprintf("hdr / mhdr %p / %p\n", hdr, mhdr);
  137. ASSERT(hdr == mhdr);
  138. #endif
  139. for (DWORD k = a->start; k < (a->start + a->nchunk); ++k) {
  140. alloc_map[k / 8] &= ~(1 << (k % 8));
  141. }
  142. sum_alloc -= a->nchunk * CHUNK_SIZE;
  143. sum_free += a->nchunk * CHUNK_SIZE;
  144. #ifdef SANITIZE_KMALLOC
  145. memset(a, 0xaa, a->nchunk * CHUNK_SIZE);
  146. #endif
  147. }
  148. void* operator new(unsigned int size)
  149. {
  150. return kmalloc(size);
  151. }
  152. void* operator new[](unsigned int size)
  153. {
  154. return kmalloc(size);
  155. }
  156. void operator delete(void* ptr)
  157. {
  158. return kfree(ptr);
  159. }
  160. void operator delete[](void* ptr)
  161. {
  162. return kfree(ptr);
  163. }
  164. void operator delete(void* ptr, unsigned int)
  165. {
  166. return kfree(ptr);
  167. }
  168. void operator delete[](void* ptr, unsigned int)
  169. {
  170. return kfree(ptr);
  171. }