// malloc.cpp

#include <AK/Bitmap.h>
#include <AK/InlineLinkedList.h>
#include <AK/ScopedValueRollback.h>
#include <AK/Vector.h>
#include <LibThread/Lock.h>
#include <assert.h>
#include <mallocdefs.h>
#include <serenity.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

// FIXME: Thread safety.
//#define MALLOC_DEBUG
#define RECYCLE_BIG_ALLOCATIONS

#define MAGIC_PAGE_HEADER 0x42657274
#define MAGIC_BIGALLOC_HEADER 0x42697267
#define PAGE_ROUND_UP(x) ((((size_t)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1)))
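// PAGE_ROUND_UP rounds up to a whole number of 4 KiB pages, e.g.
// PAGE_ROUND_UP(1) == 4096 and PAGE_ROUND_UP(4097) == 8192.

// The lock guarding all allocator state lives in raw static storage and is
// placement-new'ed in __malloc_init(), so constructing it never has to
// recurse into malloc itself.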
static LibThread::Lock& malloc_lock()
{
    static u32 lock_storage[sizeof(LibThread::Lock) / sizeof(u32)];
    return *reinterpret_cast<LibThread::Lock*>(&lock_storage);
}

constexpr int number_of_chunked_blocks_to_keep_around_per_size_class = 32;
constexpr int number_of_big_blocks_to_keep_around_per_size_class = 8;

static bool s_log_malloc = false;
static bool s_scrub_malloc = true;
static bool s_scrub_free = true;
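// The larger size classes sit just below powers of two (252 rather than 256,
// and so on) so that a whole number of chunks plus the ChunkedBlock header
// still fit in one 4096-byte page: e.g. 16 chunks of 252 bytes use 4032
// bytes, leaving 64 bytes for the header.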
static unsigned short size_classes[] = { 8, 16, 32, 64, 128, 252, 508, 1016, 2036, 0 };
static constexpr size_t num_size_classes = sizeof(size_classes) / sizeof(unsigned short);

struct CommonHeader {
    size_t m_magic;
    size_t m_size;
};

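// Allocations too large for any size class get an mmap'd region of their own,
// with this header at the start of the first page; free() finds it again by
// masking the pointer down to its page base and checking the magic.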
struct BigAllocationBlock : public CommonHeader {
    BigAllocationBlock(size_t size)
    {
        m_magic = MAGIC_BIGALLOC_HEADER;
        m_size = size;
    }
    unsigned char* m_slot[0];
};

struct FreelistEntry {
    FreelistEntry* next;
};

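// A ChunkedBlock is a single page carved into equal-sized chunks for one size
// class. Free chunks double as FreelistEntry nodes, so the freelist costs no
// extra memory: the constructor threads it through every chunk in the page.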
struct ChunkedBlock : public CommonHeader
    , public InlineLinkedListNode<ChunkedBlock> {
    ChunkedBlock(size_t bytes_per_chunk)
    {
        m_magic = MAGIC_PAGE_HEADER;
        m_size = bytes_per_chunk;
        m_free_chunks = chunk_capacity();
        m_freelist = (FreelistEntry*)chunk(0);
        for (size_t i = 0; i < chunk_capacity(); ++i) {
            auto* entry = (FreelistEntry*)chunk(i);
            if (i != chunk_capacity() - 1)
                entry->next = (FreelistEntry*)chunk(i + 1);
            else
                entry->next = nullptr;
        }
    }

    ChunkedBlock* m_prev { nullptr };
    ChunkedBlock* m_next { nullptr };
    FreelistEntry* m_freelist { nullptr };
    unsigned short m_free_chunks { 0 };
    unsigned char m_slot[0];

    void* chunk(int index)
    {
        return &m_slot[index * m_size];
    }
    bool is_full() const { return m_free_chunks == 0; }
    size_t bytes_per_chunk() const { return m_size; }
    size_t free_chunks() const { return m_free_chunks; }
    size_t used_chunks() const { return chunk_capacity() - m_free_chunks; }
    size_t chunk_capacity() const { return (PAGE_SIZE - sizeof(ChunkedBlock)) / m_size; }
};

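// Per-size-class bookkeeping: blocks with free chunks live on usable_blocks,
// exhausted ones on full_blocks, and up to 32 completely empty blocks are
// cached in empty_blocks for quick reuse.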
struct Allocator {
    size_t size { 0 };
    size_t block_count { 0 };
    size_t empty_block_count { 0 };
    ChunkedBlock* empty_blocks[number_of_chunked_blocks_to_keep_around_per_size_class] { nullptr };
    InlineLinkedList<ChunkedBlock> usable_blocks;
    InlineLinkedList<ChunkedBlock> full_blocks;
};

struct BigAllocator {
    Vector<BigAllocationBlock*, number_of_big_blocks_to_keep_around_per_size_class> blocks;
};

static Allocator g_allocators[num_size_classes];
static BigAllocator g_big_allocators[1];

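// Maps a request to the smallest size class that can hold it. Requests larger
// than the biggest class fall through to the big-allocation path, and
// good_size is rounded up to whole pages instead.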
static Allocator* allocator_for_size(size_t size, size_t& good_size)
{
    for (int i = 0; size_classes[i]; ++i) {
        if (size <= size_classes[i]) {
            good_size = size_classes[i];
            return &g_allocators[i];
        }
    }
    good_size = PAGE_ROUND_UP(size);
    return nullptr;
}

static BigAllocator* big_allocator_for_size(size_t size)
{
    if (size == 4096)
        return &g_big_allocators[0];
    return nullptr;
}

extern "C" {

size_t malloc_good_size(size_t size)
{
    for (int i = 0; size_classes[i]; ++i) {
        if (size <= size_classes[i])
            return size_classes[i];
    }
    return PAGE_ROUND_UP(size);
}

static void* os_alloc(size_t size, const char* name)
{
    return mmap_with_name(nullptr, size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0, name);
}

static void os_free(void* ptr, size_t size)
{
    int rc = munmap(ptr, size);
    assert(rc == 0);
}

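// malloc() proper: big requests go straight to (or are recycled from) the
// big-allocation cache; everything else takes a chunk from the first usable
// block in its size class, reviving a cached empty block or mapping a fresh
// page when no block has a free chunk.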
void* malloc(size_t size)
{
    LOCKER(malloc_lock());

    if (s_log_malloc)
        dbgprintf("LibC: malloc(%zu)\n", size);

    if (!size)
        return nullptr;

    size_t good_size;
    auto* allocator = allocator_for_size(size, good_size);

    if (!allocator) {
        size_t real_size = PAGE_ROUND_UP(sizeof(BigAllocationBlock) + size);
#ifdef RECYCLE_BIG_ALLOCATIONS
        if (auto* allocator = big_allocator_for_size(real_size)) {
            if (!allocator->blocks.is_empty()) {
                auto* block = allocator->blocks.take_last();
                if (mprotect(block, real_size, PROT_READ | PROT_WRITE) < 0) {
                    perror("mprotect");
                    ASSERT_NOT_REACHED();
                }
                set_mmap_name(block, PAGE_SIZE, "malloc: BigAllocationBlock (reused)");
                return &block->m_slot[0];
            }
        }
#endif
        auto* block = (BigAllocationBlock*)os_alloc(real_size, "malloc: BigAllocationBlock");
        new (block) BigAllocationBlock(real_size);
        return &block->m_slot[0];
    }

    ChunkedBlock* block = nullptr;

    for (block = allocator->usable_blocks.head(); block; block = block->next()) {
        if (block->free_chunks())
            break;
    }

    if (!block && allocator->empty_block_count) {
        block = allocator->empty_blocks[--allocator->empty_block_count];
        int rc = mprotect(block, PAGE_SIZE, PROT_READ | PROT_WRITE);
        if (rc < 0) {
            perror("mprotect");
            ASSERT_NOT_REACHED();
        }
        char buffer[64];
        snprintf(buffer, sizeof(buffer), "malloc: ChunkedBlock(%zu) (reused)", good_size);
        set_mmap_name(block, PAGE_SIZE, buffer);
        allocator->usable_blocks.append(block);
    }

    if (!block) {
        char buffer[64];
        snprintf(buffer, sizeof(buffer), "malloc: ChunkedBlock(%zu)", good_size);
        block = (ChunkedBlock*)os_alloc(PAGE_SIZE, buffer);
        new (block) ChunkedBlock(good_size);
        allocator->usable_blocks.append(block);
        ++allocator->block_count;
    }

    --block->m_free_chunks;
    void* ptr = block->m_freelist;
    block->m_freelist = block->m_freelist->next;
    if (block->is_full()) {
#ifdef MALLOC_DEBUG
        dbgprintf("Block %p is now full in size class %zu\n", block, good_size);
#endif
        allocator->usable_blocks.remove(block);
        allocator->full_blocks.append(block);
    }
#ifdef MALLOC_DEBUG
    dbgprintf("LibC: allocated %p (chunk in block %p, size %zu)\n", ptr, block, block->bytes_per_chunk());
#endif

    if (s_scrub_malloc)
        memset(ptr, MALLOC_SCRUB_BYTE, block->m_size);
    return ptr;
}

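// free() recovers the owning block by masking the pointer down to its page
// base: both block types start with a CommonHeader whose magic says which
// kind of block this is. Blocks that become completely empty are kept around
// (mprotect'ed PROT_NONE to catch use-after-free) up to a per-class limit,
// and unmapped past it.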
void free(void* ptr)
{
    ScopedValueRollback rollback(errno);

    if (!ptr)
        return;

    LOCKER(malloc_lock());

    void* page_base = (void*)((uintptr_t)ptr & (uintptr_t)~0xfff);
    size_t magic = *(size_t*)page_base;

    if (magic == MAGIC_BIGALLOC_HEADER) {
        auto* block = (BigAllocationBlock*)page_base;
#ifdef RECYCLE_BIG_ALLOCATIONS
        if (auto* allocator = big_allocator_for_size(block->m_size)) {
            if (allocator->blocks.size() < number_of_big_blocks_to_keep_around_per_size_class) {
                allocator->blocks.append(block);
                set_mmap_name(block, PAGE_SIZE, "malloc: BigAllocationBlock (free)");
                if (mprotect(block, PAGE_SIZE, PROT_NONE) < 0) {
                    perror("mprotect");
                    ASSERT_NOT_REACHED();
                }
                return;
            }
        }
#endif
        os_free(block, block->m_size);
        return;
    }

    assert(magic == MAGIC_PAGE_HEADER);
    auto* block = (ChunkedBlock*)page_base;

#ifdef MALLOC_DEBUG
    dbgprintf("LibC: freeing %p in allocator %p (size=%zu, used=%zu)\n", ptr, block, block->bytes_per_chunk(), block->used_chunks());
#endif

    if (s_scrub_free)
        memset(ptr, FREE_SCRUB_BYTE, block->bytes_per_chunk());

    auto* entry = (FreelistEntry*)ptr;
    entry->next = block->m_freelist;
    block->m_freelist = entry;

    if (block->is_full()) {
        size_t good_size;
        auto* allocator = allocator_for_size(block->m_size, good_size);
#ifdef MALLOC_DEBUG
        dbgprintf("Block %p no longer full in size class %zu\n", block, good_size);
#endif
        allocator->full_blocks.remove(block);
        allocator->usable_blocks.prepend(block);
    }

    ++block->m_free_chunks;

    if (!block->used_chunks()) {
        size_t good_size;
        auto* allocator = allocator_for_size(block->m_size, good_size);
        if (allocator->block_count < number_of_chunked_blocks_to_keep_around_per_size_class) {
#ifdef MALLOC_DEBUG
            dbgprintf("Keeping block %p around for size class %zu\n", block, good_size);
#endif
            allocator->usable_blocks.remove(block);
            allocator->empty_blocks[allocator->empty_block_count++] = block;
            char buffer[64];
            snprintf(buffer, sizeof(buffer), "malloc: ChunkedBlock(%zu) (free)", good_size);
            set_mmap_name(block, PAGE_SIZE, buffer);
            mprotect(block, PAGE_SIZE, PROT_NONE);
            return;
        }
#ifdef MALLOC_DEBUG
        dbgprintf("Releasing block %p for size class %zu\n", block, good_size);
#endif
        allocator->usable_blocks.remove(block);
        --allocator->block_count;
        os_free(block, PAGE_SIZE);
    }
}

void* calloc(size_t count, size_t size)
{
    size_t new_size = count * size;
    // Reject multiplication overflow instead of returning an undersized buffer.
    if (size != 0 && new_size / size != count)
        return nullptr;
    auto* ptr = malloc(new_size);
    if (ptr)
        memset(ptr, 0, new_size);
    return ptr;
}

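// Both block types record the allocation size in their CommonHeader, so the
// usable size of any pointer can be read straight off its page base.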
size_t malloc_size(void* ptr)
{
    if (!ptr)
        return 0;
    LOCKER(malloc_lock());
    void* page_base = (void*)((uintptr_t)ptr & (uintptr_t)~0xfff);
    auto* header = (const CommonHeader*)page_base;
    auto size = header->m_size;
    if (header->m_magic == MAGIC_BIGALLOC_HEADER)
        size -= sizeof(CommonHeader);
    return size;
}

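// realloc() can answer in place whenever the existing allocation is already
// large enough; otherwise it falls back to allocate-copy-free. Note that
// malloc_size() takes the same lock again, so this relies on malloc_lock()
// being a recursive lock.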
void* realloc(void* ptr, size_t size)
{
    if (!ptr)
        return malloc(size);
    LOCKER(malloc_lock());
    auto existing_allocation_size = malloc_size(ptr);
    if (size <= existing_allocation_size)
        return ptr;
    auto* new_ptr = malloc(size);
    memcpy(new_ptr, ptr, min(existing_allocation_size, size));
    free(ptr);
    return new_ptr;
}

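// Called once at process startup. Constructs the lock in its static storage
// (see malloc_lock() above) and reads the LIBC_* environment variables that
// toggle scrubbing and allocation logging.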
void __malloc_init()
{
    new (&malloc_lock()) LibThread::Lock();
    if (getenv("LIBC_NOSCRUB_MALLOC"))
        s_scrub_malloc = false;
    if (getenv("LIBC_NOSCRUB_FREE"))
        s_scrub_free = false;
    if (getenv("LIBC_LOG_MALLOC"))
        s_log_malloc = true;
}
}