mallocdefs.h

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/IntrusiveList.h>
#include <AK/Types.h>

#define MAGIC_PAGE_HEADER 0x42657274     // 'Bert'
#define MAGIC_BIGALLOC_HEADER 0x42697267 // 'Birg'
#define MALLOC_SCRUB_BYTE 0xdc
#define FREE_SCRUB_BYTE 0xed

#define PAGE_ROUND_UP(x) ((((size_t)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1)))
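// Worked example (assuming PAGE_SIZE == 4096): PAGE_ROUND_UP(1) == 4096,
// PAGE_ROUND_UP(4096) == 4096, and PAGE_ROUND_UP(4097) == 8192.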

// Zero-terminated list of chunk sizes served by ChunkedBlock allocations;
// num_size_classes excludes the terminating 0.
static constexpr unsigned short size_classes[] = { 8, 16, 32, 64, 128, 256, 504, 1016, 2032, 4088, 8184, 16376, 32752, 0 };
static constexpr size_t num_size_classes = (sizeof(size_classes) / sizeof(unsigned short)) - 1;

consteval bool check_size_classes_alignment()
{
    for (size_t i = 0; i < num_size_classes; i++) {
        if ((size_classes[i] % 8) != 0)
            return false;
    }
    return true;
}
static_assert(check_size_classes_alignment());

struct CommonHeader {
    size_t m_magic;
    size_t m_size;
};

struct BigAllocationBlock : public CommonHeader {
    BigAllocationBlock(size_t size)
    {
        m_magic = MAGIC_BIGALLOC_HEADER;
        m_size = size;
    }
    // Zero-length member marking where the allocation's data begins.
    unsigned char* m_slot[0];
};

struct FreelistEntry {
    FreelistEntry* next;
};

struct ChunkedBlock : public CommonHeader {
    static constexpr size_t block_size = 64 * KiB;
    static constexpr size_t block_mask = ~(block_size - 1);

    ChunkedBlock(size_t bytes_per_chunk)
    {
        m_magic = MAGIC_PAGE_HEADER;
        m_size = bytes_per_chunk;
        m_free_chunks = chunk_capacity();
    }

    IntrusiveListNode<ChunkedBlock> m_list_node;
    size_t m_next_lazy_freelist_index { 0 };
    FreelistEntry* m_freelist { nullptr };
    size_t m_free_chunks { 0 };
    // Chunk payload storage begins here.
    [[gnu::aligned(8)]] unsigned char m_slot[0];

    void* chunk(size_t index)
    {
        return &m_slot[index * m_size];
    }
    bool is_full() const { return m_free_chunks == 0; }
    size_t bytes_per_chunk() const { return m_size; }
    size_t free_chunks() const { return m_free_chunks; }
    size_t used_chunks() const { return chunk_capacity() - m_free_chunks; }
    size_t chunk_capacity() const { return (block_size - sizeof(ChunkedBlock)) / m_size; }

    using List = IntrusiveList<ChunkedBlock, RawPtr<ChunkedBlock>, &ChunkedBlock::m_list_node>;
};
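
A minimal usage sketch, not part of mallocdefs.h: it shows how an allocator along the lines of Serenity's LibC malloc might consume these definitions. The helper names below are hypothetical, and the sketch assumes ChunkedBlock headers sit at 64 KiB-aligned addresses so that block_mask can recover them from a chunk pointer.

#include <AK/Types.h>
#include <mallocdefs.h> // assumed include path for the header above

// Hypothetical helper: pick the smallest size class that can hold `size`,
// or 0 if the request is too large and must go through a BigAllocationBlock.
static size_t size_class_for(size_t size)
{
    for (size_t i = 0; i < num_size_classes; ++i) {
        if (size_classes[i] >= size)
            return size_classes[i];
    }
    return 0;
}

// Hypothetical helper: recover the owning block header from a chunk pointer
// by masking off the low bits (assumes 64 KiB-aligned blocks).
static ChunkedBlock* block_for_pointer(void* ptr)
{
    return reinterpret_cast<ChunkedBlock*>(reinterpret_cast<FlatPtr>(ptr) & ChunkedBlock::block_mask);
}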