SlabAllocator.cpp

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Assertions.h>
#include <AK/Memory.h>
#include <Kernel/Heap/SlabAllocator.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/Memory/Region.h>
#include <Kernel/Sections.h>

#define SANITIZE_SLABS

namespace Kernel {
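
// A simple slab allocator for small, fixed-size kernel allocations.
// Each SlabAllocator<N> carves a chunk of eternal kernel heap into N-byte
// slabs and threads the free ones into an intrusive, lock-free freelist:
// the first word of every free slab doubles as its `next` pointer, so
// alloc() and dealloc() are just atomic pops and pushes on m_freelist.
// If the pool runs dry, alloc() falls back to kmalloc(), and dealloc()
// routes any pointer outside the slab range back to kfree().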
template<size_t templated_slab_size>
class SlabAllocator {
public:
    SlabAllocator() = default;

    void init(size_t size)
    {
        m_base = kmalloc_eternal(size);
        m_end = (u8*)m_base + size;
        FreeSlab* slabs = (FreeSlab*)m_base;
        m_slab_count = size / templated_slab_size;
        for (size_t i = 1; i < m_slab_count; ++i) {
            slabs[i].next = &slabs[i - 1];
        }
        slabs[0].next = nullptr;
        m_freelist = &slabs[m_slab_count - 1];
        m_num_allocated = 0;
    }

    constexpr size_t slab_size() const { return templated_slab_size; }
    size_t slab_count() const { return m_slab_count; }

    void* alloc()
    {
        FreeSlab* free_slab;
        {
            // We want to avoid being swapped out in the middle of this
            ScopedCritical critical;
            FreeSlab* next_free;
            free_slab = m_freelist.load(AK::memory_order_consume);
            do {
                if (!free_slab)
                    return kmalloc(slab_size());
                // It's possible another processor is doing the same thing at
                // the same time, so next_free *can* be a bogus pointer. However,
                // in that case compare_exchange_strong would fail and we would
                // try again.
                next_free = free_slab->next;
            } while (!m_freelist.compare_exchange_strong(free_slab, next_free, AK::memory_order_acq_rel));

            m_num_allocated++;
        }

#ifdef SANITIZE_SLABS
        memset(free_slab, SLAB_ALLOC_SCRUB_BYTE, slab_size());
#endif
        return free_slab;
    }

    void dealloc(void* ptr)
    {
        VERIFY(ptr);
        if (ptr < m_base || ptr >= m_end) {
            kfree(ptr);
            return;
        }
        FreeSlab* free_slab = (FreeSlab*)ptr;
#ifdef SANITIZE_SLABS
        if (slab_size() > sizeof(FreeSlab*))
            memset(free_slab->padding, SLAB_DEALLOC_SCRUB_BYTE, sizeof(FreeSlab::padding));
#endif

        // We want to avoid being swapped out in the middle of this
        ScopedCritical critical;
        FreeSlab* next_free = m_freelist.load(AK::memory_order_consume);
        do {
            free_slab->next = next_free;
        } while (!m_freelist.compare_exchange_strong(next_free, free_slab, AK::memory_order_acq_rel));

        m_num_allocated--;
    }

    size_t num_allocated() const { return m_num_allocated; }
    size_t num_free() const { return m_slab_count - m_num_allocated; }

private:
    struct FreeSlab {
        FreeSlab* next;
        char padding[templated_slab_size - sizeof(FreeSlab*)];
    };

    Atomic<FreeSlab*> m_freelist { nullptr };
    Atomic<size_t, AK::MemoryOrder::memory_order_relaxed> m_num_allocated;
    size_t m_slab_count;
    void* m_base { nullptr };
    void* m_end { nullptr };

    static_assert(sizeof(FreeSlab) == templated_slab_size);
};

static SlabAllocator<16> s_slab_allocator_16;
static SlabAllocator<32> s_slab_allocator_32;
static SlabAllocator<64> s_slab_allocator_64;
static SlabAllocator<128> s_slab_allocator_128;
static SlabAllocator<256> s_slab_allocator_256;

#if ARCH(I386)
static_assert(sizeof(Memory::Region) <= s_slab_allocator_128.slab_size());
#endif

template<typename Callback>
void for_each_allocator(Callback callback)
{
    callback(s_slab_allocator_16);
    callback(s_slab_allocator_32);
    callback(s_slab_allocator_64);
    callback(s_slab_allocator_128);
    callback(s_slab_allocator_256);
}
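
// The slab pools set up below add up to 1408 KiB (3 * 128 KiB + 2 * 512 KiB)
// of eternal heap, allocated once at init time and never returned.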
UNMAP_AFTER_INIT void slab_alloc_init()
{
    s_slab_allocator_16.init(128 * KiB);
    s_slab_allocator_32.init(128 * KiB);
    s_slab_allocator_64.init(512 * KiB);
    s_slab_allocator_128.init(512 * KiB);
    s_slab_allocator_256.init(128 * KiB);
}

void* slab_alloc(size_t slab_size)
{
    if (slab_size <= 16)
        return s_slab_allocator_16.alloc();
    if (slab_size <= 32)
        return s_slab_allocator_32.alloc();
    if (slab_size <= 64)
        return s_slab_allocator_64.alloc();
    if (slab_size <= 128)
        return s_slab_allocator_128.alloc();
    if (slab_size <= 256)
        return s_slab_allocator_256.alloc();
    VERIFY_NOT_REACHED();
}

void slab_dealloc(void* ptr, size_t slab_size)
{
    if (slab_size <= 16)
        return s_slab_allocator_16.dealloc(ptr);
    if (slab_size <= 32)
        return s_slab_allocator_32.dealloc(ptr);
    if (slab_size <= 64)
        return s_slab_allocator_64.dealloc(ptr);
    if (slab_size <= 128)
        return s_slab_allocator_128.dealloc(ptr);
    if (slab_size <= 256)
        return s_slab_allocator_256.dealloc(ptr);
    VERIFY_NOT_REACHED();
}

void slab_alloc_stats(Function<void(size_t slab_size, size_t allocated, size_t free)> callback)
{
    for_each_allocator([&](auto& allocator) {
        auto num_allocated = allocator.num_allocated();
        auto num_free = allocator.slab_count() - num_allocated;
        callback(allocator.slab_size(), num_allocated, num_free);
    });
}

}
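
// Illustrative usage sketch: a caller that wants slab-backed storage for a
// small, fixed-size object could pair slab_alloc() with placement new,
// provided the object fits one of the size classes above. `Thing` is a
// hypothetical type used only for this example.
//
//     void* storage = slab_alloc(sizeof(Thing));
//     auto* thing = new (storage) Thing;
//     // ... use *thing ...
//     thing->~Thing();
//     slab_dealloc(thing, sizeof(Thing));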