diff --git a/Kernel/FileSystem/ProcFS.cpp b/Kernel/FileSystem/ProcFS.cpp
index f609b474592..23c05c1dd83 100644
--- a/Kernel/FileSystem/ProcFS.cpp
+++ b/Kernel/FileSystem/ProcFS.cpp
@@ -644,6 +644,11 @@ Optional<KBuffer> procfs$memstat(InodeIdentifier)
     json.add("super_physical_available", MM.super_physical_pages());
     json.add("kmalloc_call_count", g_kmalloc_call_count);
     json.add("kfree_call_count", g_kfree_call_count);
+    slab_alloc_stats([&json](size_t slab_size, size_t num_allocated, size_t num_free) {
+        auto prefix = String::format("slab_%zu", slab_size);
+        json.add(String::format("%s_num_allocated", prefix.characters()), num_allocated);
+        json.add(String::format("%s_num_free", prefix.characters()), num_free);
+    });
     json.finish();
     return builder.build();
 }
diff --git a/Kernel/Heap/SlabAllocator.cpp b/Kernel/Heap/SlabAllocator.cpp
new file mode 100644
index 00000000000..b697d5d05a6
--- /dev/null
+++ b/Kernel/Heap/SlabAllocator.cpp
@@ -0,0 +1,118 @@
+#include <AK/Assertions.h>
+#include <Kernel/Heap/SlabAllocator.h>
+#include <Kernel/Heap/kmalloc.h>
+#include <Kernel/VM/Region.h>
+
+template<size_t templated_slab_size>
+class SlabAllocator {
+public:
+    SlabAllocator() {}
+
+    void init(size_t size)
+    {
+        void* base = kmalloc_eternal(size);
+        FreeSlab* slabs = (FreeSlab*)base;
+        size_t slab_count = size / templated_slab_size;
+        for (size_t i = 1; i < slab_count; ++i) {
+            slabs[i].next = &slabs[i - 1];
+        }
+        slabs[0].next = nullptr;
+        m_freelist = &slabs[slab_count - 1];
+        m_num_allocated = 0;
+        m_num_free = slab_count;
+    }
+
+    constexpr size_t slab_size() const { return templated_slab_size; }
+
+    void* alloc()
+    {
+        InterruptDisabler disabler;
+        ASSERT(m_freelist);
+        void* ptr = m_freelist;
+        m_freelist = m_freelist->next;
+        ++m_num_allocated;
+        --m_num_free;
+        return ptr;
+    }
+
+    void dealloc(void* ptr)
+    {
+        InterruptDisabler disabler;
+        ASSERT(ptr);
+        ((FreeSlab*)ptr)->next = m_freelist;
+        m_freelist = (FreeSlab*)ptr;
+        --m_num_allocated;
+        ++m_num_free;
+    }
+
+    size_t num_allocated() const { return m_num_allocated; }
+    size_t num_free() const { return m_num_free; }
+
+private:
+    struct FreeSlab {
+        FreeSlab* next { nullptr };
+        char padding[templated_slab_size - sizeof(FreeSlab*)];
+    };
+
+    FreeSlab* m_freelist { nullptr };
+    size_t m_num_allocated { 0 };
+    size_t m_num_free { 0 };
+
+    static_assert(sizeof(FreeSlab) == templated_slab_size);
+};
+
+static SlabAllocator<8> s_slab_allocator_8;
+static SlabAllocator<16> s_slab_allocator_16;
+static SlabAllocator<32> s_slab_allocator_32;
+static SlabAllocator<52> s_slab_allocator_52;
+
+static_assert(sizeof(Region) <= s_slab_allocator_52.slab_size());
+
+template<typename Callback>
+void for_each_allocator(Callback callback)
+{
+    callback(s_slab_allocator_8);
+    callback(s_slab_allocator_16);
+    callback(s_slab_allocator_32);
+    callback(s_slab_allocator_52);
+}
+
+void slab_alloc_init()
+{
+    for_each_allocator([&](auto& allocator) {
+        allocator.init(128 * KB);
+    });
+}
+
+void* slab_alloc(size_t slab_size)
+{
+    if (slab_size <= 8)
+        return s_slab_allocator_8.alloc();
+    if (slab_size <= 16)
+        return s_slab_allocator_16.alloc();
+    if (slab_size <= 32)
+        return s_slab_allocator_32.alloc();
+    if (slab_size <= 52)
+        return s_slab_allocator_52.alloc();
+    ASSERT_NOT_REACHED();
+}
+
+void slab_dealloc(void* ptr, size_t slab_size)
+{
+    if (slab_size <= 8)
+        return s_slab_allocator_8.dealloc(ptr);
+    if (slab_size <= 16)
+        return s_slab_allocator_16.dealloc(ptr);
+    if (slab_size <= 32)
+        return s_slab_allocator_32.dealloc(ptr);
+    if (slab_size <= 52)
+        return s_slab_allocator_52.dealloc(ptr);
+    ASSERT_NOT_REACHED();
+}
+
+void slab_alloc_stats(Function<void(size_t slab_size, size_t num_allocated, size_t num_free)> callback)
+{
+    for_each_allocator([&](auto& allocator) {
+        callback(allocator.slab_size(), allocator.num_allocated(), allocator.num_free());
+    });
+}
diff --git a/Kernel/Heap/SlabAllocator.h b/Kernel/Heap/SlabAllocator.h
new file mode 100644
index 00000000000..c44a3bb90b1
--- /dev/null
+++ b/Kernel/Heap/SlabAllocator.h
@@ -0,0 +1,18 @@
+#pragma once
+
+#include <AK/Function.h>
+#include <AK/Types.h>
+
+class JsonObjectSerializer;
+
+void* slab_alloc(size_t slab_size);
+void slab_dealloc(void*, size_t slab_size);
+void slab_alloc_init();
+void slab_alloc_stats(Function<void(size_t slab_size, size_t num_allocated, size_t num_free)>);
+
+#define MAKE_SLAB_ALLOCATED(type) \
+public: \
+    void* operator new(size_t) { return slab_alloc(sizeof(type)); } \
+    void operator delete(void* ptr) { slab_dealloc(ptr, sizeof(type)); } \
+\
+private:
diff --git a/Kernel/Makefile b/Kernel/Makefile
index 3a1d0efdae2..aeb8a15e858 100644
--- a/Kernel/Makefile
+++ b/Kernel/Makefile
@@ -3,6 +3,7 @@ include ../Makefile.common
 KERNEL_OBJS = \
     init.o \
     Heap/kmalloc.o \
+    Heap/SlabAllocator.o \
     StdLib.o \
     Lock.o \
     Arch/i386/CPU.o \
diff --git a/Kernel/VM/PhysicalPage.h b/Kernel/VM/PhysicalPage.h
index cb510612135..477a9cca3e6 100644
--- a/Kernel/VM/PhysicalPage.h
+++ b/Kernel/VM/PhysicalPage.h
@@ -2,6 +2,7 @@
 
 #include <AK/NonnullRefPtr.h>
 #include <Kernel/Assertions.h>
+#include <Kernel/Heap/SlabAllocator.h>
 #include <Kernel/PhysicalAddress.h>
 
 class PhysicalPage {
@@ -9,6 +10,7 @@ class PhysicalPage {
     friend class PageDirectory;
     friend class VMObject;
 
+    MAKE_SLAB_ALLOCATED(PhysicalPage)
 public:
     PhysicalAddress paddr() const { return m_paddr; }
 
diff --git a/Kernel/VM/Region.h b/Kernel/VM/Region.h
index 7d1e02e6b77..408c620b299 100644
--- a/Kernel/VM/Region.h
+++ b/Kernel/VM/Region.h
@@ -1,18 +1,20 @@
 #pragma once
 
-#include <AK/AKString.h>
 #include <AK/Bitmap.h>
 #include <AK/InlineLinkedList.h>
+#include <AK/String.h>
+#include <Kernel/Heap/SlabAllocator.h>
 #include <Kernel/VM/PageDirectory.h>
 #include <Kernel/VM/RangeAllocator.h>
 
 class Inode;
 class VMObject;
 
-class Region : public RefCounted<Region>
+class Region final : public RefCounted<Region>
     , public InlineLinkedListNode<Region> {
     friend class MemoryManager;
+    MAKE_SLAB_ALLOCATED(Region)
 public:
     enum Access {
         Read = 1,
diff --git a/Kernel/init.cpp b/Kernel/init.cpp
index f6a1da6c901..d73b1bca1db 100644
--- a/Kernel/init.cpp
+++ b/Kernel/init.cpp
@@ -29,6 +29,7 @@
 #include
 #include
 #include
+#include <Kernel/Heap/SlabAllocator.h>
 #include
 #include
 #include
@@ -187,6 +188,7 @@ extern "C" [[noreturn]] void init()
     sse_init();
 
     kmalloc_init();
+    slab_alloc_init();
     init_ksyms(); // must come after kmalloc_init because we use AK_MAKE_ETERNAL in KParams
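
A note on the allocation scheme introduced above: each SlabAllocator carves a kmalloc_eternal()'d arena into fixed-size slabs and threads an intrusive freelist through them, storing each free slab's "next" link inside the slab itself, so alloc() and dealloc() are O(1) pointer pops and pushes with no per-slab metadata. The following is a minimal, self-contained userspace sketch of the same technique; all names are illustrative (DemoSlabAllocator, a plain malloc() arena instead of kmalloc_eternal(), assert() instead of InterruptDisabler + ASSERT), not code from this diff.

#include <cassert>
#include <cstddef>
#include <cstdio>
#include <cstdlib>

template<size_t slab_size>
class DemoSlabAllocator {
public:
    // Carve a malloc()'d arena into slab_size-byte slabs and thread a singly
    // linked freelist through them; each free slab stores the link in its own
    // first bytes, so bookkeeping costs no memory beyond the arena itself.
    void init(size_t arena_bytes)
    {
        void* base = malloc(arena_bytes);
        assert(base);
        auto* slabs = static_cast<FreeSlab*>(base);
        size_t slab_count = arena_bytes / slab_size;
        for (size_t i = 1; i < slab_count; ++i)
            slabs[i].next = &slabs[i - 1];
        slabs[0].next = nullptr;
        m_freelist = &slabs[slab_count - 1];
        m_num_free = slab_count;
    }

    void* alloc()
    {
        assert(m_freelist); // out of slabs (the kernel version asserts too)
        void* ptr = m_freelist; // pop the freelist head: O(1)
        m_freelist = m_freelist->next;
        --m_num_free;
        return ptr;
    }

    void dealloc(void* ptr)
    {
        assert(ptr);
        auto* slab = static_cast<FreeSlab*>(ptr); // push back onto the head: O(1)
        slab->next = m_freelist;
        m_freelist = slab;
        ++m_num_free;
    }

    size_t num_free() const { return m_num_free; }

private:
    struct FreeSlab {
        FreeSlab* next;
        char padding[slab_size - sizeof(FreeSlab*)];
    };
    static_assert(sizeof(FreeSlab) == slab_size, "slabs must tile the arena exactly");

    FreeSlab* m_freelist { nullptr };
    size_t m_num_free { 0 };
};

int main()
{
    DemoSlabAllocator<32> allocator;
    allocator.init(4096); // 128 slabs of 32 bytes each
    void* a = allocator.alloc();
    void* b = allocator.alloc();
    printf("free after two allocs: %zu\n", allocator.num_free()); // 126
    allocator.dealloc(b);
    allocator.dealloc(a);
    printf("free after two frees:  %zu\n", allocator.num_free()); // 128
    return 0;
}

The padding member forces sizeof(FreeSlab) up to the slab size, so slabs tile the arena exactly; the static_assert catches any size class where the freelist link alone would overrun a slab.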
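Similarly, here is a hypothetical sketch of how a class opts into slab allocation via MAKE_SLAB_ALLOCATED: the macro plants class-specific operator new/delete overloads that route through slab_alloc()/slab_dealloc() with the exact object size, which the kernel then maps onto the 8/16/32/52-byte freelists. Thingy and the malloc()-backed stubs below are illustrative stand-ins so the sketch runs outside the kernel; only the macro body matches the diff.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Userspace stand-ins for the kernel's slab entry points, so the routing
// through the macro is observable without the freelists.
static void* slab_alloc(size_t slab_size)
{
    printf("slab_alloc(%zu)\n", slab_size);
    return malloc(slab_size);
}

static void slab_dealloc(void* ptr, size_t slab_size)
{
    printf("slab_dealloc(%zu)\n", slab_size);
    free(ptr);
}

#define MAKE_SLAB_ALLOCATED(type) \
public: \
    void* operator new(size_t) { return slab_alloc(sizeof(type)); } \
    void operator delete(void* ptr) { slab_dealloc(ptr, sizeof(type)); } \
\
private:

class Thingy {
    MAKE_SLAB_ALLOCATED(Thingy)
public:
    int value() const { return m_value; }
private:
    int m_value { 42 };
};

int main()
{
    // `new` now resolves to Thingy::operator new -> slab_alloc(sizeof(Thingy)).
    // In the kernel, a 4-byte Thingy would land in s_slab_allocator_8.
    auto* t = new Thingy;
    printf("value: %d\n", t->value());
    delete t; // routes through slab_dealloc(ptr, sizeof(Thingy))
    return 0;
}

With the two real opt-ins in this diff, Region and PhysicalPage allocations bypass kmalloc() entirely, and the ProcFS hook reports per-size-class counters under keys of the form slab_8_num_allocated / slab_8_num_free (and likewise for 16, 32, and 52).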