ladybird/AK/BumpAllocator.h
Ben Wiederhake 50698a0db4 AK: Prevent accidental misuse of BumpAllocator
In particular, we implicitly required that callers initialize the
returned instances themselves (solved by making
UniformBumpAllocator::allocate call the constructor), and BumpAllocator
itself cannot handle classes that are not trivially destructible
(solved by deleting the method).

Co-authored-by: Ali Mohammad Pur <ali.mpfard@gmail.com>
2021-10-23 19:02:54 +01:00

/*
 * Copyright (c) 2021, Ali Mohammad Pur <mpfard@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/StdLibExtras.h>
#include <AK/Types.h>
#include <AK/kmalloc.h>
#include <sys/mman.h>

namespace AK {
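
// A chunked bump ("arena") allocator: allocations are carved sequentially out of
// fixed-size chunks (kmalloc'd 4 KiB blocks by default, rounded by kmalloc_good_size,
// or 4 MiB mmap'd regions when use_mmap is set) and can only be released all at once
// via deallocate_all(). One freed chunk may be kept in the static
// s_unused_allocation_cache, shared by all allocators of the same instantiation, so a
// later allocation can reuse it without going back to the system.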
template<bool use_mmap = false, size_t chunk_size = use_mmap ? 4 * MiB : 4 * KiB>
class BumpAllocator {
public:
    BumpAllocator()
    {
        if constexpr (use_mmap)
            m_chunk_size = chunk_size;
        else
            m_chunk_size = kmalloc_good_size(chunk_size);
    }

    ~BumpAllocator()
    {
        deallocate_all();
    }
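
    // Returns a pointer to `size` bytes aligned to `align`, bumping the offset within the
    // current chunk; the request must fit within a single chunk. Starts a fresh chunk when
    // the current one is full, and returns nullptr only if a new chunk cannot be obtained.
    // Callers are responsible for constructing objects in the returned storage.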
    void* allocate(size_t size, size_t align)
    {
        VERIFY(size < m_chunk_size - sizeof(ChunkHeader));
        if (!m_current_chunk) {
            if (!allocate_a_chunk())
                return nullptr;
        }

    allocate_again:;
        VERIFY(m_current_chunk != 0);

        auto aligned_ptr = align_up_to(m_byte_offset_into_current_chunk + m_current_chunk, align);
        auto next_offset = aligned_ptr + size - m_current_chunk;
        if (next_offset > m_chunk_size) {
            if (!allocate_a_chunk())
                return nullptr;
            goto allocate_again;
        }
        m_byte_offset_into_current_chunk = next_offset;
        return (void*)aligned_ptr;
    }
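
    // Walks every chunk and hands it back to the system (munmap or kfree_sized), except
    // that the last chunk is stashed in s_unused_allocation_cache (if the cache is empty)
    // instead of being freed, so it can be reused by a later allocation.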
    void deallocate_all()
    {
        if (!m_head_chunk)
            return;
        for_each_chunk([this](auto chunk) {
            if (!s_unused_allocation_cache) {
                auto next_chunk = ((ChunkHeader const*)chunk)->next_chunk;
                if (!next_chunk) {
                    s_unused_allocation_cache = chunk;
                    return;
                }
            }
            if constexpr (use_mmap) {
                munmap((void*)chunk, m_chunk_size);
            } else {
                kfree_sized((void*)chunk, m_chunk_size);
            }
        });
    }

protected:
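    // Invokes `fn` with the address of every chunk in the singly linked chunk list,
    // verifying the header magic along the way; the next pointer is read before the
    // callback runs, so the callback is allowed to free the chunk.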
    template<typename TFn>
    void for_each_chunk(TFn&& fn)
    {
        auto head_chunk = m_head_chunk;
        while (head_chunk) {
            auto& chunk_header = *(ChunkHeader const*)head_chunk;
            VERIFY(chunk_header.magic == chunk_magic);
            if (head_chunk == m_current_chunk)
                VERIFY(chunk_header.next_chunk == 0);
            auto next_chunk = chunk_header.next_chunk;
            fn(head_chunk);
            head_chunk = next_chunk;
        }
    }
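
    // Obtains a fresh chunk (reusing the cached one if available, otherwise mmap'ing or
    // kmalloc'ing a new one), writes its header, and appends it to the chunk list as the
    // new current chunk. Returns false if the underlying allocation fails.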
    bool allocate_a_chunk()
    {
        // dbgln("Allocated {} entries in previous chunk and have {} unusable bytes", m_allocations_in_previous_chunk, m_chunk_size - m_byte_offset_into_current_chunk);
        // m_allocations_in_previous_chunk = 0;
        void* new_chunk;
        if (s_unused_allocation_cache) {
            new_chunk = (void*)exchange(s_unused_allocation_cache, 0);
        } else {
            if constexpr (use_mmap) {
#ifdef __serenity__
                new_chunk = serenity_mmap(nullptr, m_chunk_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_RANDOMIZED | MAP_PRIVATE, 0, 0, m_chunk_size, "BumpAllocator Chunk");
#else
                new_chunk = mmap(nullptr, m_chunk_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
#endif
                if (new_chunk == MAP_FAILED)
                    return false;
            } else {
                new_chunk = kmalloc(m_chunk_size);
                if (!new_chunk)
                    return false;
            }
        }

        auto& new_header = *(ChunkHeader*)new_chunk;
        new_header.magic = chunk_magic;
        new_header.next_chunk = 0;
        m_byte_offset_into_current_chunk = sizeof(ChunkHeader);

        if (!m_head_chunk) {
            VERIFY(!m_current_chunk);
            m_head_chunk = (FlatPtr)new_chunk;
            m_current_chunk = (FlatPtr)new_chunk;
            return true;
        }

        VERIFY(m_current_chunk);
        auto& old_header = *(ChunkHeader*)m_current_chunk;
        VERIFY(old_header.magic == chunk_magic);
        VERIFY(old_header.next_chunk == 0);
        old_header.next_chunk = (FlatPtr)new_chunk;
        m_current_chunk = (FlatPtr)new_chunk;
        return true;
    }
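
    // Every chunk begins with a ChunkHeader stamped with chunk_magic; next_chunk links the
    // chunks into a singly linked list, and the magic guards against walking corrupted memory.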
    constexpr static FlatPtr chunk_magic = explode_byte(0xdf);
    struct ChunkHeader {
        FlatPtr magic;
        FlatPtr next_chunk;
    };

    FlatPtr m_head_chunk { 0 };
    FlatPtr m_current_chunk { 0 };
    size_t m_byte_offset_into_current_chunk { 0 };
    size_t m_chunk_size { 0 };

    static FlatPtr s_unused_allocation_cache;
};
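
// Typed convenience wrapper around BumpAllocator: allocate() placement-news a T into the
// bump-allocated storage (so callers no longer have to construct the object themselves),
// and destroy_all()/deallocate_all() run ~T() on every object before the chunks are released.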
template<typename T, bool use_mmap = false, size_t chunk_size = use_mmap ? 4 * MiB : 4 * KiB>
class UniformBumpAllocator : protected BumpAllocator<use_mmap, chunk_size> {
    using Allocator = BumpAllocator<use_mmap, chunk_size>;

public:
    UniformBumpAllocator() = default;

    ~UniformBumpAllocator()
    {
        destroy_all();
    }

    template<typename... Args>
    T* allocate(Args&&... args)
    {
        auto ptr = (T*)Allocator::allocate(sizeof(T), alignof(T));
        if (!ptr)
            return nullptr;
        return new (ptr) T { forward<Args>(args)... };
    }

    void deallocate_all()
    {
        destroy_all();
        Allocator::deallocate_all();
    }
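
    // Calls ~T() on every object that was handed out: each chunk is scanned from the first
    // aligned slot after its header up to either the end of the chunk (rounded down to a
    // whole object) or, for the current chunk, the current bump offset.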
    void destroy_all()
    {
        this->for_each_chunk([&](auto chunk) {
            auto base_ptr = align_up_to(chunk + sizeof(typename Allocator::ChunkHeader), alignof(T));
            // Compute the offset of the first byte *after* this chunk:
            FlatPtr end_offset = base_ptr + this->m_chunk_size - chunk;
            // Compute the offset of the first byte *after* the last valid object, in case the end of the chunk does not align with the end of an object:
            end_offset = (end_offset / sizeof(T)) * sizeof(T);
            if (chunk == this->m_current_chunk)
                end_offset = this->m_byte_offset_into_current_chunk;
            for (; base_ptr - chunk < end_offset; base_ptr += sizeof(T))
                reinterpret_cast<T*>(base_ptr)->~T();
        });
    }
};
template<bool use_mmap, size_t size>
inline FlatPtr BumpAllocator<use_mmap, size>::s_unused_allocation_cache { 0 };

}

using AK::BumpAllocator;
using AK::UniformBumpAllocator;
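
A minimal usage sketch, not part of the header: the Point struct, the example() function, and the literal arguments are illustrative only, while UniformBumpAllocator, allocate(), and deallocate_all() are the APIs defined above.

#include <AK/BumpAllocator.h>

struct Point {
    int x;
    int y;
};

void example()
{
    UniformBumpAllocator<Point> allocator;

    // allocate() forwards its arguments to T's constructor, so the returned
    // object is already initialized (the point of the commit above).
    Point* p = allocator.allocate(1, 2);
    if (!p)
        return; // Chunk allocation failed.

    // All Points are destroyed and their chunks released when `allocator` goes
    // out of scope, or explicitly via allocator.deallocate_all().
}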