
This implements malloc_good_size(), an API from macOS that returns the true allocation size for a given requested size. This lets callers make use of all the available memory in a malloc chunk. For example, for a malloc request of 35 bytes our malloc internally uses a chunk of size 64, leaving the remaining 29 bytes unused. Knowing the true allocation size makes that otherwise wasted memory available to Vector, HashTable, and potentially other callers in the future.
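
For instance (a minimal sketch, not code from this patch; grow_capacity is a hypothetical helper), a dynamic array that needs room for at least needed_count elements can round its byte request through malloc_good_size() and size its capacity to the whole chunk:

#include <stdlib.h>

size_t malloc_good_size(size_t); // the API added by this change

// Hypothetical helper: how many elements should a container actually
// allocate when it needs at least needed_count of them?
// (Overflow checking omitted for brevity.)
static size_t grow_capacity(size_t needed_count, size_t element_size)
{
    size_t requested_bytes = needed_count * element_size;
    // malloc would back this request with a chunk of at least this size...
    size_t usable_bytes = malloc_good_size(requested_bytes);
    // ...so claim every whole element that fits instead of wasting the slack.
    return usable_bytes / element_size;
}

With the example above, a request for 35 one-byte elements reports 64 usable bytes, so the container would grow its capacity straight to 64.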
/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Debug.h>
#include <AK/InlineLinkedList.h>
#include <AK/ScopedValueRollback.h>
#include <AK/Vector.h>
#include <LibELF/AuxiliaryVector.h>
#include <LibThread/Lock.h>
#include <assert.h>
#include <errno.h>
#include <mallocdefs.h>
#include <serenity.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/internals.h>
#include <sys/mman.h>
#include <syscall.h>

// FIXME: Thread safety.

#define RECYCLE_BIG_ALLOCATIONS

#define PAGE_ROUND_UP(x) ((((size_t)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1)))
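
// Note: The lock lives in raw static storage and is only constructed (via
// placement new in __malloc_init() below) so that it's usable before global
// constructors run and is never torn down by a global destructor.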
static LibThread::Lock& malloc_lock()
{
    static u32 lock_storage[sizeof(LibThread::Lock) / sizeof(u32)];
    return *reinterpret_cast<LibThread::Lock*>(&lock_storage);
}

constexpr size_t number_of_chunked_blocks_to_keep_around_per_size_class = 4;
constexpr size_t number_of_big_blocks_to_keep_around_per_size_class = 8;

static bool s_log_malloc = false;
static bool s_scrub_malloc = true;
static bool s_scrub_free = true;
static bool s_profiling = false;
static bool s_in_userspace_emulator = false;
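
// When running inside the UserspaceEmulator, notify it about every malloc,
// free and realloc via the SC_emuctl syscall so it can track heap state.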
ALWAYS_INLINE static void ue_notify_malloc(const void* ptr, size_t size)
{
    if (s_in_userspace_emulator)
        syscall(SC_emuctl, 1, size, (FlatPtr)ptr);
}

ALWAYS_INLINE static void ue_notify_free(const void* ptr)
{
    if (s_in_userspace_emulator)
        syscall(SC_emuctl, 2, (FlatPtr)ptr, 0);
}

ALWAYS_INLINE static void ue_notify_realloc(const void* ptr, size_t size)
{
    if (s_in_userspace_emulator)
        syscall(SC_emuctl, 3, size, (FlatPtr)ptr);
}

struct MallocStats {
    size_t number_of_malloc_calls;

    size_t number_of_big_allocator_hits;
    size_t number_of_big_allocator_purge_hits;
    size_t number_of_big_allocs;

    size_t number_of_empty_block_hits;
    size_t number_of_empty_block_purge_hits;
    size_t number_of_block_allocs;
    size_t number_of_blocks_full;

    size_t number_of_free_calls;

    size_t number_of_big_allocator_keeps;
    size_t number_of_big_allocator_frees;

    size_t number_of_freed_full_blocks;
    size_t number_of_keeps;
    size_t number_of_frees;
};
static MallocStats g_malloc_stats = {};

struct Allocator {
    size_t size { 0 };
    size_t block_count { 0 };
    size_t empty_block_count { 0 };
    ChunkedBlock* empty_blocks[number_of_chunked_blocks_to_keep_around_per_size_class] { nullptr };
    InlineLinkedList<ChunkedBlock> usable_blocks;
    InlineLinkedList<ChunkedBlock> full_blocks;
};

struct BigAllocator {
    Vector<BigAllocationBlock*, number_of_big_blocks_to_keep_around_per_size_class> blocks;
};

// Allocators will be initialized in __malloc_init.
// We cannot rely on global constructors to initialize them,
// because they must be initialized before other global constructors
// are run. Similarly, we cannot allow global destructors to destruct
// them. We could have used AK::NeverDestroyed to prevent the latter,
// but it would not have helped with the former.
static u8 g_allocators_storage[sizeof(Allocator) * num_size_classes];
static u8 g_big_allocators_storage[sizeof(BigAllocator)];

static inline Allocator (&allocators())[num_size_classes]
{
    return reinterpret_cast<Allocator(&)[num_size_classes]>(g_allocators_storage);
}

static inline BigAllocator (&big_allocators())[1]
{
    return reinterpret_cast<BigAllocator(&)[1]>(g_big_allocators_storage);
}
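
// Finds the allocator for the smallest size class that fits the request and
// reports that class's chunk size via good_size. Returns nullptr for requests
// bigger than any size class; those take the BigAllocationBlock path below,
// and good_size becomes the page-rounded request instead.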
static Allocator* allocator_for_size(size_t size, size_t& good_size)
{
    for (size_t i = 0; size_classes[i]; ++i) {
        if (size <= size_classes[i]) {
            good_size = size_classes[i];
            return &allocators()[i];
        }
    }
    good_size = PAGE_ROUND_UP(size);
    return nullptr;
}

#ifdef RECYCLE_BIG_ALLOCATIONS
static BigAllocator* big_allocator_for_size(size_t size)
{
    if (size == 65536)
        return &big_allocators()[0];
    return nullptr;
}
#endif
extern "C" {
|
|
|
|
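
// serenity_mmap() lets us request ChunkedBlock::block_size alignment, so every
// block starts on a block_size boundary; free() and malloc_size() rely on this
// to recover the block header by masking the low bits of a pointer.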
static void* os_alloc(size_t size, const char* name)
{
    auto* ptr = serenity_mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0, ChunkedBlock::block_size, name);
    VERIFY(ptr != MAP_FAILED);
    return ptr;
}

static void os_free(void* ptr, size_t size)
{
    int rc = munmap(ptr, size);
    assert(rc == 0);
}

enum class CallerWillInitializeMemory {
    No,
    Yes,
};

static void* malloc_impl(size_t size, CallerWillInitializeMemory caller_will_initialize_memory)
{
    LibThread::Locker locker(malloc_lock());

    if (s_log_malloc)
        dbgln("LibC: malloc({})", size);

    if (!size) {
        // Legally we could just return a null pointer here, but this is more
        // compatible with existing software.
        size = 1;
    }

    g_malloc_stats.number_of_malloc_calls++;

    size_t good_size;
    auto* allocator = allocator_for_size(size, good_size);

    if (!allocator) {
        size_t real_size = round_up_to_power_of_two(sizeof(BigAllocationBlock) + size, ChunkedBlock::block_size);
#ifdef RECYCLE_BIG_ALLOCATIONS
        if (auto* allocator = big_allocator_for_size(real_size)) {
            if (!allocator->blocks.is_empty()) {
                g_malloc_stats.number_of_big_allocator_hits++;
                auto* block = allocator->blocks.take_last();
                int rc = madvise(block, real_size, MADV_SET_NONVOLATILE);
                bool this_block_was_purged = rc == 1;
                if (rc < 0) {
                    perror("madvise");
                    VERIFY_NOT_REACHED();
                }
                if (mprotect(block, real_size, PROT_READ | PROT_WRITE) < 0) {
                    perror("mprotect");
                    VERIFY_NOT_REACHED();
                }
                if (this_block_was_purged) {
                    g_malloc_stats.number_of_big_allocator_purge_hits++;
                    new (block) BigAllocationBlock(real_size);
                }

                ue_notify_malloc(&block->m_slot[0], size);
                return &block->m_slot[0];
            }
        }
#endif
        g_malloc_stats.number_of_big_allocs++;
        auto* block = (BigAllocationBlock*)os_alloc(real_size, "malloc: BigAllocationBlock");
        new (block) BigAllocationBlock(real_size);
        ue_notify_malloc(&block->m_slot[0], size);
        return &block->m_slot[0];
    }

    ChunkedBlock* block = nullptr;

    for (block = allocator->usable_blocks.head(); block; block = block->next()) {
        if (block->free_chunks())
            break;
    }

    if (!block && allocator->empty_block_count) {
        g_malloc_stats.number_of_empty_block_hits++;
        block = allocator->empty_blocks[--allocator->empty_block_count];
        int rc = madvise(block, ChunkedBlock::block_size, MADV_SET_NONVOLATILE);
        bool this_block_was_purged = rc == 1;
        if (rc < 0) {
            perror("madvise");
            VERIFY_NOT_REACHED();
        }
        rc = mprotect(block, ChunkedBlock::block_size, PROT_READ | PROT_WRITE);
        if (rc < 0) {
            perror("mprotect");
            VERIFY_NOT_REACHED();
        }
        if (this_block_was_purged) {
            g_malloc_stats.number_of_empty_block_purge_hits++;
            new (block) ChunkedBlock(good_size);
        }
        allocator->usable_blocks.append(block);
    }

    if (!block) {
        g_malloc_stats.number_of_block_allocs++;
        char buffer[64];
        snprintf(buffer, sizeof(buffer), "malloc: ChunkedBlock(%zu)", good_size);
        block = (ChunkedBlock*)os_alloc(ChunkedBlock::block_size, buffer);
        new (block) ChunkedBlock(good_size);
        allocator->usable_blocks.append(block);
        ++allocator->block_count;
    }
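
    // Carve a chunk out of the block: previously freed chunks come off the
    // freelist; chunks that have never been handed out are produced lazily
    // via m_next_lazy_freelist_index, so fresh blocks need no up-front
    // freelist construction.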
    --block->m_free_chunks;
    void* ptr = block->m_freelist;
    if (ptr) {
        block->m_freelist = block->m_freelist->next;
    } else {
        ptr = block->m_slot + block->m_next_lazy_freelist_index * block->m_size;
        block->m_next_lazy_freelist_index++;
    }
    VERIFY(ptr);
    if (block->is_full()) {
        g_malloc_stats.number_of_blocks_full++;
        dbgln_if(MALLOC_DEBUG, "Block {:p} is now full in size class {}", block, good_size);
        allocator->usable_blocks.remove(block);
        allocator->full_blocks.append(block);
    }
    dbgln_if(MALLOC_DEBUG, "LibC: allocated {:p} (chunk in block {:p}, size {})", ptr, block, block->bytes_per_chunk());

    if (s_scrub_malloc && caller_will_initialize_memory == CallerWillInitializeMemory::No)
        memset(ptr, MALLOC_SCRUB_BYTE, block->m_size);

    ue_notify_malloc(ptr, size);
    return ptr;
}

static void free_impl(void* ptr)
{
    ScopedValueRollback rollback(errno);

    if (!ptr)
        return;

    g_malloc_stats.number_of_free_calls++;

    LibThread::Locker locker(malloc_lock());
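
    // Blocks are block_size-aligned (see os_alloc), so masking the pointer's
    // low bits lands on the block header; the magic value there tells us
    // whether this is a big allocation or a chunked block.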
    void* block_base = (void*)((FlatPtr)ptr & ChunkedBlock::block_mask);
    size_t magic = *(size_t*)block_base;

    if (magic == MAGIC_BIGALLOC_HEADER) {
        auto* block = (BigAllocationBlock*)block_base;
#ifdef RECYCLE_BIG_ALLOCATIONS
        if (auto* allocator = big_allocator_for_size(block->m_size)) {
            if (allocator->blocks.size() < number_of_big_blocks_to_keep_around_per_size_class) {
                g_malloc_stats.number_of_big_allocator_keeps++;
                allocator->blocks.append(block);
                size_t this_block_size = block->m_size;
                if (mprotect(block, this_block_size, PROT_NONE) < 0) {
                    perror("mprotect");
                    VERIFY_NOT_REACHED();
                }
                if (madvise(block, this_block_size, MADV_SET_VOLATILE) != 0) {
                    perror("madvise");
                    VERIFY_NOT_REACHED();
                }
                return;
            }
        }
#endif
        g_malloc_stats.number_of_big_allocator_frees++;
        os_free(block, block->m_size);
        return;
    }

    assert(magic == MAGIC_PAGE_HEADER);
    auto* block = (ChunkedBlock*)block_base;

    dbgln_if(MALLOC_DEBUG, "LibC: freeing {:p} in allocator {:p} (size={}, used={})", ptr, block, block->bytes_per_chunk(), block->used_chunks());

    if (s_scrub_free)
        memset(ptr, FREE_SCRUB_BYTE, block->bytes_per_chunk());

    auto* entry = (FreelistEntry*)ptr;
    entry->next = block->m_freelist;
    block->m_freelist = entry;

    if (block->is_full()) {
        size_t good_size;
        auto* allocator = allocator_for_size(block->m_size, good_size);
        dbgln_if(MALLOC_DEBUG, "Block {:p} no longer full in size class {}", block, good_size);
        g_malloc_stats.number_of_freed_full_blocks++;
        allocator->full_blocks.remove(block);
        allocator->usable_blocks.prepend(block);
    }

    ++block->m_free_chunks;
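
    // If the block is now completely empty, either cache it for reuse (marked
    // volatile, so the kernel may reclaim its pages under memory pressure
    // while we keep the address range) or return it to the OS outright.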
    if (!block->used_chunks()) {
        size_t good_size;
        auto* allocator = allocator_for_size(block->m_size, good_size);
        if (allocator->block_count < number_of_chunked_blocks_to_keep_around_per_size_class) {
            dbgln_if(MALLOC_DEBUG, "Keeping block {:p} around for size class {}", block, good_size);
            g_malloc_stats.number_of_keeps++;
            allocator->usable_blocks.remove(block);
            allocator->empty_blocks[allocator->empty_block_count++] = block;
            mprotect(block, ChunkedBlock::block_size, PROT_NONE);
            madvise(block, ChunkedBlock::block_size, MADV_SET_VOLATILE);
            return;
        }
        dbgln_if(MALLOC_DEBUG, "Releasing block {:p} for size class {}", block, good_size);
        g_malloc_stats.number_of_frees++;
        allocator->usable_blocks.remove(block);
        --allocator->block_count;
        os_free(block, ChunkedBlock::block_size);
    }
}

[[gnu::flatten]] void* malloc(size_t size)
{
    void* ptr = malloc_impl(size, CallerWillInitializeMemory::No);
    if (s_profiling)
        perf_event(PERF_EVENT_MALLOC, size, reinterpret_cast<FlatPtr>(ptr));
    return ptr;
}

[[gnu::flatten]] void free(void* ptr)
{
    if (s_profiling)
        perf_event(PERF_EVENT_FREE, reinterpret_cast<FlatPtr>(ptr), 0);
    ue_notify_free(ptr);
    free_impl(ptr);
}
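
// calloc() passes CallerWillInitializeMemory::Yes so malloc_impl() skips the
// scrub pass; the memset() to zero below overwrites the chunk anyway.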
void* calloc(size_t count, size_t size)
{
    size_t new_size;
    // Guard the count * size multiplication against overflow.
    if (__builtin_mul_overflow(count, size, &new_size)) {
        errno = ENOMEM;
        return nullptr;
    }
    auto* ptr = malloc_impl(new_size, CallerWillInitializeMemory::Yes);
    if (ptr)
        memset(ptr, 0, new_size);
    return ptr;
}

size_t malloc_size(void* ptr)
{
    if (!ptr)
        return 0;
    LibThread::Locker locker(malloc_lock());
    void* page_base = (void*)((FlatPtr)ptr & ChunkedBlock::block_mask);
    auto* header = (const CommonHeader*)page_base;
    auto size = header->m_size;
    if (header->m_magic == MAGIC_BIGALLOC_HEADER)
        size -= sizeof(CommonHeader);
    else
        VERIFY(header->m_magic == MAGIC_PAGE_HEADER);
    return size;
}

size_t malloc_good_size(size_t size)
{
    size_t good_size;
    allocator_for_size(size, good_size);
    return good_size;
}
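
// realloc() can reuse the existing chunk whenever the new size still fits,
// since malloc_size() reports the full usable chunk size rather than the
// originally requested size.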
void* realloc(void* ptr, size_t size)
{
    if (!ptr)
        return malloc(size);
    if (!size) {
        // Freeing here avoids leaking the old allocation.
        free(ptr);
        return nullptr;
    }

    LibThread::Locker locker(malloc_lock());
    auto existing_allocation_size = malloc_size(ptr);

    if (size <= existing_allocation_size) {
        ue_notify_realloc(ptr, size);
        return ptr;
    }
    auto* new_ptr = malloc(size);
    if (new_ptr) {
        memcpy(new_ptr, ptr, min(existing_allocation_size, size));
        free(ptr);
    }
    return new_ptr;
}

void __malloc_init()
{
    new (&malloc_lock()) LibThread::Lock();
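
    // SC_emuctl with command 0 is a probe: it succeeds under the
    // UserspaceEmulator and fails with ENOSYS on a real kernel.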
    s_in_userspace_emulator = (int)syscall(SC_emuctl, 0) != -ENOSYS;
    if (s_in_userspace_emulator) {
        // Don't bother scrubbing memory if we're running in UE since it
        // keeps track of heap memory anyway.
        s_scrub_malloc = false;
        s_scrub_free = false;
    }

    if (secure_getenv("LIBC_NOSCRUB_MALLOC"))
        s_scrub_malloc = false;
    if (secure_getenv("LIBC_NOSCRUB_FREE"))
        s_scrub_free = false;
    if (secure_getenv("LIBC_LOG_MALLOC"))
        s_log_malloc = true;
    if (secure_getenv("LIBC_PROFILE_MALLOC"))
        s_profiling = true;

    for (size_t i = 0; i < num_size_classes; ++i) {
        new (&allocators()[i]) Allocator();
        allocators()[i].size = size_classes[i];
    }

    new (&big_allocators()[0]) BigAllocator;
}

void serenity_dump_malloc_stats()
{
    dbgln("# malloc() calls: {}", g_malloc_stats.number_of_malloc_calls);
    dbgln();
    dbgln("big alloc hits: {}", g_malloc_stats.number_of_big_allocator_hits);
    dbgln("big alloc hits that were purged: {}", g_malloc_stats.number_of_big_allocator_purge_hits);
    dbgln("big allocs: {}", g_malloc_stats.number_of_big_allocs);
    dbgln();
    dbgln("empty block hits: {}", g_malloc_stats.number_of_empty_block_hits);
    dbgln("empty block hits that were purged: {}", g_malloc_stats.number_of_empty_block_purge_hits);
    dbgln("block allocs: {}", g_malloc_stats.number_of_block_allocs);
    dbgln("filled blocks: {}", g_malloc_stats.number_of_blocks_full);
    dbgln();
    dbgln("# free() calls: {}", g_malloc_stats.number_of_free_calls);
    dbgln();
    dbgln("big alloc keeps: {}", g_malloc_stats.number_of_big_allocator_keeps);
    dbgln("big alloc frees: {}", g_malloc_stats.number_of_big_allocator_frees);
    dbgln();
    dbgln("full block frees: {}", g_malloc_stats.number_of_freed_full_blocks);
    dbgln("number of keeps: {}", g_malloc_stats.number_of_keeps);
    dbgln("number of frees: {}", g_malloc_stats.number_of_frees);
}
}