/*
 * Copyright (c) 2021-2023, Andreas Kling <andreas@ladybird.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */
|
|
|
|
|
#include <AK/Platform.h>
#include <AK/Random.h>
#include <AK/Vector.h>
#include <LibGC/BlockAllocator.h>
#include <LibGC/HeapBlock.h>
#include <sys/mman.h>

#ifdef HAS_ADDRESS_SANITIZER
#    include <sanitizer/asan_interface.h>
#    include <sanitizer/lsan_interface.h>
#endif
|
|
|
|
|
// GNU Hurd has no working madvise-based page reclamation; likewise any libc
// that defines neither MADV_FREE nor MADV_DONTNEED must fall back to
// unmapping and remapping blocks to return physical pages to the kernel.
#if defined(AK_OS_GNU_HURD) || (!defined(MADV_FREE) && !defined(MADV_DONTNEED))
#    define USE_FALLBACK_BLOCK_DEALLOCATION
#endif
|
|
|
|
|
2024-11-14 15:01:23 +00:00
|
|
|
namespace GC {
|
2021-05-27 17:01:26 +00:00
|
|
|
|
|
|
|
// Tear down the allocator: every block still sitting in the cache is
// returned to the OS. A failed munmap indicates heap corruption, so we
// report it and abort rather than continue with a broken heap.
BlockAllocator::~BlockAllocator()
{
    for (auto* cached_block : m_blocks) {
        // Cached blocks are poisoned under ASAN; unpoison before the
        // region is handed back to the kernel.
        ASAN_UNPOISON_MEMORY_REGION(cached_block, HeapBlock::block_size);
        if (munmap(cached_block, HeapBlock::block_size) != 0) {
            perror("munmap");
            VERIFY_NOT_REACHED();
        }
    }
}
|
|
|
|
|
|
|
|
void* BlockAllocator::allocate_block([[maybe_unused]] char const* name)
|
|
|
|
{
|
2021-05-27 19:23:56 +00:00
|
|
|
if (!m_blocks.is_empty()) {
|
2021-10-07 21:25:12 +00:00
|
|
|
// To reduce predictability, take a random block from the cache.
|
|
|
|
size_t random_index = get_random_uniform(m_blocks.size());
|
|
|
|
auto* block = m_blocks.unstable_take(random_index);
|
2021-05-27 23:52:18 +00:00
|
|
|
ASAN_UNPOISON_MEMORY_REGION(block, HeapBlock::block_size);
|
2024-04-03 05:53:54 +00:00
|
|
|
LSAN_REGISTER_ROOT_REGION(block, HeapBlock::block_size);
|
2021-05-27 19:23:56 +00:00
|
|
|
return block;
|
|
|
|
}
|
2021-05-27 17:01:26 +00:00
|
|
|
|
2024-05-09 18:18:11 +00:00
|
|
|
auto* block = (HeapBlock*)mmap(nullptr, HeapBlock::block_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
|
2023-12-31 10:36:18 +00:00
|
|
|
VERIFY(block != MAP_FAILED);
|
2024-04-03 05:53:54 +00:00
|
|
|
LSAN_REGISTER_ROOT_REGION(block, HeapBlock::block_size);
|
2021-05-27 17:01:26 +00:00
|
|
|
return block;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Return a block to the allocator's cache. The virtual address range is kept
// (so it can be recycled by allocate_block()), but we ask the kernel to
// reclaim the underlying physical pages using the best mechanism the
// platform offers: MADV_FREE, MADV_DONTNEED, or an unmap+remap fallback.
void BlockAllocator::deallocate_block(void* block)
{
    VERIFY(block);

#if defined(USE_FALLBACK_BLOCK_DEALLOCATION)
    // If we can't use any of the nicer techniques, unmap and remap the block to return the physical pages while keeping the VM.
    if (munmap(block, HeapBlock::block_size) < 0) {
        perror("munmap");
        VERIFY_NOT_REACHED();
    }
    // MAP_FIXED re-establishes the mapping at the exact same address, so the
    // cached pointer stays valid; any result other than `block` is fatal.
    if (mmap(block, HeapBlock::block_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0) != block) {
        perror("mmap");
        VERIFY_NOT_REACHED();
    }
#elif defined(MADV_FREE)
    // Preferred path: lazily free the pages; the kernel reclaims them only
    // under memory pressure, keeping the mapping itself intact.
    if (madvise(block, HeapBlock::block_size, MADV_FREE) < 0) {
        perror("madvise(MADV_FREE)");
        VERIFY_NOT_REACHED();
    }
#elif defined(MADV_DONTNEED)
    // Fallback advice: pages are dropped immediately and will read back as
    // zero-fill if touched again through this mapping.
    if (madvise(block, HeapBlock::block_size, MADV_DONTNEED) < 0) {
        perror("madvise(MADV_DONTNEED)");
        VERIFY_NOT_REACHED();
    }
#endif

    // Poison the cached region so ASAN flags any use-after-free, and drop it
    // from LSAN's root set so cached blocks aren't scanned for leaks.
    ASAN_POISON_MEMORY_REGION(block, HeapBlock::block_size);
    LSAN_UNREGISTER_ROOT_REGION(block, HeapBlock::block_size);
    m_blocks.append(block);
}
|
|
|
|
|
|
|
|
}
|