Kernel: Defer kmalloc heap contraction
Because allocating/freeing regions may require locks that need to wait on other processors to finish their work, freeing a region removed from the kmalloc heap has to be deferred until it is safe to do so. Otherwise we could deadlock, because we are still holding the global heap lock at that point.
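For illustration, here is a minimal, standalone sketch of the pattern this change introduces, written in plain C++ so it builds outside the kernel: std::recursive_mutex stands in for the kernel's RecursiveSpinLock, a simple callback vector stands in for Processor::deferred_call_queue, and std::unique_ptr stands in for the kernel's owning region pointers. The names GlobalHeap and remove_subheap_memory are made up for this sketch; only the shape of the fix matches the diff below.

// Sketch only: std::recursive_mutex ~ RecursiveSpinLock, s_deferred ~ the
// processor's deferred-call queue, std::unique_ptr<Region> ~ an owned region.
#include <cstddef>
#include <cstdio>
#include <functional>
#include <memory>
#include <mutex>
#include <vector>

struct Region {
    size_t size { 0 };
};

static std::recursive_mutex s_heap_lock;              // the global heap lock
static std::vector<std::function<void()>> s_deferred; // deferred-call queue stand-in

struct GlobalHeap {
    std::unique_ptr<Region> backup_memory;               // emergency backup region
    std::vector<std::unique_ptr<Region>> subheap_memory; // regions backing subheaps

    // Called with s_heap_lock already held. Actually unmapping/freeing the
    // region could require other locks (and other CPUs), so we only detach it
    // here and leave the release to a deferred callback.
    void remove_subheap_memory(size_t index)
    {
        auto region = std::move(subheap_memory[index]);
        subheap_memory.erase(subheap_memory.begin() + index);

        if (!backup_memory) {
            backup_memory = std::move(region); // reuse it as the backup right away
            return;
        }

        // std::function needs a copyable callable, so hand over a raw pointer
        // and re-wrap it inside the callback (the kernel's queue takes the
        // move-only lambda directly).
        Region* raw = region.release();
        s_deferred.push_back([this, raw] {
            std::unique_ptr<Region> r(raw);
            std::lock_guard<std::recursive_mutex> lock(s_heap_lock);
            if (!backup_memory)
                backup_memory = std::move(r); // the backup was consumed meanwhile
            // otherwise r goes out of scope here and the region is freed safely
        });
    }
};

int main()
{
    GlobalHeap heap;
    heap.subheap_memory.push_back(std::make_unique<Region>(Region { 2 * 1024 * 1024 }));
    heap.subheap_memory.push_back(std::make_unique<Region>(Region { 2 * 1024 * 1024 }));

    {
        std::lock_guard<std::recursive_mutex> lock(s_heap_lock);
        heap.remove_subheap_memory(0); // becomes the backup
        heap.remove_subheap_memory(0); // queued for deferred release
    }

    // "Safe point": run the deferred work outside the heap lock.
    for (auto& callback : s_deferred)
        callback();
    std::printf("ran %zu deferred callback(s)\n", s_deferred.size());
    return 0;
}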
Parent: b9a97ff81f
Commit: 28b109688b

Notes (sideshowbarker, 2024-07-19 01:33:28 +09:00):
Author: https://github.com/tomuta
Commit: https://github.com/SerenityOS/serenity/commit/28b109688bf
Pull-request: https://github.com/SerenityOS/serenity/pull/3921
Reviewed-by: https://github.com/awesomekling

1 changed file with 19 additions and 3 deletions
@@ -50,6 +50,8 @@
#define POOL_SIZE (2 * MiB)
#define ETERNAL_RANGE_SIZE (2 * MiB)

static RecursiveSpinLock s_lock; // needs to be recursive because of dump_backtrace()

struct KmallocGlobalHeap {
    struct ExpandGlobalHeap {
        KmallocGlobalHeap& m_global_heap;

@@ -125,10 +127,26 @@ struct KmallocGlobalHeap {
            for (size_t i = 0; i < m_global_heap.m_subheap_memory.size(); i++) {
                if (m_global_heap.m_subheap_memory[i].vaddr().as_ptr() == memory) {
                    auto region = m_global_heap.m_subheap_memory.take(i);
                    klog() << "kmalloc(): Removing memory from heap at " << region->vaddr() << ", bytes: " << region->size();
                    if (!m_global_heap.m_backup_memory) {
                        klog() << "kmalloc(): Using removed memory as backup: " << region->vaddr() << ", bytes: " << region->size();
                        m_global_heap.m_backup_memory = move(region);
                    } else {
                        klog() << "kmalloc(): Queue removing memory from heap at " << region->vaddr() << ", bytes: " << region->size();
                        Processor::deferred_call_queue([this, region = move(region)]() mutable {
                            // We need to defer freeing the region to prevent a potential
                            // deadlock since we are still holding the kmalloc lock
                            // We don't really need to do anything other than holding
                            // onto the region. Unless we already used the backup
                            // memory, in which case we want to use the region as the
                            // new backup.
                            ScopedSpinLock lock(s_lock);
                            if (!m_global_heap.m_backup_memory) {
                                klog() << "kmalloc(): Queued memory region at " << region->vaddr() << ", bytes: " << region->size() << " will be used as new backup";
                                m_global_heap.m_backup_memory = move(region);
                            } else {
                                klog() << "kmalloc(): Queued memory region at " << region->vaddr() << ", bytes: " << region->size() << " will be freed now";
                            }
                        });
                    }
                    return true;
                }
            }

@@ -179,8 +197,6 @@ bool g_dump_kmalloc_stacks;
static u8* s_next_eternal_ptr;
static u8* s_end_of_eternal_range;

static RecursiveSpinLock s_lock; // needs to be recursive because of dump_backtrace()

void kmalloc_enable_expand()
{
    g_kmalloc_global->allocate_backup_memory();