Add slightly better kmalloc_aligned() and kfree_aligned().
Process page directories can now actually be freed. This could definitely be implemented in a nicer, less wasteful way, but this works for now. The spawn stress test can now run for a lot longer but eventually dies due to kmalloc running out of memory.
Commit f6179ad9f9 (parent 55c722096d)

Notes (sideshowbarker, 2024-07-19 16:07:00 +09:00):
    Author: https://github.com/awesomekling
    Commit: https://github.com/SerenityOS/serenity/commit/f6179ad9f9b

5 changed files with 26 additions and 20 deletions
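The new kfree_aligned() works because kmalloc_aligned() over-allocates, rounds the returned address up to the requested alignment, and stashes the original pointer in the word just below the aligned block. A minimal userspace sketch of the same trick, using standard malloc()/free() instead of the kernel allocator (the demo_* names are illustrative and not part of the commit; it assumes alignment is a power of two no smaller than sizeof(void*)):

#include <cstddef>
#include <cstdint>
#include <cstdlib>

// Over-allocate, round up to the requested alignment, and remember the
// original malloc() pointer in the word just below the aligned block.
// (Error handling omitted for brevity.)
void* demo_alloc_aligned(std::size_t size, std::size_t alignment)
{
    void* ptr = std::malloc(size + alignment + sizeof(void*));
    std::uintptr_t max_addr = (std::uintptr_t)ptr + alignment;
    void* aligned_ptr = (void*)(max_addr - (max_addr % alignment));
    ((void**)aligned_ptr)[-1] = ptr; // stash the real allocation
    return aligned_ptr;
}

// Recover the stashed pointer and hand it back to free().
void demo_free_aligned(void* ptr)
{
    std::free(((void**)ptr)[-1]);
}

int main()
{
    void* p = demo_alloc_aligned(4096, 4096);
    // p is 4096-byte aligned; the word below it holds the raw malloc() pointer.
    demo_free_aligned(p);
}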
@@ -58,6 +58,8 @@ void MemoryManager::release_page_directory(PageDirectory& page_directory)
 #ifdef SCRUB_DEALLOCATED_PAGE_TABLES
     memset(&page_directory, 0xc9, sizeof(PageDirectory));
 #endif
+
+    kfree_aligned(&page_directory);
 }
 
 void MemoryManager::initialize_paging()
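Note that kfree_aligned() frees by reading the pointer stashed in the word just below its argument, so this call is only valid if the PageDirectory was handed out by kmalloc_aligned()/kmalloc_page_aligned() in the first place; the allocation side is not shown in this diff.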
@@ -318,7 +318,7 @@ ByteBuffer procfs$kmalloc()
 {
     auto buffer = ByteBuffer::create_uninitialized(256);
     char* ptr = (char*)buffer.pointer();
-    ptr += ksprintf(ptr, "eternal: %u\npage-aligned: %u\nallocated: %u\nfree: %u\n", kmalloc_sum_eternal, sum_alloc, sum_free);
+    ptr += ksprintf(ptr, "eternal: %u\nallocated: %u\nfree: %u\n", kmalloc_sum_eternal, sum_alloc, sum_free);
     buffer.trim(ptr - (char*)buffer.pointer());
     return buffer;
 }
@@ -40,8 +40,8 @@ static void spawn_stress()
 
     for (unsigned i = 0; i < 10000; ++i) {
         int error;
-        Process::create_user_process("/bin/id", (uid_t)100, (gid_t)100, (pid_t)0, error, Vector<String>(), Vector<String>(), tty0);
-        kprintf("malloc stats: alloc:%u free:%u page_aligned:%u eternal:%u\n", sum_alloc, sum_free, kmalloc_page_aligned, kmalloc_sum_eternal);
+        Process::create_user_process("/bin/true", (uid_t)100, (gid_t)100, (pid_t)0, error, Vector<String>(), Vector<String>(), tty0);
+        kprintf("malloc stats: alloc:%u free:%u eternal:%u ", sum_alloc, sum_free, kmalloc_sum_eternal);
         kprintf("delta:%u\n", sum_alloc - lastAlloc);
         lastAlloc = sum_alloc;
         sleep(60);
@@ -32,20 +32,14 @@ static byte alloc_map[POOL_SIZE / CHUNK_SIZE / 8];
 volatile size_t sum_alloc = 0;
 volatile size_t sum_free = POOL_SIZE;
 volatile size_t kmalloc_sum_eternal = 0;
-volatile size_t kmalloc_sum_page_aligned = 0;
 
 static byte* s_next_eternal_ptr;
-static byte* s_next_page_aligned_ptr;
-
 static byte* s_end_of_eternal_range;
-static byte* s_end_of_page_aligned_range;
 
 bool is_kmalloc_address(void* ptr)
 {
     if (ptr >= (byte*)ETERNAL_BASE_PHYSICAL && ptr < s_next_eternal_ptr)
         return true;
-    if (ptr >= (byte*)PAGE_ALIGNED_BASE_PHYSICAL && ptr < s_next_page_aligned_ptr)
-        return true;
     return (dword)ptr >= BASE_PHYS && (dword)ptr <= (BASE_PHYS + POOL_SIZE);
 }
 
@@ -55,15 +49,11 @@ void kmalloc_init()
     memset( (void *)BASE_PHYS, 0, POOL_SIZE );
 
     kmalloc_sum_eternal = 0;
-    kmalloc_sum_page_aligned = 0;
     sum_alloc = 0;
     sum_free = POOL_SIZE;
 
     s_next_eternal_ptr = (byte*)ETERNAL_BASE_PHYSICAL;
-    s_next_page_aligned_ptr = (byte*)PAGE_ALIGNED_BASE_PHYSICAL;
-
     s_end_of_eternal_range = s_next_eternal_ptr + RANGE_SIZE;
-    s_end_of_page_aligned_range = s_next_page_aligned_ptr + RANGE_SIZE;
 }
 
 void* kmalloc_eternal(size_t size)
@@ -75,16 +65,28 @@ void* kmalloc_eternal(size_t size)
     return ptr;
 }
 
-void* kmalloc_page_aligned(size_t size)
+void* kmalloc_aligned(size_t size, size_t alignment)
 {
-    ASSERT((size % PAGE_SIZE) == 0);
-    void* ptr = s_next_page_aligned_ptr;
-    s_next_page_aligned_ptr += size;
-    ASSERT(s_next_page_aligned_ptr < s_end_of_page_aligned_range);
-    kmalloc_sum_page_aligned += size;
-    return ptr;
+    void* ptr = kmalloc(size + alignment + sizeof(void*));
+    dword max_addr = (dword)ptr + alignment;
+    void* aligned_ptr = (void*)(max_addr - (max_addr % alignment));
+
+    ((void**)aligned_ptr)[-1] = ptr;
+    return aligned_ptr;
 }
 
+void kfree_aligned(void* ptr)
+{
+    kfree(((void**)ptr)[-1]);
+}
+
+void* kmalloc_page_aligned(size_t size)
+{
+    void* ptr = kmalloc_aligned(size, PAGE_SIZE);
+    dword d = (dword)ptr;
+    ASSERT((d & PAGE_MASK) == d);
+    return ptr;
+}
+
 void* kmalloc(dword size)
 {
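The trade-off behind the new scheme: each aligned allocation pulls size + alignment + sizeof(void*) bytes from the general kmalloc pool, so up to alignment + sizeof(void*) bytes are wasted per allocation, which is presumably the "less wasteful way" the commit message hints at. With 4 KB pages and 4-byte pointers on this 32-bit target, a page-aligned PageDirectory of 4096 bytes costs 4096 + 4096 + 4 = 8196 bytes from the pool. In exchange, the memory can actually be returned via kfree_aligned(), unlike the old dedicated page-aligned bump range (PAGE_ALIGNED_BASE_PHYSICAL) removed above, which had no free path at all.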
@@ -4,7 +4,9 @@ void kmalloc_init();
 void *kmalloc(dword size) __attribute__ ((malloc));
 void* kmalloc_eternal(size_t) __attribute__ ((malloc));
 void* kmalloc_page_aligned(size_t) __attribute__ ((malloc));
+void* kmalloc_aligned(size_t, size_t alignment) __attribute__ ((malloc));
 void kfree(void*);
+void kfree_aligned(void*);
 
 bool is_kmalloc_address(void*);
 