/*
 * Really really *really* Q&D malloc() and free() implementations
 * just to get going. Don't ever let anyone see this shit. :^)
 */
2019-06-07 09:43:58 +00:00
# include <AK/Assertions.h>
2019-04-06 12:29:29 +00:00
# include <AK/Types.h>
2019-06-07 18:02:01 +00:00
# include <Kernel/Arch/i386/CPU.h>
2019-06-07 09:43:58 +00:00
# include <Kernel/KSyms.h>
2019-04-03 12:41:40 +00:00
# include <Kernel/Process.h>
# include <Kernel/Scheduler.h>
2019-06-07 09:43:58 +00:00
# include <Kernel/StdLib.h>
2019-11-23 16:27:09 +00:00
# include <Kernel/Heap/kmalloc.h>
2018-10-16 09:01:38 +00:00
// When defined, freshly allocated memory is scribbled with 0xbb and freed
// memory with 0xaa, to make use of uninitialized/freed memory stand out.
#define SANITIZE_KMALLOC

// Header prepended to every kmalloc() allocation. Records which chunk the
// allocation starts at and how many chunks it spans, so kfree() can clear
// the right bits in alloc_map. Packed so it costs exactly two words.
struct [[gnu::packed]] allocation_t {
    size_t start;  // index of first chunk in the pool
    size_t nchunk; // number of chunks covered (including this header)
};
2018-10-16 09:01:38 +00:00
2019-11-23 16:27:09 +00:00
# define BASE_PHYSICAL (4 * MB)
2019-09-16 07:01:44 +00:00
# define CHUNK_SIZE 8
2019-11-04 11:00:29 +00:00
# define POOL_SIZE (3 * MB)
2018-10-16 09:01:38 +00:00
2019-11-23 16:27:09 +00:00
# define ETERNAL_BASE_PHYSICAL (2 * MB)
2019-06-09 09:48:58 +00:00
# define ETERNAL_RANGE_SIZE (2 * MB)
2018-10-16 09:01:38 +00:00
2019-07-03 19:17:35 +00:00
static u8 alloc_map [ POOL_SIZE / CHUNK_SIZE / 8 ] ;
2018-10-16 09:01:38 +00:00
2018-12-02 22:34:50 +00:00
// Byte accounting for the main pool (sum_alloc + sum_free == POOL_SIZE).
volatile size_t sum_alloc = 0;
volatile size_t sum_free = POOL_SIZE;

// Total bytes handed out by kmalloc_eternal().
volatile size_t kmalloc_sum_eternal = 0;

u32 g_kmalloc_call_count;
u32 g_kfree_call_count;

// When true (and ksyms are loaded), every kmalloc() dumps a backtrace.
bool g_dump_kmalloc_stacks;

// Bump pointer for kmalloc_eternal() and the end of its range.
static u8* s_next_eternal_ptr;
static u8* s_end_of_eternal_range;
2018-11-02 19:41:58 +00:00
2019-01-27 09:17:56 +00:00
bool is_kmalloc_address ( const void * ptr )
2018-10-26 22:14:24 +00:00
{
2019-11-23 16:27:09 +00:00
if ( ptr > = ( u8 * ) ETERNAL_BASE_PHYSICAL & & ptr < s_next_eternal_ptr )
2018-10-31 22:19:15 +00:00
return true ;
2019-11-23 16:27:09 +00:00
return ( size_t ) ptr > = BASE_PHYSICAL & & ( size_t ) ptr < = ( BASE_PHYSICAL + POOL_SIZE ) ;
2018-10-26 22:14:24 +00:00
}
2018-11-09 00:25:31 +00:00
void kmalloc_init ( )
2018-10-16 09:01:38 +00:00
{
2019-01-15 23:44:09 +00:00
memset ( & alloc_map , 0 , sizeof ( alloc_map ) ) ;
2019-11-23 16:27:09 +00:00
memset ( ( void * ) BASE_PHYSICAL , 0 , POOL_SIZE ) ;
2018-10-16 09:01:38 +00:00
2018-10-31 22:19:15 +00:00
kmalloc_sum_eternal = 0 ;
2018-10-16 09:01:38 +00:00
sum_alloc = 0 ;
sum_free = POOL_SIZE ;
2018-10-31 22:19:15 +00:00
2019-11-23 16:27:09 +00:00
s_next_eternal_ptr = ( u8 * ) ETERNAL_BASE_PHYSICAL ;
2019-01-15 23:44:09 +00:00
s_end_of_eternal_range = s_next_eternal_ptr + ETERNAL_RANGE_SIZE ;
2018-10-31 22:19:15 +00:00
}
// Bump-allocates `size` bytes that can never be freed. No header, no
// alignment guarantees beyond the running bump pointer.
void* kmalloc_eternal(size_t size)
{
    void* ptr = s_next_eternal_ptr;
    s_next_eternal_ptr += size;
    // NOTE(review): the range check happens after the pointer is bumped, so an
    // oversized request asserts only after handing out `ptr` — preserved as-is.
    ASSERT(s_next_eternal_ptr < s_end_of_eternal_range);
    kmalloc_sum_eternal += size;
    return ptr;
}
2018-12-26 20:31:46 +00:00
// Allocates `size` bytes aligned to `alignment`. Over-allocates so that an
// aligned address always fits, and stashes the raw kmalloc() pointer in the
// word immediately before the returned address (read back by kfree_aligned()).
// Assumes alignment >= sizeof(void*) so the stash slot always fits — TODO confirm.
void* kmalloc_aligned(size_t size, size_t alignment)
{
    void* ptr = kmalloc(size + alignment + sizeof(void*));
    size_t max_addr = (size_t)ptr + alignment;
    // Round max_addr down to the nearest multiple of `alignment`.
    void* aligned_ptr = (void*)(max_addr - (max_addr % alignment));
    ((void**)aligned_ptr)[-1] = ptr;
    return aligned_ptr;
}
// Frees a pointer obtained from kmalloc_aligned() by recovering the raw
// kmalloc() pointer stored just before the aligned address.
void kfree_aligned(void* ptr)
{
    kfree(((void**)ptr)[-1]);
}
2018-11-01 08:01:51 +00:00
// Convenience wrapper: page-aligned allocation (free with kfree_aligned()).
void* kmalloc_page_aligned(size_t size)
{
    void* ptr = kmalloc_aligned(size, PAGE_SIZE);
    size_t d = (size_t)ptr;
    // Sanity check: the address must have no bits below the page boundary.
    ASSERT((d & PAGE_MASK) == d);
    return ptr;
}
2019-02-22 09:23:06 +00:00
// Core allocator: first-fit scan of the chunk bitmap for a contiguous run of
// free chunks large enough for `size` bytes plus the allocation_t header.
// Runs with interrupts disabled; panics (hangs) on out-of-memory.
void* kmalloc_impl(size_t size)
{
    InterruptDisabler disabler;
    ++g_kmalloc_call_count;

    if (g_dump_kmalloc_stacks && ksyms_ready) {
        dbgprintf("kmalloc(%u)\n", size);
        dump_backtrace();
    }

    // We need space for the allocation_t structure at the head of the block.
    size_t real_size = size + sizeof(allocation_t);

    if (sum_free < real_size) {
        dump_backtrace();
        kprintf("%s(%u) kmalloc(): PANIC! Out of memory (sucks, dude)\nsum_free=%u, real_size=%u\n", current->process().name().characters(), current->pid(), sum_free, real_size);
        hang();
    }

    // Round the chunk count up so the allocation always fits.
    size_t chunks_needed = real_size / CHUNK_SIZE;
    if (real_size % CHUNK_SIZE)
        ++chunks_needed;

    size_t chunks_here = 0;  // length of the current run of free chunks
    size_t first_chunk = 0;  // index where the current run began

    for (size_t i = 0; i < (POOL_SIZE / CHUNK_SIZE / 8); ++i) {
        if (alloc_map[i] == 0xff) {
            // Skip over completely full bucket.
            chunks_here = 0;
            continue;
        }
        // FIXME: This scan can be optimized further with LZCNT.
        for (size_t j = 0; j < 8; ++j) {
            if (!(alloc_map[i] & (1 << j))) {
                if (chunks_here == 0) {
                    // Mark where potential allocation starts.
                    first_chunk = i * 8 + j;
                }

                ++chunks_here;

                if (chunks_here == chunks_needed) {
                    // Found a big-enough run: place the header at its start
                    // and return the address just past the header.
                    auto* a = (allocation_t*)(BASE_PHYSICAL + (first_chunk * CHUNK_SIZE));
                    u8* ptr = (u8*)a;
                    ptr += sizeof(allocation_t);
                    a->nchunk = chunks_needed;
                    a->start = first_chunk;

                    for (size_t k = first_chunk; k < (first_chunk + chunks_needed); ++k) {
                        alloc_map[k / 8] |= 1 << (k % 8);
                    }

                    sum_alloc += a->nchunk * CHUNK_SIZE;
                    sum_free -= a->nchunk * CHUNK_SIZE;
#ifdef SANITIZE_KMALLOC
                    memset(ptr, 0xbb, (a->nchunk * CHUNK_SIZE) - sizeof(allocation_t));
#endif
                    return ptr;
                }
            } else {
                // This is in use, so restart chunks_here counter.
                chunks_here = 0;
            }
        }
    }

    kprintf("%s(%u) kmalloc(): PANIC! Out of memory (no suitable block for size %u)\n", current->process().name().characters(), current->pid(), size);
    dump_backtrace();
    hang();
}
2019-06-07 09:43:58 +00:00
void kfree ( void * ptr )
2018-10-16 09:01:38 +00:00
{
2019-04-03 12:41:40 +00:00
if ( ! ptr )
2018-10-16 09:01:38 +00:00
return ;
2018-10-24 09:07:53 +00:00
InterruptDisabler disabler ;
2019-04-25 21:18:11 +00:00
+ + g_kfree_call_count ;
2018-10-23 22:51:19 +00:00
2019-07-03 19:17:35 +00:00
auto * a = ( allocation_t * ) ( ( ( ( u8 * ) ptr ) - sizeof ( allocation_t ) ) ) ;
2018-10-16 09:01:38 +00:00
2019-02-22 09:23:06 +00:00
for ( size_t k = a - > start ; k < ( a - > start + a - > nchunk ) ; + + k )
2018-10-16 09:01:38 +00:00
alloc_map [ k / 8 ] & = ~ ( 1 < < ( k % 8 ) ) ;
sum_alloc - = a - > nchunk * CHUNK_SIZE ;
2019-04-03 12:41:40 +00:00
sum_free + = a - > nchunk * CHUNK_SIZE ;
2018-10-16 09:01:38 +00:00
# ifdef SANITIZE_KMALLOC
memset ( a , 0xaa , a - > nchunk * CHUNK_SIZE ) ;
# endif
}
2019-11-27 13:06:24 +00:00
// Resizes an allocation by allocating anew, copying, and freeing the old
// block. krealloc(nullptr, n) behaves like kmalloc(n).
void* krealloc(void* ptr, size_t new_size)
{
    if (!ptr)
        return kmalloc(new_size);
    InterruptDisabler disabler;
    auto* a = (allocation_t*)(((u8*)ptr) - sizeof(allocation_t));
    // NOTE(review): old_size counts whole chunks including the header, so the
    // copy below can read slightly past the old payload — preserved as-is.
    size_t old_size = a->nchunk * CHUNK_SIZE;
    if (old_size == new_size)
        return ptr;
    auto* new_ptr = kmalloc(new_size);
    memcpy(new_ptr, ptr, min(old_size, new_size));
    kfree(ptr);
    return new_ptr;
}
2018-12-02 22:34:50 +00:00
void * operator new ( size_t size )
2018-10-16 09:01:38 +00:00
{
return kmalloc ( size ) ;
}
2018-12-02 22:34:50 +00:00
void * operator new [ ] ( size_t size )
2018-10-16 09:01:38 +00:00
{
return kmalloc ( size ) ;
}
void operator delete ( void * ptr )
{
return kfree ( ptr ) ;
}
void operator delete [ ] ( void * ptr )
{
return kfree ( ptr ) ;
}
2019-02-22 09:23:06 +00:00
void operator delete ( void * ptr , size_t )
2018-10-16 09:01:38 +00:00
{
return kfree ( ptr ) ;
}
2019-02-22 09:23:06 +00:00
void operator delete [ ] ( void * ptr , size_t )
2018-10-16 09:01:38 +00:00
{
return kfree ( ptr ) ;
}