Tidy up memory map a bit and write out the general map in MemoryManager.

There was a bug that given enough supervisor page allocation, we would
eventually start dipping into the kmalloc range.
Andreas Kling 2019-01-16 00:44:09 +01:00
parent bd3e77cc16
commit a8baee4dcd
2 changed files with 17 additions and 13 deletions
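For context, here is a minimal standalone sketch of how the old layout produced the bug. It is not SerenityOS code: the variable names are mine, and the pop-from-the-back allocation order is an assumption extrapolated from the takeLast() call visible in the diff below; the constants are the old values this commit removes. Supervisor pages were appended from 1 MB up to 4 MB, while the old kmalloc() pool sat at BASE_PHYS = 0x100000 and kmalloc_eternal() space at ETERNAL_BASE_PHYSICAL = 0x200000, so a long enough run of supervisor page allocations walked straight down into both ranges.

#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
    const size_t MB = 1024 * 1024;
    const size_t PAGE_SIZE = 4096;

    // Old kmalloc ranges, as removed by this commit (see kmalloc.cpp below).
    const size_t BASE_PHYS = 0x100000;             // kmalloc() pool, POOL_SIZE = 1 MB
    const size_t ETERNAL_BASE_PHYSICAL = 0x200000; // kmalloc_eternal() space
    const size_t POOL_SIZE = 1 * MB;
    const size_t ETERNAL_SIZE = 0x100000;          // old RANGE_SIZE

    // Old MemoryManager behavior: supervisor pages filled in from 1 MB to 4 MB.
    std::vector<size_t> free_supervisor_pages;
    for (size_t i = 1 * MB; i < 4 * MB; i += PAGE_SIZE)
        free_supervisor_pages.push_back(i);

    // Allocate pages from the back of the list, descending from just under 4 MB.
    size_t allocations = 0;
    while (!free_supervisor_pages.empty()) {
        size_t paddr = free_supervisor_pages.back();
        free_supervisor_pages.pop_back();
        ++allocations;
        bool in_kmalloc = paddr >= BASE_PHYS && paddr < BASE_PHYS + POOL_SIZE;
        bool in_eternal = paddr >= ETERNAL_BASE_PHYSICAL && paddr < ETERNAL_BASE_PHYSICAL + ETERNAL_SIZE;
        if (in_kmalloc || in_eternal) {
            // With the old constants this fires on allocation 257, at 3 MB - 4 kB.
            printf("After %zu allocations, P%zx is inside a kmalloc range!\n", allocations, paddr);
            return 1;
        }
    }
    return 0;
}

With the new map, the supervisor page range starts above both kmalloc ranges, so the same descent bottoms out instead of walking into the heap.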

Kernel/MemoryManager.cpp

@@ -69,20 +69,24 @@ void MemoryManager::initialize_paging()
     // The bottom 4 MB (except for the null page) are identity mapped & supervisor only.
     // Every process shares these mappings.
     create_identity_mapping(kernel_page_directory(), LinearAddress(PAGE_SIZE), (4 * MB) - PAGE_SIZE);
 
-    // Physical pages from this range are used for page tables.
-    for (size_t i = (1 * MB); i < (4 * MB); i += PAGE_SIZE)
+    // Basic memory map:
+    // 0    -> 512 kB    Kernel code. Root page directory & PDE 0.
+    // 1 MB -> 2 MB      kmalloc_eternal() space.
+    // 2 MB -> 3 MB      kmalloc() space.
+    // 3 MB -> 4 MB      Supervisor physical pages (available for allocation!)
+    // 4 MB -> 32 MB     Userspace physical pages (available for allocation!)
+    for (size_t i = (3 * MB); i < (4 * MB); i += PAGE_SIZE)
         m_free_supervisor_physical_pages.append(adopt(*new PhysicalPage(PhysicalAddress(i), true)));
 #ifdef MM_DEBUG
     dbgprintf("MM: 4MB-32MB available for allocation\n");
 #endif
-    // The physical pages 4 MB through 8 MB are available for allocation.
     for (size_t i = (4 * MB); i < (32 * MB); i += PAGE_SIZE)
         m_free_physical_pages.append(adopt(*new PhysicalPage(PhysicalAddress(i), false)));
 
     m_quickmap_addr = LinearAddress(m_free_physical_pages.takeLast().leakRef()->paddr().get());
-    kprintf("MM: Quickmap will use P%x\n", m_quickmap_addr.get());
 #ifdef MM_DEBUG
+    dbgprintf("MM: Quickmap will use P%x\n", m_quickmap_addr.get());
     dbgprintf("MM: Installing page directory\n");
 #endif
     asm volatile("movl %%eax, %%cr3"::"a"(kernel_page_directory().cr3()));

Kernel/kmalloc.cpp

@@ -23,10 +23,10 @@ typedef struct
 #define CHUNK_SIZE 128
 #define POOL_SIZE (1024 * 1024)
 
-#define PAGE_ALIGNED_BASE_PHYSICAL 0x300000
-#define ETERNAL_BASE_PHYSICAL 0x200000
-#define BASE_PHYS 0x100000
+#define ETERNAL_BASE_PHYSICAL 0x100000
+#define ETERNAL_RANGE_SIZE 0x100000
+#define BASE_PHYSICAL 0x200000
 #define RANGE_SIZE 0x100000
 
 static byte alloc_map[POOL_SIZE / CHUNK_SIZE / 8];
@@ -42,20 +42,20 @@ bool is_kmalloc_address(void* ptr)
 {
     if (ptr >= (byte*)ETERNAL_BASE_PHYSICAL && ptr < s_next_eternal_ptr)
         return true;
-    return (dword)ptr >= BASE_PHYS && (dword)ptr <= (BASE_PHYS + POOL_SIZE);
+    return (dword)ptr >= BASE_PHYSICAL && (dword)ptr <= (BASE_PHYSICAL + POOL_SIZE);
 }
 
 void kmalloc_init()
 {
-    memset( &alloc_map, 0, sizeof(alloc_map) );
-    memset( (void *)BASE_PHYS, 0, POOL_SIZE );
+    memset(&alloc_map, 0, sizeof(alloc_map));
+    memset((void *)BASE_PHYSICAL, 0, POOL_SIZE);
 
     kmalloc_sum_eternal = 0;
     sum_alloc = 0;
     sum_free = POOL_SIZE;
 
     s_next_eternal_ptr = (byte*)ETERNAL_BASE_PHYSICAL;
-    s_end_of_eternal_range = s_next_eternal_ptr + RANGE_SIZE;
+    s_end_of_eternal_range = s_next_eternal_ptr + ETERNAL_RANGE_SIZE;
 }
 
 void* kmalloc_eternal(size_t size)
@@ -136,7 +136,7 @@ void* kmalloc_impl(dword size)
         if( chunks_here == chunks_needed )
         {
-            auto* a = (allocation_t *)(BASE_PHYS + (first_chunk * CHUNK_SIZE));
+            auto* a = (allocation_t *)(BASE_PHYSICAL + (first_chunk * CHUNK_SIZE));
             byte *ptr = (byte *)a;
             ptr += sizeof(allocation_t);
             a->nchunk = chunks_needed;
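As a closing sanity check, here is a small compile-time sketch (standalone, not part of the commit) showing that the new kmalloc constants line up exactly with the memory map comment added in MemoryManager.cpp:

#include <cstddef>

constexpr std::size_t MB = 1024 * 1024;

// New constants from the kmalloc.cpp diff above.
constexpr std::size_t ETERNAL_BASE_PHYSICAL = 0x100000;
constexpr std::size_t ETERNAL_RANGE_SIZE = 0x100000;
constexpr std::size_t BASE_PHYSICAL = 0x200000;
constexpr std::size_t POOL_SIZE = 1024 * 1024;

// 1 MB -> 2 MB: kmalloc_eternal() space.
static_assert(ETERNAL_BASE_PHYSICAL == 1 * MB, "eternal heap starts at 1 MB");
static_assert(ETERNAL_BASE_PHYSICAL + ETERNAL_RANGE_SIZE == 2 * MB, "eternal heap ends at 2 MB");

// 2 MB -> 3 MB: kmalloc() space.
static_assert(BASE_PHYSICAL == 2 * MB, "kmalloc pool starts at 2 MB");
static_assert(BASE_PHYSICAL + POOL_SIZE == 3 * MB, "kmalloc pool ends at 3 MB");

// That leaves 3 MB -> 4 MB clear for the supervisor physical pages
// that initialize_paging() puts on the free list.

int main() { return 0; }

Neither heap range now overlaps the supervisor page range, which is what closes the bug described in the commit message.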