@@ -12,8 +12,6 @@
 //#define PAGE_FAULT_DEBUG
 
 static MemoryManager* s_the;
-unsigned MemoryManager::s_user_physical_pages_in_existence;
-unsigned MemoryManager::s_super_physical_pages_in_existence;
 
 MemoryManager& MM
 {
@@ -78,6 +76,14 @@ void MemoryManager::initialize_paging()
     // 5 MB -> 0xc0000000 Userspace physical pages (available for allocation!)
     // 0xc0000000-0xffffffff Kernel-only linear address space
 
+    m_quickmap_addr = VirtualAddress((1 * MB) - PAGE_SIZE);
+#ifdef MM_DEBUG
+    dbgprintf("MM: Quickmap will use %p\n", m_quickmap_addr.get());
+#endif
+
+    RetainPtr<PhysicalRegion> region = nullptr;
+    bool region_is_super = false;
+
     for (auto* mmap = (multiboot_memory_map_t*)multiboot_info_ptr->mmap_addr; (unsigned long)mmap < multiboot_info_ptr->mmap_addr + multiboot_info_ptr->mmap_length; mmap = (multiboot_memory_map_t*)((unsigned long)mmap + mmap->size + sizeof(mmap->size))) {
         kprintf("MM: Multiboot mmap: base_addr = 0x%x%08x, length = 0x%x%08x, type = 0x%x\n",
             (dword)(mmap->addr >> 32),
@@ -88,26 +94,48 @@ void MemoryManager::initialize_paging()
 
         if (mmap->type != MULTIBOOT_MEMORY_AVAILABLE)
             continue;
+
         // FIXME: Maybe make use of stuff below the 1MB mark?
         if (mmap->addr < (1 * MB))
             continue;
 
+#ifdef MM_DEBUG
+        kprintf("MM: considering memory at %p - %p\n",
+            (dword)mmap->addr, (dword)(mmap->addr + mmap->len));
+#endif
+
         for (size_t page_base = mmap->addr; page_base < (mmap->addr + mmap->len); page_base += PAGE_SIZE) {
-            if (page_base < (4 * MB)) {
-                // Skip over pages managed by kmalloc.
-                continue;
+            auto addr = PhysicalAddress(page_base);
+
+            if (page_base < 4 * MB) {
+                // nothing
+            } else if (page_base >= 4 * MB && page_base < 5 * MB) {
+                if (region.is_null() || !region_is_super || region->upper().offset(PAGE_SIZE) != addr) {
+                    m_super_physical_regions.append(PhysicalRegion::create(addr, addr));
+                    region = m_super_physical_regions.last();
+                    region_is_super = true;
+                } else {
+                    region->expand(region->lower(), addr);
+                }
+            } else {
+                if (region.is_null() || region_is_super || region->upper().offset(PAGE_SIZE) != addr) {
+                    m_user_physical_regions.append(PhysicalRegion::create(addr, addr));
+                    region = m_user_physical_regions.last();
+                    region_is_super = false;
+                } else {
+                    region->expand(region->lower(), addr);
+                }
             }
-
-            if (page_base < (5 * MB))
-                m_free_supervisor_physical_pages.append(PhysicalPage::create_eternal(PhysicalAddress(page_base), true));
-            else
-                m_free_physical_pages.append(PhysicalPage::create_eternal(PhysicalAddress(page_base), false));
         }
     }
 
-    m_quickmap_addr = VirtualAddress((1 * MB) - PAGE_SIZE);
+    for (auto& region : m_super_physical_regions)
+        m_super_physical_pages += region->finalize_capacity();
+
+    for (auto& region : m_user_physical_regions)
+        m_user_physical_pages += region->finalize_capacity();
+
 #ifdef MM_DEBUG
-    dbgprintf("MM: Quickmap will use P%x\n", m_quickmap_addr.get());
     dbgprintf("MM: Installing page directory\n");
 #endif
 
@@ -282,7 +310,7 @@ bool MemoryManager::zero_page(Region& region, unsigned page_index_in_region)
         remap_region_page(region, page_index_in_region, true);
         return true;
     }
-    auto physical_page = allocate_physical_page(ShouldZeroFill::Yes);
+    auto physical_page = allocate_user_physical_page(ShouldZeroFill::Yes);
 #ifdef PAGE_FAULT_DEBUG
     dbgprintf(" >> ZERO P%x\n", physical_page->paddr().get());
 #endif
@@ -309,7 +337,7 @@ bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
     dbgprintf(" >> It's a COW page and it's time to COW!\n");
 #endif
     auto physical_page_to_copy = move(vmo.physical_pages()[page_index_in_region]);
-    auto physical_page = allocate_physical_page(ShouldZeroFill::No);
+    auto physical_page = allocate_user_physical_page(ShouldZeroFill::No);
     byte* dest_ptr = quickmap_page(*physical_page);
     const byte* src_ptr = region.vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
 #ifdef PAGE_FAULT_DEBUG
@@ -360,7 +388,7 @@ bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_region)
         memset(page_buffer + nread, 0, PAGE_SIZE - nread);
     }
     cli();
-    vmo_page = allocate_physical_page(ShouldZeroFill::No);
+    vmo_page = allocate_user_physical_page(ShouldZeroFill::No);
     if (vmo_page.is_null()) {
         kprintf("MM: page_in_from_inode was unable to allocate a physical page\n");
         return false;
@@ -430,40 +458,114 @@ RetainPtr<Region> MemoryManager::allocate_kernel_region(size_t size, String&& name)
     return region;
 }
 
-RetainPtr<PhysicalPage> MemoryManager::allocate_physical_page(ShouldZeroFill should_zero_fill)
+void MemoryManager::deallocate_user_physical_page(PhysicalPage& page)
+{
+    for (auto& region : m_user_physical_regions) {
+        if (!region->contains(page)) {
+            kprintf(
+                "MM: deallocate_user_physical_page: %p not in %p -> %p\n",
+                page.paddr(), region->lower().get(), region->upper().get());
+            continue;
+        }
+
+        region->return_page(page);
+        m_user_physical_pages_used--;
+
+        return;
+    }
+
+    kprintf("MM: deallocate_user_physical_page couldn't figure out region for user page @ %p\n", page.paddr());
+    ASSERT_NOT_REACHED();
+}
+
+RetainPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill)
 {
     InterruptDisabler disabler;
-    if (1 > m_free_physical_pages.size()) {
-        kprintf("FUCK! No physical pages available.\n");
+
+    RetainPtr<PhysicalPage> page = nullptr;
+
+    for (auto& region : m_user_physical_regions) {
+        page = region->take_free_page(false);
+        if (!page.is_null())
+            break;
+    }
+
+    if (!page) {
+        if (m_user_physical_regions.is_empty()) {
+            kprintf("MM: no user physical regions available (?)\n");
+        }
+
+        kprintf("MM: no user physical pages available\n");
         ASSERT_NOT_REACHED();
         return {};
     }
+
 #ifdef MM_DEBUG
-    dbgprintf("MM: allocate_physical_page vending P%x (%u remaining)\n", m_free_physical_pages.last()->paddr().get(), m_free_physical_pages.size());
+    dbgprintf("MM: allocate_user_physical_page vending P%p\n", page->paddr().get());
 #endif
-    auto physical_page = m_free_physical_pages.take_last();
+
     if (should_zero_fill == ShouldZeroFill::Yes) {
-        auto* ptr = (dword*)quickmap_page(*physical_page);
+        auto* ptr = (dword*)quickmap_page(*page);
         fast_dword_fill(ptr, 0, PAGE_SIZE / sizeof(dword));
         unquickmap_page();
     }
-    return physical_page;
+
+    m_user_physical_pages_used++;
+
+    return page;
+}
+
+void MemoryManager::deallocate_supervisor_physical_page(PhysicalPage& page)
+{
+    for (auto& region : m_super_physical_regions) {
+        if (!region->contains(page)) {
+            kprintf(
+                "MM: deallocate_supervisor_physical_page: %p not in %p -> %p\n",
+                page.paddr(), region->lower().get(), region->upper().get());
+            continue;
+        }
+
+        region->return_page(page);
+        m_super_physical_pages_used--;
+
+        return;
+    }
+
+    kprintf("MM: deallocate_supervisor_physical_page couldn't figure out region for super page @ %p\n", page.paddr());
+    ASSERT_NOT_REACHED();
 }
 
 RetainPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
 {
     InterruptDisabler disabler;
-    if (1 > m_free_supervisor_physical_pages.size()) {
-        kprintf("FUCK! No physical pages available.\n");
+
+    RetainPtr<PhysicalPage> page = nullptr;
+
+    for (auto& region : m_super_physical_regions) {
+        page = region->take_free_page(true);
+        if (!page.is_null())
+            break;
+    }
+
+    if (!page) {
+        if (m_super_physical_regions.is_empty()) {
+            kprintf("MM: no super physical regions available (?)\n");
+        }
+
+        kprintf("MM: no super physical pages available\n");
         ASSERT_NOT_REACHED();
         return {};
     }
+
 #ifdef MM_DEBUG
-    dbgprintf("MM: allocate_supervisor_physical_page vending P%x (%u remaining)\n", m_free_supervisor_physical_pages.last()->paddr().get(), m_free_supervisor_physical_pages.size());
+    dbgprintf("MM: allocate_supervisor_physical_page vending P%p\n", page->paddr().get());
 #endif
-    auto physical_page = m_free_supervisor_physical_pages.take_last();
-    fast_dword_fill((dword*)physical_page->paddr().as_ptr(), 0, PAGE_SIZE / sizeof(dword));
-    return physical_page;
+
+    fast_dword_fill((dword*)page->paddr().as_ptr(), 0, PAGE_SIZE / sizeof(dword));
+
+    m_super_physical_pages_used++;
+
+    return page;
 }
 
 void MemoryManager::enter_process_paging_scope(Process& process)