@@ -1037,53 +1037,41 @@ void MemoryManager::flush_tlb(PageDirectory const* page_directory, VirtualAddres
 
 PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t pdpt_index)
 {
+    VERIFY_INTERRUPTS_DISABLED();
     VERIFY(s_mm_lock.is_locked_by_current_processor());
-    auto& mm_data = get_data();
-    auto& pte = boot_pd_kernel_pt1023[(KERNEL_QUICKMAP_PD - KERNEL_PT1024_BASE) / PAGE_SIZE];
+
+    VirtualAddress vaddr(KERNEL_QUICKMAP_PD_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE);
+    size_t pte_index = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;
+
+    auto& pte = boot_pd_kernel_pt1023[pte_index];
     auto pd_paddr = directory.m_directory_pages[pdpt_index]->paddr();
     if (pte.physical_page_base() != pd_paddr.get()) {
         pte.set_physical_page_base(pd_paddr.get());
         pte.set_present(true);
         pte.set_writable(true);
         pte.set_user_allowed(false);
-        // Because we must continue to hold the MM lock while we use this
-        // mapping, it is sufficient to only flush on the current CPU. Other
-        // CPUs trying to use this API must wait on the MM lock anyway
-        flush_tlb_local(VirtualAddress(KERNEL_QUICKMAP_PD));
-    } else {
-        // Even though we don't allow this to be called concurrently, it's
-        // possible that this PD was mapped on a different CPU and we don't
-        // broadcast the flush. If so, we still need to flush the TLB.
-        if (mm_data.m_last_quickmap_pd != pd_paddr)
-            flush_tlb_local(VirtualAddress(KERNEL_QUICKMAP_PD));
+        flush_tlb_local(vaddr);
     }
-    mm_data.m_last_quickmap_pd = pd_paddr;
-    return (PageDirectoryEntry*)KERNEL_QUICKMAP_PD;
+    return (PageDirectoryEntry*)vaddr.get();
 }
 
 PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
 {
+    VERIFY_INTERRUPTS_DISABLED();
     VERIFY(s_mm_lock.is_locked_by_current_processor());
-    auto& mm_data = get_data();
-    auto& pte = ((PageTableEntry*)boot_pd_kernel_pt1023)[(KERNEL_QUICKMAP_PT - KERNEL_PT1024_BASE) / PAGE_SIZE];
+
+    VirtualAddress vaddr(KERNEL_QUICKMAP_PT_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE);
+    size_t pte_index = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;
+
+    auto& pte = ((PageTableEntry*)boot_pd_kernel_pt1023)[pte_index];
     if (pte.physical_page_base() != pt_paddr.get()) {
         pte.set_physical_page_base(pt_paddr.get());
         pte.set_present(true);
         pte.set_writable(true);
         pte.set_user_allowed(false);
-        // Because we must continue to hold the MM lock while we use this
-        // mapping, it is sufficient to only flush on the current CPU. Other
-        // CPUs trying to use this API must wait on the MM lock anyway
-        flush_tlb_local(VirtualAddress(KERNEL_QUICKMAP_PT));
-    } else {
-        // Even though we don't allow this to be called concurrently, it's
-        // possible that this PT was mapped on a different CPU and we don't
-        // broadcast the flush. If so, we still need to flush the TLB.
-        if (mm_data.m_last_quickmap_pt != pt_paddr)
-            flush_tlb_local(VirtualAddress(KERNEL_QUICKMAP_PT));
+        flush_tlb_local(vaddr);
     }
-    mm_data.m_last_quickmap_pt = pt_paddr;
-    return (PageTableEntry*)KERNEL_QUICKMAP_PT;
+    return (PageTableEntry*)vaddr.get();
 }
 
 u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)