PageDirectory.cpp

/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Memory.h>
#include <AK/Singleton.h>
#include <Kernel/Process.h>
#include <Kernel/Random.h>
#include <Kernel/Sections.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PageDirectory.h>

namespace Kernel {

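// Global map from CR3 (the physical address of the top-level table) back to
// its PageDirectory, guarded by s_mm_lock at every use site. The VERIFY below
// documents that it must only be accessed with interrupts disabled.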
static AK::Singleton<HashMap<FlatPtr, PageDirectory*>> s_cr3_map;

static HashMap<FlatPtr, PageDirectory*>& cr3_map()
{
    VERIFY_INTERRUPTS_DISABLED();
    return *s_cr3_map;
}

RefPtr<PageDirectory> PageDirectory::find_by_cr3(FlatPtr cr3)
{
    ScopedSpinLock lock(s_mm_lock);
    return cr3_map().get(cr3).value_or({});
}
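
// These symbols are defined by the boot assembly (boot.S); they are the
// initial page tables the CPU runs on until the kernel takes over paging.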
#if ARCH(X86_64)
extern "C" PageDirectoryEntry boot_pml4t[1024];
#endif
extern "C" PageDirectoryEntry* boot_pdpt[4];
extern "C" PageDirectoryEntry boot_pd0[1024];
extern "C" PageDirectoryEntry boot_pd3[1024];
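
// The default constructor is used for the kernel's own page directory. It
// carves out the kernel virtual range plus a 2 MiB identity-mapped range at
// the very bottom of the address space.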
UNMAP_AFTER_INIT PageDirectory::PageDirectory()
{
    m_range_allocator.initialize_with_range(VirtualAddress(KERNEL_BASE + KERNEL_PD_OFFSET), KERNEL_PD_END - (KERNEL_BASE + KERNEL_PD_OFFSET));
    m_identity_range_allocator.initialize_with_range(VirtualAddress(FlatPtr(0x00000000)), 0x00200000);
}
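
// Wrap the boot-time page tables in PhysicalPage objects so this directory
// can hold references to them; MayReturnToFreeList::No ensures they are
// never handed back to the physical page allocator.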
UNMAP_AFTER_INIT void PageDirectory::allocate_kernel_directory()
{
    // Adopt the page tables already set up by boot.S
#if ARCH(X86_64)
    PhysicalAddress boot_pml4t_paddr(virtual_to_low_physical((FlatPtr)boot_pml4t));
    dmesgln("MM: boot_pml4t @ {}", boot_pml4t_paddr);
    m_pml4t = PhysicalPage::create(boot_pml4t_paddr, MayReturnToFreeList::No);
#endif
    PhysicalAddress boot_pdpt_paddr(virtual_to_low_physical((FlatPtr)boot_pdpt));
    PhysicalAddress boot_pd0_paddr(virtual_to_low_physical((FlatPtr)boot_pd0));
    PhysicalAddress boot_pd3_paddr(virtual_to_low_physical((FlatPtr)boot_pd3));
    dmesgln("MM: boot_pdpt @ {}", boot_pdpt_paddr);
    dmesgln("MM: boot_pd0 @ {}", boot_pd0_paddr);
    dmesgln("MM: boot_pd3 @ {}", boot_pd3_paddr);
    m_directory_table = PhysicalPage::create(boot_pdpt_paddr, MayReturnToFreeList::No);
    m_directory_pages[0] = PhysicalPage::create(boot_pd0_paddr, MayReturnToFreeList::No);
    m_directory_pages[3] = PhysicalPage::create(boot_pd3_paddr, MayReturnToFreeList::No);
}
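
// Constructor for userspace page directories: allocate fresh top-level
// tables, then share the kernel's top 1 GiB of mappings so kernel code and
// data are present in every address space.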
PageDirectory::PageDirectory(const RangeAllocator* parent_range_allocator)
{
    constexpr FlatPtr userspace_range_base = 0x00800000;
    constexpr FlatPtr userspace_range_ceiling = USER_RANGE_CEILING;

    ScopedSpinLock lock(s_mm_lock);
    if (parent_range_allocator) {
        m_range_allocator.initialize_from_parent(*parent_range_allocator);
    } else {
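        // No parent allocator: randomize the base with a page-aligned offset
        // of up to 31 MiB, a light form of ASLR for the userspace range.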
        size_t random_offset = (get_fast_random<u8>() % 32 * MiB) & PAGE_MASK;
        u32 base = userspace_range_base + random_offset;
        m_range_allocator.initialize_with_range(VirtualAddress(base), userspace_range_ceiling - base);
    }

    // Set up a userspace page directory
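    // Any of these allocations can fail under memory pressure; we then return
    // early, leaving m_valid unset so the directory is not used.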
#if ARCH(X86_64)
    m_pml4t = MM.allocate_user_physical_page();
    if (!m_pml4t)
        return;
#endif
    m_directory_table = MM.allocate_user_physical_page();
    if (!m_directory_table)
        return;
    m_directory_pages[0] = MM.allocate_user_physical_page();
    if (!m_directory_pages[0])
        return;
    m_directory_pages[1] = MM.allocate_user_physical_page();
    if (!m_directory_pages[1])
        return;
    m_directory_pages[2] = MM.allocate_user_physical_page();
    if (!m_directory_pages[2])
        return;
    // Share the top 1 GiB of kernel-only mappings (>=3GiB or >=KERNEL_BASE)
    m_directory_pages[3] = MM.kernel_page_directory().m_directory_pages[3];
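
    // MM.quickmap_page() temporarily maps a physical page at a reserved
    // kernel address so it can be written through; unquickmap_page() undoes it.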
#if ARCH(X86_64)
    {
        auto& table = *(PageDirectoryPointerTable*)MM.quickmap_page(*m_pml4t);
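        // 0x7 = present | read/write | user/supervisor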
        table.raw[0] = (FlatPtr)m_directory_table->paddr().as_ptr() | 7;
        MM.unquickmap_page();
    }
#endif

    {
        auto& table = *(PageDirectoryPointerTable*)MM.quickmap_page(*m_directory_table);
#if ARCH(I386)
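        // In PAE mode (i386), a PDPTE only carries the present bit; bits 1-2
        // are reserved, and permissions come from the lower paging levels.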
        table.raw[0] = (FlatPtr)m_directory_pages[0]->paddr().as_ptr() | 1;
        table.raw[1] = (FlatPtr)m_directory_pages[1]->paddr().as_ptr() | 1;
        table.raw[2] = (FlatPtr)m_directory_pages[2]->paddr().as_ptr() | 1;
        table.raw[3] = (FlatPtr)m_directory_pages[3]->paddr().as_ptr() | 1;
#else
        table.raw[0] = (FlatPtr)m_directory_pages[0]->paddr().as_ptr() | 7;
        table.raw[1] = (FlatPtr)m_directory_pages[1]->paddr().as_ptr() | 7;
        table.raw[2] = (FlatPtr)m_directory_pages[2]->paddr().as_ptr() | 7;
        table.raw[3] = (FlatPtr)m_directory_pages[3]->paddr().as_ptr() | 7;
#endif

        // 2 ** MAXPHYADDR - 1, where MAXPHYADDR = physical_address_bit_width
        u64 max_physical_address = (1ULL << Processor::current().physical_address_bit_width()) - 1;

        // bit 63 = no execute
        // bit 7  = page size
        // bit 5  = accessed
        // bit 4  = cache disable
        // bit 3  = write through
        // bit 2  = user/supervisor
        // bit 1  = read/write
        // bit 0  = present
        constexpr u64 pdpte_bit_flags = 0x80000000000000BF;
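        // 0xBF covers bits 0-5 and 7; together with bit 63 these are exactly
        // the flags listed above, so ~pdpte_bit_flags isolates the address.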
        // This is to notify us of bugs where we're:
        // 1. Going over what the processor is capable of.
        // 2. Writing into the reserved bits (51:MAXPHYADDR), where doing so
        //    throws a GPF when writing the PDPT pointer out to CR3.
        // The reason we're not checking the page directory's physical address
        // directly is that we're checking for sign extension when putting it
        // into a PDPTE. See issue #4584.
        VERIFY((table.raw[0] & ~pdpte_bit_flags) <= max_physical_address);
        VERIFY((table.raw[1] & ~pdpte_bit_flags) <= max_physical_address);
        VERIFY((table.raw[2] & ~pdpte_bit_flags) <= max_physical_address);
        VERIFY((table.raw[3] & ~pdpte_bit_flags) <= max_physical_address);
        MM.unquickmap_page();
    }

    // Clone bottom 2 MiB of mappings from kernel_page_directory.
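    // (Copying the first PDE shares the kernel's first page table, presumably
    // so low-memory identity mappings stay reachable in every address space.)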
    PageDirectoryEntry buffer;
    auto* kernel_pd = MM.quickmap_pd(MM.kernel_page_directory(), 0);
    memcpy(&buffer, kernel_pd, sizeof(PageDirectoryEntry));
    auto* new_pd = MM.quickmap_pd(*this, 0);
    memcpy(new_pd, &buffer, sizeof(PageDirectoryEntry));

    // If we got here, we successfully created it. Mark the directory valid
    // and register it so it can be found by CR3.
    m_valid = true;
    cr3_map().set(cr3(), this);
}
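
// Remove ourselves from the CR3 map on tear-down. The m_space check skips
// directories whose construction failed before they were registered.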
PageDirectory::~PageDirectory()
{
    ScopedSpinLock lock(s_mm_lock);
    if (m_space)
        cr3_map().remove(cr3());
}

}