/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Memory.h>
#include <AK/Singleton.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/PageDirectory.h>
#include <Kernel/Prekernel/Prekernel.h>
#include <Kernel/Process.h>
#include <Kernel/Random.h>
#include <Kernel/Sections.h>

extern u8 end_of_kernel_image[];

namespace Kernel::Memory {

static Singleton<IntrusiveRedBlackTree<&PageDirectory::m_tree_node>> s_cr3_map;

static IntrusiveRedBlackTree<&PageDirectory::m_tree_node>& cr3_map()
{
    VERIFY_INTERRUPTS_DISABLED();
    return *s_cr3_map;
}
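
// Every userspace PageDirectory registers itself in cr3_map() keyed by its CR3 value,
// so a raw CR3 (e.g. one read off a CPU) can be mapped back to the owning PageDirectory.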
RefPtr<PageDirectory> PageDirectory::find_by_cr3(FlatPtr cr3)
{
    SpinlockLocker lock(s_mm_lock);
    return cr3_map().find(cr3);
}

UNMAP_AFTER_INIT NonnullRefPtr<PageDirectory> PageDirectory::must_create_kernel_page_directory()
{
    auto directory = adopt_ref_if_nonnull(new (nothrow) PageDirectory).release_nonnull();

    // make sure this starts in a new page directory to make MemoryManager::initialize_physical_pages() happy
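    // (Mask off the low 21 bits and add 0x200000, i.e. round end_of_kernel_image up to the next 2 MiB boundary.)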
    FlatPtr start_of_range = ((FlatPtr)end_of_kernel_image & ~(FlatPtr)0x1fffff) + 0x200000;
    MUST(directory->m_range_allocator.initialize_with_range(VirtualAddress(start_of_range), KERNEL_PD_END - start_of_range));

    return directory;
}

ErrorOr<NonnullRefPtr<PageDirectory>> PageDirectory::try_create_for_userspace(VirtualRangeAllocator const* parent_range_allocator)
{
    constexpr FlatPtr userspace_range_base = USER_RANGE_BASE;
    FlatPtr const userspace_range_ceiling = USER_RANGE_CEILING;

    auto directory = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) PageDirectory));

    if (parent_range_allocator) {
        TRY(directory->m_range_allocator.initialize_from_parent(*parent_range_allocator));
    } else {
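        // No parent allocator: start the new address space at a randomized, page-aligned offset (0-31 MiB) above the userspace range base.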
        size_t random_offset = (get_fast_random<u8>() % 32 * MiB) & PAGE_MASK;
        u32 base = userspace_range_base + random_offset;
        TRY(directory->m_range_allocator.initialize_with_range(VirtualAddress(base), userspace_range_ceiling - base));
    }

    // NOTE: Take the MM lock since we need it for quickmap.
    SpinlockLocker lock(s_mm_lock);

#if ARCH(X86_64)
    directory->m_pml4t = MM.allocate_user_physical_page();
    if (!directory->m_pml4t)
        return ENOMEM;
#endif

    directory->m_directory_table = MM.allocate_user_physical_page();
    if (!directory->m_directory_table)
        return ENOMEM;
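
    // Each PDPT entry covers 1 GiB, so bits 30..38 of kernel_mapping_base select the slot where the kernel mappings begin.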
    auto kernel_pd_index = (kernel_mapping_base >> 30) & 0x1ffu;
    for (size_t i = 0; i < kernel_pd_index; i++) {
        directory->m_directory_pages[i] = MM.allocate_user_physical_page();
        if (!directory->m_directory_pages[i])
            return ENOMEM;
    }

    // Share the top 1 GiB of kernel-only mappings (>=kernel_mapping_base)
    directory->m_directory_pages[kernel_pd_index] = MM.kernel_page_directory().m_directory_pages[kernel_pd_index];
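
    // On x86_64, the single PML4 entry points at the PDPT; flag value 7 = Present | Read/Write | User.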
#if ARCH(X86_64)
    {
        auto& table = *(PageDirectoryPointerTable*)MM.quickmap_page(*directory->m_pml4t);
        table.raw[0] = (FlatPtr)directory->m_directory_table->paddr().as_ptr() | 7;
        MM.unquickmap_page();
    }
#endif
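
    // Fill in the PDPT entries via quickmap. On i386 (PAE), the R/W and U/S bits are reserved in PDPTEs,
    // so only Present (1) is set; on x86_64 they are valid, so 7 (Present | Read/Write | User) is used.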
    {
        auto& table = *(PageDirectoryPointerTable*)MM.quickmap_page(*directory->m_directory_table);
        for (size_t i = 0; i < sizeof(m_directory_pages) / sizeof(m_directory_pages[0]); i++) {
            if (directory->m_directory_pages[i]) {
#if ARCH(I386)
                table.raw[i] = (FlatPtr)directory->m_directory_pages[i]->paddr().as_ptr() | 1;
#else
                table.raw[i] = (FlatPtr)directory->m_directory_pages[i]->paddr().as_ptr() | 7;
#endif
            }
        }

        // 2 ** MAXPHYADDR - 1
        // Where MAXPHYADDR = physical_address_bit_width
        u64 max_physical_address = (1ULL << Processor::current().physical_address_bit_width()) - 1;

        // bit 63 = no execute
        // bit 7 = page size
        // bit 5 = accessed
        // bit 4 = cache disable
        // bit 3 = write through
        // bit 2 = user/supervisor
        // bit 1 = read/write
        // bit 0 = present
        constexpr u64 pdpte_bit_flags = 0x80000000000000BF;
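        // (0x80000000000000BF = bit 63 plus bits 0-5 and 7, i.e. exactly the flag bits listed above.)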

        // This is to notify us of bugs where we're:
        // 1. Going over what the processor is capable of.
        // 2. Writing into the reserved bits (51:MAXPHYADDR), where doing so throws a GPF
        //    when writing out the PDPT pointer to CR3.
        // The reason we're not checking the page directory's physical address directly is because
        // we're checking for sign extension when putting it into a PDPTE. See issue #4584.
        for (auto table_entry : table.raw)
            VERIFY((table_entry & ~pdpte_bit_flags) <= max_physical_address);

        MM.unquickmap_page();
    }

    cr3_map().insert(directory->cr3(), directory);
    return directory;
}

PageDirectory::PageDirectory()
{
}

UNMAP_AFTER_INIT void PageDirectory::allocate_kernel_directory()
{
    // Adopt the page tables already set up by boot.S
#if ARCH(X86_64)
    dmesgln("MM: boot_pml4t @ {}", boot_pml4t);
    m_pml4t = PhysicalPage::create(boot_pml4t, MayReturnToFreeList::No);
#endif
    dmesgln("MM: boot_pdpt @ {}", boot_pdpt);
    dmesgln("MM: boot_pd0 @ {}", boot_pd0);
    dmesgln("MM: boot_pd_kernel @ {}", boot_pd_kernel);
    m_directory_table = PhysicalPage::create(boot_pdpt, MayReturnToFreeList::No);
    m_directory_pages[0] = PhysicalPage::create(boot_pd0, MayReturnToFreeList::No);
    m_directory_pages[(kernel_mapping_base >> 30) & 0x1ff] = PhysicalPage::create(boot_pd_kernel, MayReturnToFreeList::No);
}

PageDirectory::~PageDirectory()
{
    SpinlockLocker lock(s_mm_lock);
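    // The kernel page directory has no associated address space and was never added to cr3_map(),
    // so only remove entries for directories that belong to a space.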
    if (m_space)
        cr3_map().remove(cr3());
}

}