// PageDirectory.cpp
/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */
  6. #include <AK/Memory.h>
  7. #include <AK/Singleton.h>
  8. #include <Kernel/Arch/CPU.h>
  9. #include <Kernel/Arch/PageDirectory.h>
  10. #include <Kernel/Memory/MemoryManager.h>
  11. #include <Kernel/Memory/PageDirectory.h>
  12. #include <Kernel/Prekernel/Prekernel.h>
  13. #include <Kernel/Process.h>
  14. #include <Kernel/Random.h>
  15. #include <Kernel/Sections.h>
  16. extern u8 start_of_kernel_image[];
  17. extern u8 end_of_kernel_image[];
  18. namespace Kernel::Memory {
  19. UNMAP_AFTER_INIT NonnullRefPtr<PageDirectory> PageDirectory::must_create_kernel_page_directory()
  20. {
  21. auto directory = adopt_ref_if_nonnull(new (nothrow) PageDirectory).release_nonnull();
  22. auto kernel_range_start = kernel_mapping_base + 2 * MiB; // The first 2 MiB are used for mapping the pre-kernel
  23. MUST(directory->m_range_allocator.initialize_with_range(VirtualAddress(kernel_range_start), KERNEL_PD_END - kernel_range_start));
  24. // Carve out the whole page directory covering the kernel image to make MemoryManager::initialize_physical_pages() happy
  25. FlatPtr start_of_range = ((FlatPtr)start_of_kernel_image & ~(FlatPtr)0x1fffff);
  26. FlatPtr end_of_range = ((FlatPtr)end_of_kernel_image & ~(FlatPtr)0x1fffff) + 0x200000;
  27. MUST(directory->m_range_allocator.try_allocate_specific(VirtualAddress(start_of_range), end_of_range - start_of_range));
  28. return directory;
  29. }
// Allocates and wires up a fresh page directory for a userspace process.
// Userspace PDPT slots below the kernel mapping get their own page directories;
// the slot covering >= kernel_mapping_base is shared with the kernel page
// directory so kernel mappings are visible (but supervisor-only) in every
// address space. Returns ENOMEM if any physical page allocation fails.
ErrorOr<NonnullRefPtr<PageDirectory>> PageDirectory::try_create_for_userspace()
{
    auto directory = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) PageDirectory));
    // NOTE: Take the MM lock since we need it for quickmap.
    SpinlockLocker lock(s_mm_lock);
#if ARCH(X86_64)
    // x86-64 has a 4th level: the PML4 table sits above the PDPT.
    directory->m_pml4t = TRY(MM.allocate_user_physical_page());
#endif
    directory->m_directory_table = TRY(MM.allocate_user_physical_page());
    // Each PDPT entry covers 1 GiB (bits 38:30 of the virtual address).
    auto kernel_pd_index = (kernel_mapping_base >> 30) & 0x1ffu;
    // Give userspace its own page directory for every 1 GiB slot below the kernel.
    for (size_t i = 0; i < kernel_pd_index; i++) {
        directory->m_directory_pages[i] = TRY(MM.allocate_user_physical_page());
    }
    // Share the top 1 GiB of kernel-only mappings (>=kernel_mapping_base)
    directory->m_directory_pages[kernel_pd_index] = MM.kernel_page_directory().m_directory_pages[kernel_pd_index];
#if ARCH(X86_64)
    {
        // Point PML4 entry 0 at the PDPT. Flags 0x7 = present | read/write | user.
        auto& table = *(PageDirectoryPointerTable*)MM.quickmap_page(*directory->m_pml4t);
        table.raw[0] = (FlatPtr)directory->m_directory_table->paddr().as_ptr() | 7;
        MM.unquickmap_page();
    }
#endif
    {
        // Fill in the PDPT entries for every page directory we hold.
        auto& table = *(PageDirectoryPointerTable*)MM.quickmap_page(*directory->m_directory_table);
        for (size_t i = 0; i < sizeof(m_directory_pages) / sizeof(m_directory_pages[0]); i++) {
            if (directory->m_directory_pages[i]) {
#if ARCH(I386)
                // Legacy PAE PDPTEs only allow the present bit (1) here.
                table.raw[i] = (FlatPtr)directory->m_directory_pages[i]->paddr().as_ptr() | 1;
#else
                // 0x7 = present | read/write | user.
                table.raw[i] = (FlatPtr)directory->m_directory_pages[i]->paddr().as_ptr() | 7;
#endif
            }
        }
        // 2 ** MAXPHYADDR - 1
        // Where MAXPHYADDR = physical_address_bit_width
        u64 max_physical_address = (1ULL << Processor::current().physical_address_bit_width()) - 1;
        // bit 63 = no execute
        // bit 7 = page size
        // bit 5 = accessed
        // bit 4 = cache disable
        // bit 3 = write through
        // bit 2 = user/supervisor
        // bit 1 = read/write
        // bit 0 = present
        constexpr u64 pdpte_bit_flags = 0x80000000000000BF;
        // This is to notify us of bugs where we're:
        // 1. Going over what the processor is capable of.
        // 2. Writing into the reserved bits (51:MAXPHYADDR), where doing so throws a GPF
        //    when writing out the PDPT pointer to CR3.
        // The reason we're not checking the page directory's physical address directly is because
        // we're checking for sign extension when putting it into a PDPTE. See issue #4584.
        for (auto table_entry : table.raw)
            VERIFY((table_entry & ~pdpte_bit_flags) <= max_physical_address);
        MM.unquickmap_page();
    }
    // Make the directory discoverable (e.g. for CR3 -> PageDirectory lookups).
    register_page_directory(directory);
    return directory;
}
// Default construction only; the actual table pages are populated later by
// try_create_for_userspace() or adopted by allocate_kernel_directory().
PageDirectory::PageDirectory() = default;
  89. UNMAP_AFTER_INIT void PageDirectory::allocate_kernel_directory()
  90. {
  91. // Adopt the page tables already set up by boot.S
  92. #if ARCH(X86_64)
  93. dmesgln("MM: boot_pml4t @ {}", boot_pml4t);
  94. m_pml4t = PhysicalPage::create(boot_pml4t, MayReturnToFreeList::No);
  95. #endif
  96. dmesgln("MM: boot_pdpt @ {}", boot_pdpt);
  97. dmesgln("MM: boot_pd0 @ {}", boot_pd0);
  98. dmesgln("MM: boot_pd_kernel @ {}", boot_pd_kernel);
  99. m_directory_table = PhysicalPage::create(boot_pdpt, MayReturnToFreeList::No);
  100. m_directory_pages[0] = PhysicalPage::create(boot_pd0, MayReturnToFreeList::No);
  101. m_directory_pages[(kernel_mapping_base >> 30) & 0x1ff] = PhysicalPage::create(boot_pd_kernel, MayReturnToFreeList::No);
  102. }
  103. PageDirectory::~PageDirectory()
  104. {
  105. if (is_cr3_initialized()) {
  106. SpinlockLocker lock(s_mm_lock);
  107. deregister_page_directory(this);
  108. }
  109. }
  110. }