MemoryManager.h

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Concepts.h>
#include <AK/HashTable.h>
#include <AK/NonnullOwnPtrVector.h>
#include <AK/NonnullRefPtrVector.h>
#include <AK/String.h>
#include <Kernel/Arch/x86/PageFault.h>
#include <Kernel/Arch/x86/TrapFrame.h>
#include <Kernel/Forward.h>
#include <Kernel/SpinLock.h>
#include <Kernel/VM/AllocationStrategy.h>
#include <Kernel/VM/PhysicalPage.h>
#include <Kernel/VM/PhysicalRegion.h>
#include <Kernel/VM/Region.h>
#include <Kernel/VM/VMObject.h>

namespace Kernel {
constexpr bool page_round_up_would_wrap(FlatPtr x)
{
    return x > (explode_byte(0xFF) & ~0xFFF);
}

constexpr FlatPtr page_round_up(FlatPtr x)
{
    FlatPtr rounded = (((FlatPtr)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1));
    // Rounding up >0xfffff000 wraps back to 0. That's never what we want.
    VERIFY(x == 0 || rounded != 0);
    return rounded;
}

constexpr FlatPtr page_round_down(FlatPtr x)
{
    return ((FlatPtr)(x)) & ~(PAGE_SIZE - 1);
}
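
// Editor's note (example values, assuming the usual 4 KiB PAGE_SIZE):
//   page_round_up(0x1001)   == 0x2000
//   page_round_up(0x1000)   == 0x1000
//   page_round_down(0x1fff) == 0x1000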

// Translates a kernel virtual address to its low physical address by
// subtracting the kernel's physical-to-virtual load offset.
inline FlatPtr virtual_to_low_physical(FlatPtr virtual_)
{
    return virtual_ - physical_to_virtual_offset;
}

enum class UsedMemoryRangeType {
    LowMemory = 0,
    Prekernel,
    Kernel,
    BootModule,
    PhysicalPages,
};

// Keep these names in sync with the UsedMemoryRangeType order above.
static constexpr StringView UsedMemoryRangeTypeNames[] {
    "Low memory",
    "Prekernel",
    "Kernel",
    "Boot module",
    "Physical Pages"
};

struct UsedMemoryRange {
    UsedMemoryRangeType type {};
    PhysicalAddress start;
    PhysicalAddress end;
};

struct ContiguousReservedMemoryRange {
    PhysicalAddress start;
    PhysicalSize length {};
};
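
// Editor's note: the categories below mirror the physical memory map reported
// by the bootloader/firmware (E820-style usable, reserved, ACPI reclaimable,
// ACPI NVS, and bad-memory classes).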
enum class PhysicalMemoryRangeType {
    Usable = 0,
    Reserved,
    ACPI_Reclaimable,
    ACPI_NVS,
    BadMemory,
    Unknown,
};

struct PhysicalMemoryRange {
    PhysicalMemoryRangeType type { PhysicalMemoryRangeType::Unknown };
    PhysicalAddress start;
    PhysicalSize length {};
};

#define MM Kernel::MemoryManager::the()

// Per-processor state for the quickmap mechanism (temporarily mapping a single
// physical page into a reserved kernel virtual address).
struct MemoryManagerData {
    static ProcessorSpecificDataID processor_specific_data_id() { return ProcessorSpecificDataID::MemoryManager; }

    SpinLock<u8> m_quickmap_in_use;
    u32 m_quickmap_prev_flags;

    PhysicalAddress m_last_quickmap_pd;
    PhysicalAddress m_last_quickmap_pt;
};

extern RecursiveSpinLock s_mm_lock;

class MemoryManager {
    AK_MAKE_ETERNAL
    friend class PageDirectory;
    friend class AnonymousVMObject;
    friend class Region;
    friend class VMObject;

public:
    static MemoryManager& the();
    static bool is_initialized();

    static void initialize(u32 cpu);

    static inline MemoryManagerData& get_data()
    {
        return ProcessorSpecific<MemoryManagerData>::get();
    }
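
    // Usage sketch (editor's example): fetch this CPU's per-processor MM state.
    //     auto& mm_data = MemoryManager::get_data();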

    PageFaultResponse handle_page_fault(PageFault const&);

    void set_page_writable_direct(VirtualAddress, bool);

    void protect_readonly_after_init_memory();
    void unmap_text_after_init();
    void unmap_ksyms_after_init();

    static void enter_process_paging_scope(Process&);
    static void enter_space(Space&);

    bool validate_user_stack_no_lock(Space&, VirtualAddress) const;
    bool validate_user_stack(Space&, VirtualAddress) const;

    enum class ShouldZeroFill {
        No,
        Yes
    };

    bool commit_user_physical_pages(size_t);
    void uncommit_user_physical_pages(size_t);
    NonnullRefPtr<PhysicalPage> allocate_committed_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes);
    RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
    RefPtr<PhysicalPage> allocate_supervisor_physical_page();
    NonnullRefPtrVector<PhysicalPage> allocate_contiguous_supervisor_physical_pages(size_t size);
    void deallocate_physical_page(PhysicalAddress);

    OwnPtr<Region> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
    OwnPtr<Region> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
    OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
    OwnPtr<Region> allocate_kernel_region_identity(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
    OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
    OwnPtr<Region> allocate_kernel_region_with_vmobject(Range const&, VMObject&, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
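
    // Usage sketch (editor's example; allocation can fail, returning a null OwnPtr):
    //     auto region = MM.allocate_kernel_region(
    //         PAGE_SIZE, "Example", Region::Access::Read | Region::Access::Write);
    //     if (!region)
    //         return ENOMEM;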

    struct SystemMemoryInfo {
        PhysicalSize user_physical_pages { 0 };
        PhysicalSize user_physical_pages_used { 0 };
        PhysicalSize user_physical_pages_committed { 0 };
        PhysicalSize user_physical_pages_uncommitted { 0 };
        PhysicalSize super_physical_pages { 0 };
        PhysicalSize super_physical_pages_used { 0 };
    };

    SystemMemoryInfo get_system_memory_info()
    {
        ScopedSpinLock lock(s_mm_lock);
        return m_system_memory_info;
    }
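
    // Editor's example: derive the unused user-page count from a snapshot.
    //     auto info = MM.get_system_memory_info();
    //     auto free_pages = info.user_physical_pages - info.user_physical_pages_used;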

    template<IteratorFunction<VMObject&> Callback>
    static void for_each_vmobject(Callback callback)
    {
        ScopedSpinLock locker(s_mm_lock);
        for (auto& vmobject : MM.m_vmobjects) {
            if (callback(vmobject) == IterationDecision::Break)
                break;
        }
    }

    template<VoidFunction<VMObject&> Callback>
    static void for_each_vmobject(Callback callback)
    {
        for (auto& vmobject : MM.m_vmobjects)
            callback(vmobject);
    }
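
    // Usage sketch (editor's example): count live VMObjects via the
    // IteratorFunction overload above.
    //     size_t count = 0;
    //     MemoryManager::for_each_vmobject([&](VMObject&) {
    //         ++count;
    //         return IterationDecision::Continue;
    //     });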

    static Region* find_user_region_from_vaddr(Space&, VirtualAddress);
    static Region* find_user_region_from_vaddr_no_lock(Space&, VirtualAddress);
    static void validate_syscall_preconditions(Space&, RegisterState const&);

    void dump_kernel_regions();

    PhysicalPage& shared_zero_page() { return *m_shared_zero_page; }
    PhysicalPage& lazy_committed_page() { return *m_lazy_committed_page; }

    PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }

    Vector<UsedMemoryRange> const& used_memory_ranges() { return m_used_memory_ranges; }
    bool is_allowed_to_mmap_to_userspace(PhysicalAddress, Range const&) const;

    PhysicalPageEntry& get_physical_page_entry(PhysicalAddress);
    PhysicalAddress get_physical_address(PhysicalPage const&);

private:
    MemoryManager();
    ~MemoryManager();

    void initialize_physical_pages();
    void register_reserved_ranges();

    void register_vmobject(VMObject&);
    void unregister_vmobject(VMObject&);
    void register_region(Region&);
    void unregister_region(Region&);

    void protect_kernel_image();
    void parse_memory_map();
    static void flush_tlb_local(VirtualAddress, size_t page_count = 1);
    static void flush_tlb(PageDirectory const*, VirtualAddress, size_t page_count = 1);

    static Region* kernel_region_from_vaddr(VirtualAddress);
    static Region* find_region_from_vaddr(VirtualAddress);

    RefPtr<PhysicalPage> find_free_user_physical_page(bool);

    ALWAYS_INLINE u8* quickmap_page(PhysicalPage& page)
    {
        return quickmap_page(page.paddr());
    }
    u8* quickmap_page(PhysicalAddress const&);
    void unquickmap_page();
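
    // Typical quickmap pairing (editor's sketch; physical_page is hypothetical):
    //     u8* ptr = quickmap_page(physical_page);
    //     memset(ptr, 0, PAGE_SIZE);
    //     unquickmap_page();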

    PageDirectoryEntry* quickmap_pd(PageDirectory&, size_t pdpt_index);
    PageTableEntry* quickmap_pt(PhysicalAddress);

    PageTableEntry* pte(PageDirectory&, VirtualAddress);
    PageTableEntry* ensure_pte(PageDirectory&, VirtualAddress);
    void release_pte(PageDirectory&, VirtualAddress, bool);

    RefPtr<PageDirectory> m_kernel_page_directory;

    RefPtr<PhysicalPage> m_shared_zero_page;
    RefPtr<PhysicalPage> m_lazy_committed_page;

    SystemMemoryInfo m_system_memory_info;

    NonnullOwnPtrVector<PhysicalRegion> m_user_physical_regions;
    NonnullOwnPtrVector<PhysicalRegion> m_super_physical_regions;
    OwnPtr<PhysicalRegion> m_physical_pages_region;
    PhysicalPageEntry* m_physical_page_entries { nullptr };
    size_t m_physical_page_entries_count { 0 };

    Region::ListInMemoryManager m_user_regions;
    Region::ListInMemoryManager m_kernel_regions;
    Vector<UsedMemoryRange> m_used_memory_ranges;
    Vector<PhysicalMemoryRange> m_physical_memory_ranges;
    Vector<ContiguousReservedMemoryRange> m_reserved_memory_ranges;

    VMObject::List m_vmobjects;
};

inline bool is_user_address(VirtualAddress vaddr)
{
    return vaddr.get() < USER_RANGE_CEILING;
}

inline bool is_user_range(VirtualAddress vaddr, size_t size)
{
    if (vaddr.offset(size) < vaddr)
        return false;
    return is_user_address(vaddr) && is_user_address(vaddr.offset(size));
}
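
// Editor's note: the first check rejects ranges whose end wraps around the
// address space. For example, on 32-bit,
// is_user_range(VirtualAddress(0xfffff000), 0x2000) is false because
// 0xfffff000 + 0x2000 overflows to 0x1000.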

inline bool is_user_range(Range const& range)
{
    return is_user_range(range.base(), range.size());
}

inline bool PhysicalPage::is_shared_zero_page() const
{
    return this == &MM.shared_zero_page();
}

inline bool PhysicalPage::is_lazy_committed_page() const
{
    return this == &MM.lazy_committed_page();
}

}