MemoryManager.h

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Concepts.h>
#include <AK/HashTable.h>
#include <AK/NonnullOwnPtrVector.h>
#include <AK/NonnullRefPtrVector.h>
#include <AK/String.h>
#include <Kernel/Arch/x86/PageFault.h>
#include <Kernel/Arch/x86/TrapFrame.h>
#include <Kernel/Forward.h>
#include <Kernel/SpinLock.h>
#include <Kernel/VM/AllocationStrategy.h>
#include <Kernel/VM/PhysicalPage.h>
#include <Kernel/VM/PhysicalRegion.h>
#include <Kernel/VM/Region.h>
#include <Kernel/VM/VMObject.h>

namespace Kernel {

constexpr bool page_round_up_would_wrap(FlatPtr x)
{
    return x > (explode_byte(0xFF) & ~0xFFF);
}

constexpr FlatPtr page_round_up(FlatPtr x)
{
    FlatPtr rounded = (((FlatPtr)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1));
    // Rounding up >0xfffff000 wraps back to 0. That's never what we want.
    VERIFY(x == 0 || rounded != 0);
    return rounded;
}

constexpr FlatPtr page_round_down(FlatPtr x)
{
    return ((FlatPtr)(x)) & ~(PAGE_SIZE - 1);
}
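
// Worked examples for the rounding helpers above (illustrative; they assume
// the usual PAGE_SIZE of 4096):
//
//     page_round_up(0x1001)   == 0x2000  // 0x1001 + 0xFFF = 0x2000, then masked
//     page_round_up(0x2000)   == 0x2000  // already page-aligned
//     page_round_down(0x1FFF) == 0x1000  // low 12 bits cleared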

inline FlatPtr low_physical_to_virtual(FlatPtr physical)
{
    return physical + kernel_base;
}

inline FlatPtr virtual_to_low_physical(FlatPtr virtual_)
{
    return virtual_ - kernel_base;
}
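
// These two helpers translate between low physical memory and its
// kernel-virtual alias; the mapping is a fixed offset of kernel_base, so the
// conversion is pure arithmetic. For example (illustrative, assuming
// kernel_base == 0xc0000000): low_physical_to_virtual(0x1000) == 0xc0001000.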

enum class UsedMemoryRangeType {
    LowMemory = 0,
    Prekernel,
    Kernel,
    BootModule,
    PhysicalPages,
};
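
// Must be kept in sync with UsedMemoryRangeType above.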
static constexpr StringView UsedMemoryRangeTypeNames[] {
    "Low memory",
    "Prekernel",
    "Kernel",
    "Boot module",
    "Physical Pages"
};

struct UsedMemoryRange {
    UsedMemoryRangeType type {};
    PhysicalAddress start;
    PhysicalAddress end;
};

struct ContiguousReservedMemoryRange {
    PhysicalAddress start;
    PhysicalSize length {};
};

enum class PhysicalMemoryRangeType {
    Usable = 0,
    Reserved,
    ACPI_Reclaimable,
    ACPI_NVS,
    BadMemory,
    Unknown,
};

struct PhysicalMemoryRange {
    PhysicalMemoryRangeType type { PhysicalMemoryRangeType::Unknown };
    PhysicalAddress start;
    PhysicalSize length {};
};
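
// MM provides convenient access to the MemoryManager singleton from anywhere
// in the kernel, e.g. MM.allocate_kernel_region(...).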
#define MM Kernel::MemoryManager::the()

struct MemoryManagerData {
    SpinLock<u8> m_quickmap_in_use;
    u32 m_quickmap_prev_flags;
    PhysicalAddress m_last_quickmap_pd;
    PhysicalAddress m_last_quickmap_pt;
};

extern RecursiveSpinLock s_mm_lock;

class MemoryManager {
    AK_MAKE_ETERNAL
    friend class PageDirectory;
    friend class AnonymousVMObject;
    friend class Region;
    friend class VMObject;

public:
    static MemoryManager& the();
    static bool is_initialized();
    static void initialize(u32 cpu);

    static inline MemoryManagerData& get_data()
    {
        return Processor::current().get_mm_data();
    }

    PageFaultResponse handle_page_fault(PageFault const&);

    void set_page_writable_direct(VirtualAddress, bool);

    void protect_readonly_after_init_memory();
    void unmap_text_after_init();
    void unmap_ksyms_after_init();

    static void enter_process_paging_scope(Process&);
    static void enter_space(Space&);

    bool validate_user_stack_no_lock(Space&, VirtualAddress) const;
    bool validate_user_stack(Space&, VirtualAddress) const;

    enum class ShouldZeroFill {
        No,
        Yes
    };

    bool commit_user_physical_pages(size_t);
    void uncommit_user_physical_pages(size_t);
    NonnullRefPtr<PhysicalPage> allocate_committed_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes);
    RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
    RefPtr<PhysicalPage> allocate_supervisor_physical_page();
    NonnullRefPtrVector<PhysicalPage> allocate_contiguous_supervisor_physical_pages(size_t size);
    void deallocate_physical_page(PhysicalAddress);

    OwnPtr<Region> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
    OwnPtr<Region> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
    OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
    OwnPtr<Region> allocate_kernel_region_identity(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
    OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
    OwnPtr<Region> allocate_kernel_region_with_vmobject(Range const&, VMObject&, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
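
    // Illustrative call site for the region allocators above (the name and
    // access flags are examples, not prescriptive):
    //
    //     auto region = MM.allocate_kernel_region(PAGE_SIZE, "Example", Region::Access::Read | Region::Access::Write);
    //     if (!region)
    //         return ENOMEM;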

    struct SystemMemoryInfo {
        PhysicalSize user_physical_pages { 0 };
        PhysicalSize user_physical_pages_used { 0 };
        PhysicalSize user_physical_pages_committed { 0 };
        PhysicalSize user_physical_pages_uncommitted { 0 };
        PhysicalSize super_physical_pages { 0 };
        PhysicalSize super_physical_pages_used { 0 };
    };

    SystemMemoryInfo get_system_memory_info()
    {
        ScopedSpinLock lock(s_mm_lock);
        return m_system_memory_info;
    }
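
    // get_system_memory_info() returns a snapshot taken under s_mm_lock; an
    // illustrative reader:
    //
    //     auto info = MM.get_system_memory_info();
    //     dbgln("{}/{} user physical pages in use", info.user_physical_pages_used, info.user_physical_pages);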

    template<IteratorFunction<VMObject&> Callback>
    static void for_each_vmobject(Callback callback)
    {
        for (auto& vmobject : MM.m_vmobjects) {
            if (callback(vmobject) == IterationDecision::Break)
                break;
        }
    }

    template<VoidFunction<VMObject&> Callback>
    static void for_each_vmobject(Callback callback)
    {
        for (auto& vmobject : MM.m_vmobjects)
            callback(vmobject);
    }
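
    // The IteratorFunction overload stops early when the callback returns
    // IterationDecision::Break; the VoidFunction overload visits every
    // VMObject. A sketch of a caller (matches() is a hypothetical predicate):
    //
    //     MemoryManager::for_each_vmobject([&](VMObject& vmobject) {
    //         if (matches(vmobject))
    //             return IterationDecision::Break;
    //         return IterationDecision::Continue;
    //     });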

    static Region* find_user_region_from_vaddr(Space&, VirtualAddress);
    static Region* find_user_region_from_vaddr_no_lock(Space&, VirtualAddress);
    static void validate_syscall_preconditions(Space&, RegisterState&);

    void dump_kernel_regions();

    PhysicalPage& shared_zero_page() { return *m_shared_zero_page; }
    PhysicalPage& lazy_committed_page() { return *m_lazy_committed_page; }

    PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }

    Vector<UsedMemoryRange> const& used_memory_ranges() { return m_used_memory_ranges; }
    bool is_allowed_to_mmap_to_userspace(PhysicalAddress, Range const&) const;

    PhysicalPageEntry& get_physical_page_entry(PhysicalAddress);
    PhysicalAddress get_physical_address(PhysicalPage const&);

private:
    MemoryManager();
    ~MemoryManager();

    void initialize_physical_pages();
    void register_reserved_ranges();

    void register_vmobject(VMObject&);
    void unregister_vmobject(VMObject&);
    void register_region(Region&);
    void unregister_region(Region&);

    void protect_kernel_image();
    void parse_memory_map();
    static void flush_tlb_local(VirtualAddress, size_t page_count = 1);
    static void flush_tlb(PageDirectory const*, VirtualAddress, size_t page_count = 1);

    static Region* kernel_region_from_vaddr(VirtualAddress);
    static Region* find_region_from_vaddr(VirtualAddress);

    RefPtr<PhysicalPage> find_free_user_physical_page(bool);
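
    // Quickmapping temporarily maps a single physical page (or page table)
    // at a fixed per-CPU virtual address so the kernel can touch arbitrary
    // physical memory without a permanent mapping; each quickmap_page() must
    // be balanced by unquickmap_page().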
    ALWAYS_INLINE u8* quickmap_page(PhysicalPage& page)
    {
        return quickmap_page(page.paddr());
    }
    u8* quickmap_page(PhysicalAddress const&);
    void unquickmap_page();

    PageDirectoryEntry* quickmap_pd(PageDirectory&, size_t pdpt_index);
    PageTableEntry* quickmap_pt(PhysicalAddress);

    PageTableEntry* pte(PageDirectory&, VirtualAddress);
    PageTableEntry* ensure_pte(PageDirectory&, VirtualAddress);
    void release_pte(PageDirectory&, VirtualAddress, bool);

    RefPtr<PageDirectory> m_kernel_page_directory;
    RefPtr<PhysicalPage> m_shared_zero_page;
    RefPtr<PhysicalPage> m_lazy_committed_page;

    SystemMemoryInfo m_system_memory_info;

    NonnullOwnPtrVector<PhysicalRegion> m_user_physical_regions;
    NonnullOwnPtrVector<PhysicalRegion> m_super_physical_regions;
    OwnPtr<PhysicalRegion> m_physical_pages_region;
    PhysicalPageEntry* m_physical_page_entries { nullptr };
    size_t m_physical_page_entries_count { 0 };

    Region::List m_user_regions;
    Region::List m_kernel_regions;
    Vector<UsedMemoryRange> m_used_memory_ranges;
    Vector<PhysicalMemoryRange> m_physical_memory_ranges;
    Vector<ContiguousReservedMemoryRange> m_reserved_memory_ranges;

    VMObject::List m_vmobjects;
};

template<typename Callback>
void VMObject::for_each_region(Callback callback)
{
    ScopedSpinLock lock(s_mm_lock);
    // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
    // Perhaps VMObject could have a Vector<Region*> with all of its mappers?
    for (auto& region : MM.m_user_regions) {
        if (&region.vmobject() == this)
            callback(region);
    }
    for (auto& region : MM.m_kernel_regions) {
        if (&region.vmobject() == this)
            callback(region);
    }
}

inline bool is_user_address(VirtualAddress vaddr)
{
    return vaddr.get() < USER_RANGE_CEILING;
}

inline bool is_user_range(VirtualAddress vaddr, size_t size)
{
    if (vaddr.offset(size) < vaddr)
        return false;
    return is_user_address(vaddr) && is_user_address(vaddr.offset(size));
}
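
// Note: the first check in is_user_range() rejects ranges whose end wraps
// around the address space, e.g. (illustrative, on 32-bit)
// is_user_range(VirtualAddress(0xffffffff), 0x1000) returns false.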

inline bool is_user_range(Range const& range)
{
    return is_user_range(range.base(), range.size());
}

inline bool PhysicalPage::is_shared_zero_page() const
{
    return this == &MM.shared_zero_page();
}

inline bool PhysicalPage::is_lazy_committed_page() const
{
    return this == &MM.lazy_committed_page();
}

}