MemoryManager.h

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Concepts.h>
#include <AK/HashTable.h>
#include <AK/NonnullOwnPtrVector.h>
#include <AK/NonnullRefPtrVector.h>
#include <Kernel/Forward.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/AllocationStrategy.h>
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/Memory/PhysicalRegion.h>
#include <Kernel/Memory/Region.h>
#include <Kernel/Memory/VMObject.h>

namespace Kernel {

class PageDirectoryEntry;

}

namespace Kernel::Memory {

constexpr bool page_round_up_would_wrap(FlatPtr x)
{
    return x > (explode_byte(0xFF) & ~0xFFF);
}
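
// The check above relies on explode_byte(0xFF) producing a FlatPtr with all
// bits set; masking off the low 12 bits leaves the highest page-aligned
// address, and rounding anything beyond it up would wrap to 0. For example,
// assuming a 64-bit FlatPtr and PAGE_SIZE == 4096:
//
//     page_round_up_would_wrap(0xffff'ffff'ffff'f001) == true
//     page_round_up_would_wrap(0xffff'ffff'ffff'f000) == false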

constexpr FlatPtr page_round_up(FlatPtr x)
{
    FlatPtr rounded = (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
    // Rounding up >0xfffff000 wraps back to 0. That's never what we want.
    VERIFY(x == 0 || rounded != 0);
    return rounded;
}

constexpr FlatPtr page_round_down(FlatPtr x)
{
    return x & ~(PAGE_SIZE - 1);
}
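
// Worked examples, assuming PAGE_SIZE == 4096:
//
//     page_round_up(0x1234)   == 0x2000
//     page_round_up(0x1000)   == 0x1000
//     page_round_down(0x1234) == 0x1000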

// Translate a kernel virtual address back to a low physical address by
// subtracting the kernel's fixed physical-to-virtual mapping offset.
inline FlatPtr virtual_to_low_physical(FlatPtr virtual_)
{
    return virtual_ - physical_to_virtual_offset;
}

enum class UsedMemoryRangeType {
    LowMemory = 0,
    Prekernel,
    Kernel,
    BootModule,
    PhysicalPages,
};

static constexpr StringView UsedMemoryRangeTypeNames[] {
    "Low memory",
    "Prekernel",
    "Kernel",
    "Boot module",
    "Physical Pages",
};
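
// The array is indexed by the enum above. A minimal lookup sketch
// (hypothetical usage; to_underlying comes from AK):
//
//     auto name = UsedMemoryRangeTypeNames[to_underlying(range.type)];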

struct UsedMemoryRange {
    UsedMemoryRangeType type {};
    PhysicalAddress start;
    PhysicalAddress end;
};

struct ContiguousReservedMemoryRange {
    PhysicalAddress start;
    PhysicalSize length {};
};

enum class PhysicalMemoryRangeType {
    Usable = 0,
    Reserved,
    ACPI_Reclaimable,
    ACPI_NVS,
    BadMemory,
    Unknown,
};

struct PhysicalMemoryRange {
    PhysicalMemoryRangeType type { PhysicalMemoryRangeType::Unknown };
    PhysicalAddress start;
    PhysicalSize length {};
};

#define MM Kernel::Memory::MemoryManager::the()
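
// The MM macro is the conventional shorthand for reaching the singleton,
// e.g. (hypothetical call site):
//
//     auto page = MM.allocate_user_physical_page();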

struct MemoryManagerData {
    static ProcessorSpecificDataID processor_specific_data_id() { return ProcessorSpecificDataID::MemoryManager; }

    Spinlock m_quickmap_in_use;
    u32 m_quickmap_prev_flags;

    PhysicalAddress m_last_quickmap_pd;
    PhysicalAddress m_last_quickmap_pt;
};

// NOLINTNEXTLINE(readability-redundant-declaration) FIXME: Why do we declare this here *and* in Thread.h?
extern RecursiveSpinlock s_mm_lock;

// This class represents a set of committed physical pages.
// When you ask MemoryManager to commit pages for you, you get one of these in return.
// You can allocate pages from it via `take_one()`.
// It will uncommit any (unallocated) remaining pages when destroyed.
class CommittedPhysicalPageSet {
    AK_MAKE_NONCOPYABLE(CommittedPhysicalPageSet);

public:
    CommittedPhysicalPageSet(Badge<MemoryManager>, size_t page_count)
        : m_page_count(page_count)
    {
    }

    CommittedPhysicalPageSet(CommittedPhysicalPageSet&& other)
        : m_page_count(exchange(other.m_page_count, 0))
    {
    }

    ~CommittedPhysicalPageSet();

    bool is_empty() const { return m_page_count == 0; }
    size_t page_count() const { return m_page_count; }

    [[nodiscard]] NonnullRefPtr<PhysicalPage> take_one();
    void uncommit_one();

    void operator=(CommittedPhysicalPageSet&&) = delete;

private:
    size_t m_page_count { 0 };
};
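
// A minimal usage sketch (hypothetical call site; error handling elided):
//
//     auto committed_pages = TRY(MM.commit_user_physical_pages(16));
//     auto page = committed_pages.take_one(); // NonnullRefPtr<PhysicalPage>
//     // Any pages not taken are uncommitted when committed_pages goes out
//     // of scope.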

class MemoryManager {
    AK_MAKE_ETERNAL
    friend class PageDirectory;
    friend class AnonymousVMObject;
    friend class Region;
    friend class VMObject;

public:
    static MemoryManager& the();
    static bool is_initialized();

    static void initialize(u32 cpu);

    static inline MemoryManagerData& get_data()
    {
        return ProcessorSpecific<MemoryManagerData>::get();
    }

    PageFaultResponse handle_page_fault(PageFault const&);

    void set_page_writable_direct(VirtualAddress, bool);

    void protect_readonly_after_init_memory();
    void unmap_text_after_init();
    void unmap_ksyms_after_init();

    static void enter_process_address_space(Process&);
    static void enter_address_space(AddressSpace&);

    bool validate_user_stack_no_lock(AddressSpace&, VirtualAddress) const;
    bool validate_user_stack(AddressSpace&, VirtualAddress) const;

    enum class ShouldZeroFill {
        No,
        Yes
    };

    ErrorOr<CommittedPhysicalPageSet> commit_user_physical_pages(size_t page_count);
    void uncommit_user_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count);

    NonnullRefPtr<PhysicalPage> allocate_committed_user_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
    RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
    RefPtr<PhysicalPage> allocate_supervisor_physical_page();
    NonnullRefPtrVector<PhysicalPage> allocate_contiguous_supervisor_physical_pages(size_t size);
    void deallocate_physical_page(PhysicalAddress);

    ErrorOr<NonnullOwnPtr<Region>> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
    ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
    ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
    ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
    ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region_with_vmobject(VirtualRange const&, VMObject&, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
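
    // A minimal call-site sketch (hypothetical region name and access mode):
    //
    //     auto region = TRY(MM.allocate_kernel_region(PAGE_SIZE, "Example", Region::Access::ReadWrite));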

    // Note: despite the PhysicalSize type, these fields are page counts, not byte counts.
    struct SystemMemoryInfo {
        PhysicalSize user_physical_pages { 0 };
        PhysicalSize user_physical_pages_used { 0 };
        PhysicalSize user_physical_pages_committed { 0 };
        PhysicalSize user_physical_pages_uncommitted { 0 };
        PhysicalSize super_physical_pages { 0 };
        PhysicalSize super_physical_pages_used { 0 };
    };

    SystemMemoryInfo get_system_memory_info()
    {
        SpinlockLocker lock(s_mm_lock);
        return m_system_memory_info;
    }

    template<IteratorFunction<VMObject&> Callback>
    static void for_each_vmobject(Callback callback)
    {
        VMObject::all_instances().with([&](auto& list) {
            for (auto& vmobject : list) {
                if (callback(vmobject) == IterationDecision::Break)
                    break;
            }
        });
    }

    template<VoidFunction<VMObject&> Callback>
    static void for_each_vmobject(Callback callback)
    {
        VMObject::all_instances().with([&](auto& list) {
            for (auto& vmobject : list) {
                callback(vmobject);
            }
        });
    }
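
    // The IteratorFunction overload is chosen when the callback returns an
    // IterationDecision, the VoidFunction overload when it returns nothing.
    // A usage sketch (hypothetical callback):
    //
    //     MemoryManager::for_each_vmobject([](VMObject& vmobject) {
    //         if (vmobject.is_anonymous())
    //             return IterationDecision::Break;
    //         return IterationDecision::Continue;
    //     });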

    static Region* find_user_region_from_vaddr(AddressSpace&, VirtualAddress);
    static Region* find_user_region_from_vaddr_no_lock(AddressSpace&, VirtualAddress);
    static void validate_syscall_preconditions(AddressSpace&, RegisterState const&);

    void dump_kernel_regions();

    PhysicalPage& shared_zero_page() { return *m_shared_zero_page; }
    PhysicalPage& lazy_committed_page() { return *m_lazy_committed_page; }

    PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }

    Vector<UsedMemoryRange> const& used_memory_ranges() { return m_used_memory_ranges; }
    bool is_allowed_to_mmap_to_userspace(PhysicalAddress, VirtualRange const&) const;

    PhysicalPageEntry& get_physical_page_entry(PhysicalAddress);
    PhysicalAddress get_physical_address(PhysicalPage const&);

    void copy_physical_page(PhysicalPage&, u8 page_buffer[PAGE_SIZE]);

private:
    MemoryManager();
    ~MemoryManager();

    void initialize_physical_pages();
    void register_reserved_ranges();

    void register_region(Region&);
    void unregister_region(Region&);

    void protect_kernel_image();
    void parse_memory_map();
    static void flush_tlb_local(VirtualAddress, size_t page_count = 1);
    static void flush_tlb(PageDirectory const*, VirtualAddress, size_t page_count = 1);

    static Region* kernel_region_from_vaddr(VirtualAddress);
    static Region* find_region_from_vaddr(VirtualAddress);

    RefPtr<PhysicalPage> find_free_user_physical_page(bool);
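
    // Quickmapping temporarily maps one physical page at a fixed per-CPU
    // virtual address so the kernel can touch arbitrary physical memory
    // without setting up a permanent mapping. Each quickmap_page() must be
    // paired with a matching unquickmap_page().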

    ALWAYS_INLINE u8* quickmap_page(PhysicalPage& page)
    {
        return quickmap_page(page.paddr());
    }
    u8* quickmap_page(PhysicalAddress const&);
    void unquickmap_page();

    PageDirectoryEntry* quickmap_pd(PageDirectory&, size_t pdpt_index);
    PageTableEntry* quickmap_pt(PhysicalAddress);

    PageTableEntry* pte(PageDirectory&, VirtualAddress);
    PageTableEntry* ensure_pte(PageDirectory&, VirtualAddress);
    void release_pte(PageDirectory&, VirtualAddress, bool);

    RefPtr<PageDirectory> m_kernel_page_directory;

    RefPtr<PhysicalPage> m_shared_zero_page;
    RefPtr<PhysicalPage> m_lazy_committed_page;

    SystemMemoryInfo m_system_memory_info;

    NonnullOwnPtrVector<PhysicalRegion> m_user_physical_regions;
    OwnPtr<PhysicalRegion> m_super_physical_region;
    OwnPtr<PhysicalRegion> m_physical_pages_region;
    PhysicalPageEntry* m_physical_page_entries { nullptr };
    size_t m_physical_page_entries_count { 0 };

    RedBlackTree<FlatPtr, Region*> m_kernel_regions;
    Vector<UsedMemoryRange> m_used_memory_ranges;
    Vector<PhysicalMemoryRange> m_physical_memory_ranges;
    Vector<ContiguousReservedMemoryRange> m_reserved_memory_ranges;
};

inline bool is_user_address(VirtualAddress vaddr)
{
    return vaddr.get() < USER_RANGE_CEILING;
}

inline bool is_user_range(VirtualAddress vaddr, size_t size)
{
    if (vaddr.offset(size) < vaddr)
        return false;
    if (!is_user_address(vaddr))
        return false;
    if (size <= 1)
        return true;
    return is_user_address(vaddr.offset(size - 1));
}
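
// Note: the (size - 1) offset checks the last byte actually covered by the
// range, so a range that ends exactly at USER_RANGE_CEILING still counts as
// a user range.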

inline bool is_user_range(VirtualRange const& range)
{
    return is_user_range(range.base(), range.size());
}

inline bool PhysicalPage::is_shared_zero_page() const
{
    return this == &MM.shared_zero_page();
}

inline bool PhysicalPage::is_lazy_committed_page() const
{
    return this == &MM.lazy_committed_page();
}

inline ErrorOr<Memory::VirtualRange> expand_range_to_page_boundaries(FlatPtr address, size_t size)
{
    if (Memory::page_round_up_would_wrap(size))
        return EINVAL;

    if ((address + size) < address)
        return EINVAL;

    if (Memory::page_round_up_would_wrap(address + size))
        return EINVAL;

    auto base = VirtualAddress { address }.page_base();
    auto end = Memory::page_round_up(address + size);

    return Memory::VirtualRange { base, end - base.get() };
}
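
// Worked example, assuming PAGE_SIZE == 4096:
//
//     expand_range_to_page_boundaries(0x1234, 0x100)
//     // base = 0x1000, end = page_round_up(0x1334) = 0x2000
//     // => VirtualRange { 0x1000, 0x1000 }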

}