/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#pragma once

#include <AK/HashTable.h>
#include <AK/NonnullRefPtrVector.h>
#include <AK/String.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Forward.h>
#include <Kernel/SpinLock.h>
#include <Kernel/VM/AllocationStrategy.h>
#include <Kernel/VM/PhysicalPage.h>
#include <Kernel/VM/Region.h>
#include <Kernel/VM/VMObject.h>

namespace Kernel {

#define PAGE_ROUND_UP(x) ((((FlatPtr)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1)))
#define PAGE_ROUND_DOWN(x) (((FlatPtr)(x)) & ~(PAGE_SIZE - 1))
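
// A quick illustration of the rounding macros, assuming the usual 4 KiB
// PAGE_SIZE (0x1000) on x86; the values below are illustrative, not asserted
// anywhere in this header:
//
//   PAGE_ROUND_UP(0x1234)   == 0x2000 // bumped to the next page boundary
//   PAGE_ROUND_UP(0x2000)   == 0x2000 // already aligned, unchanged
//   PAGE_ROUND_DOWN(0x1234) == 0x1000 // truncated to the containing page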

template<typename T>
inline T* low_physical_to_virtual(T* physical)
{
    return (T*)(((u8*)physical) + 0xc0000000);
}

inline u32 low_physical_to_virtual(u32 physical)
{
    return physical + 0xc0000000;
}

template<typename T>
inline T* virtual_to_low_physical(T* physical)
{
    return (T*)(((u8*)physical) - 0xc0000000);
}

inline u32 virtual_to_low_physical(u32 physical)
{
    return physical - 0xc0000000;
}
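
// The helpers above convert between "low" physical memory and its fixed
// kernel mapping: this i386 build maps low physical memory at a 3 GiB
// (0xc0000000) offset, so physical 0x00100000 is reachable at virtual
// 0xc0100000. A minimal sketch with hypothetical addresses:
//
//   u32 vaddr = low_physical_to_virtual(0x00100000u); // 0xc0100000
//   u32 paddr = virtual_to_low_physical(vaddr);       // 0x00100000 again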

class KBuffer;
class SynthFSInode;

#define MM Kernel::MemoryManager::the()
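
// "MM" is the shorthand the rest of the kernel uses to reach the
// MemoryManager singleton, e.g. MM.allocate_user_physical_page().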

struct MemoryManagerData {
    SpinLock<u8> m_quickmap_in_use;
    u32 m_quickmap_prev_flags;

    PhysicalAddress m_last_quickmap_pd;
    PhysicalAddress m_last_quickmap_pt;
};

extern RecursiveSpinLock s_mm_lock;

class MemoryManager {
    AK_MAKE_ETERNAL
    friend class PageDirectory;
    friend class PhysicalPage;
    friend class PhysicalRegion;
    friend class AnonymousVMObject;
    friend class Region;
    friend class VMObject;
    friend OwnPtr<KBuffer> procfs$memstat(InodeIdentifier);

public:
    static MemoryManager& the();
    static bool is_initialized();

    static void initialize(u32 cpu);

    static inline MemoryManagerData& get_data()
    {
        return Processor::current().get_mm_data();
    }

    PageFaultResponse handle_page_fault(const PageFault&);

    void enter_process_paging_scope(Process&);

    bool validate_user_stack(const Process&, VirtualAddress) const;
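
    // handle_page_fault() above is the architecture-independent half of the
    // fault path: the CPU fault handler calls into it, and it resolves
    // zero-fill, lazy-committed and copy-on-write pages before deciding
    // whether the faulting thread may continue or should crash.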

    enum class ShouldZeroFill {
        No,
        Yes
    };

    bool commit_user_physical_pages(size_t);
    void uncommit_user_physical_pages(size_t);
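
    // Committing reserves a *count* of user physical pages without picking
    // specific pages, so a later allocate_committed_user_physical_page()
    // cannot fail; uncommit returns unused reservations. A minimal sketch
    // (needed_pages is a hypothetical count, error handling elided):
    //
    //   if (!MM.commit_user_physical_pages(needed_pages))
    //       return KResult(-ENOMEM);
    //   auto page = MM.allocate_committed_user_physical_page();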

    NonnullRefPtr<PhysicalPage> allocate_committed_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes);
    RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
    RefPtr<PhysicalPage> allocate_supervisor_physical_page();
    NonnullRefPtrVector<PhysicalPage> allocate_contiguous_supervisor_physical_pages(size_t size);
    void deallocate_user_physical_page(const PhysicalPage&);
    void deallocate_supervisor_physical_page(const PhysicalPage&);

    OwnPtr<Region> allocate_contiguous_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
    OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, AllocationStrategy strategy = AllocationStrategy::Reserve, bool cacheable = true);
    OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
    OwnPtr<Region> allocate_kernel_region_identity(PhysicalAddress, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
    OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
    OwnPtr<Region> allocate_kernel_region_with_vmobject(const Range&, VMObject&, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
    OwnPtr<Region> allocate_user_accessible_kernel_region(size_t, const StringView& name, u8 access, bool cacheable = true);
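
    // A hedged usage sketch for the region allocators above (the name string
    // is illustrative): allocate one page of kernel-only read/write memory
    // backed by fresh physical pages, and check for failure, since these
    // return a nullable OwnPtr.
    //
    //   auto region = MM.allocate_kernel_region(PAGE_SIZE, "Example",
    //       Region::Access::Read | Region::Access::Write);
    //   if (!region)
    //       return KResult(-ENOMEM);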

    unsigned user_physical_pages() const { return m_user_physical_pages; }
    unsigned user_physical_pages_used() const { return m_user_physical_pages_used; }
    unsigned user_physical_pages_committed() const { return m_user_physical_pages_committed; }
    unsigned user_physical_pages_uncommitted() const { return m_user_physical_pages_uncommitted; }
    unsigned super_physical_pages() const { return m_super_physical_pages; }
    unsigned super_physical_pages_used() const { return m_super_physical_pages_used; }

    template<typename Callback>
    static void for_each_vmobject(Callback callback)
    {
        for (auto& vmobject : MM.m_vmobjects) {
            if (callback(vmobject) == IterationDecision::Break)
                break;
        }
    }

    template<typename T, typename Callback>
    static void for_each_vmobject_of_type(Callback callback)
    {
        for (auto& vmobject : MM.m_vmobjects) {
            if (!is<T>(vmobject))
                continue;
            if (callback(static_cast<T&>(vmobject)) == IterationDecision::Break)
                break;
        }
    }
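
    // A hedged usage sketch for the iteration helpers above (the logging body
    // is illustrative, not taken from a real caller): the callback returns an
    // IterationDecision so it can stop the walk early.
    //
    //   MemoryManager::for_each_vmobject([](VMObject& vmobject) {
    //       dbg() << "VMObject with " << vmobject.page_count() << " pages";
    //       return IterationDecision::Continue;
    //   });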

    static Region* find_region_from_vaddr(Process&, VirtualAddress);
    static const Region* find_region_from_vaddr(const Process&, VirtualAddress);

    void dump_kernel_regions();

    PhysicalPage& shared_zero_page() { return *m_shared_zero_page; }
    PhysicalPage& lazy_committed_page() { return *m_lazy_committed_page; }

    PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }

private:
    MemoryManager();
    ~MemoryManager();

    enum class AccessSpace { Kernel, User };
    enum class AccessType { Read, Write };
    template<AccessSpace, AccessType>
    bool validate_range(const Process&, VirtualAddress, size_t) const;

    void register_vmobject(VMObject&);
    void unregister_vmobject(VMObject&);
    void register_region(Region&);
    void unregister_region(Region&);

    void detect_cpu_features();
    void protect_kernel_image();
    void parse_memory_map();
    static void flush_tlb_local(VirtualAddress, size_t page_count = 1);
    static void flush_tlb(const PageDirectory*, VirtualAddress, size_t page_count = 1);

    static Region* user_region_from_vaddr(Process&, VirtualAddress);
    static Region* kernel_region_from_vaddr(VirtualAddress);

    static Region* find_region_from_vaddr(VirtualAddress);

    RefPtr<PhysicalPage> find_free_user_physical_page(bool);

    u8* quickmap_page(PhysicalPage&);
    void unquickmap_page();

    PageDirectoryEntry* quickmap_pd(PageDirectory&, size_t pdpt_index);
    PageTableEntry* quickmap_pt(PhysicalAddress);
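
    // Quickmapping (above) temporarily maps a single physical page, page
    // directory or page table into a per-CPU scratch slot so the kernel can
    // touch memory it has no permanent mapping for. quickmap_page() and
    // unquickmap_page() are expected to be paired, and the MemoryManagerData
    // fields cache the most recently quickmapped PD/PT so repeated accesses
    // can skip redundant remapping.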

    PageTableEntry* pte(PageDirectory&, VirtualAddress);
    PageTableEntry* ensure_pte(PageDirectory&, VirtualAddress);
    void release_pte(PageDirectory&, VirtualAddress, bool);

    RefPtr<PageDirectory> m_kernel_page_directory;
    RefPtr<PhysicalPage> m_low_page_table;

    RefPtr<PhysicalPage> m_shared_zero_page;
    RefPtr<PhysicalPage> m_lazy_committed_page;

    Atomic<unsigned, AK::MemoryOrder::memory_order_relaxed> m_user_physical_pages { 0 };
    Atomic<unsigned, AK::MemoryOrder::memory_order_relaxed> m_user_physical_pages_used { 0 };
    Atomic<unsigned, AK::MemoryOrder::memory_order_relaxed> m_user_physical_pages_committed { 0 };
    Atomic<unsigned, AK::MemoryOrder::memory_order_relaxed> m_user_physical_pages_uncommitted { 0 };
    Atomic<unsigned, AK::MemoryOrder::memory_order_relaxed> m_super_physical_pages { 0 };
    Atomic<unsigned, AK::MemoryOrder::memory_order_relaxed> m_super_physical_pages_used { 0 };

    NonnullRefPtrVector<PhysicalRegion> m_user_physical_regions;
    NonnullRefPtrVector<PhysicalRegion> m_super_physical_regions;

    InlineLinkedList<Region> m_user_regions;
    InlineLinkedList<Region> m_kernel_regions;

    InlineLinkedList<VMObject> m_vmobjects;

    RefPtr<PhysicalPage> m_low_pseudo_identity_mapping_pages[4];
};

template<typename Callback>
void VMObject::for_each_region(Callback callback)
{
    ScopedSpinLock lock(s_mm_lock);
    // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
    //        Perhaps VMObject could have a Vector<Region*> with all of its mappers?
    for (auto& region : MM.m_user_regions) {
        if (&region.vmobject() == this)
            callback(region);
    }
    for (auto& region : MM.m_kernel_regions) {
        if (&region.vmobject() == this)
            callback(region);
    }
}
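
// A hedged usage sketch for for_each_region() (the scenario is illustrative,
// assuming Region::remap() from the Region API): after a VMObject's backing
// pages change, remap every region that maps it.
//
//   vmobject.for_each_region([](Region& region) {
//       region.remap();
//   });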

inline bool is_user_address(VirtualAddress vaddr)
{
    return vaddr.get() < 0xc0000000;
}

inline bool is_user_range(VirtualAddress vaddr, size_t size)
{
    if (vaddr.offset(size) < vaddr)
        return false;
    return is_user_address(vaddr) && is_user_address(vaddr.offset(size));
}
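
// Note: the first check in is_user_range() rejects ranges whose end wraps
// around the 32-bit address space; without it, a huge size could make the end
// address wrap back below 0xc0000000 and pass both is_user_address() checks.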

inline bool PhysicalPage::is_shared_zero_page() const
{
    return this == &MM.shared_zero_page();
}

inline bool PhysicalPage::is_lazy_committed_page() const
{
    return this == &MM.lazy_committed_page();
}

}