MemoryManager.h

#pragma once

#include <AK/Badge.h>
#include <AK/Bitmap.h>
#include <AK/ByteBuffer.h>
#include <AK/HashTable.h>
#include <AK/NonnullRefPtrVector.h>
#include <AK/RefCounted.h>
#include <AK/RefPtr.h>
#include <AK/String.h>
#include <AK/Types.h>
#include <AK/Vector.h>
#include <AK/Weakable.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/FileSystem/InodeIdentifier.h>
#include <Kernel/VM/PhysicalPage.h>
#include <Kernel/VM/PhysicalRegion.h>
#include <Kernel/VM/Region.h>
#include <Kernel/VM/VMObject.h>
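
// Rounds a size or address up to the next page boundary.
// Illustrative values (assuming PAGE_SIZE == 4096, i.e. 0x1000):
//   PAGE_ROUND_UP(0x1234) == 0x2000
//   PAGE_ROUND_UP(0x2000) == 0x2000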
#define PAGE_ROUND_UP(x) ((((u32)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1)))
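
// The helpers below translate between low physical addresses and their kernel
// virtual aliases. They assume the kernel maps low physical memory at a fixed
// 0xc0000000 offset, so e.g. virtual_to_low_physical(0xc0001000) == 0x00001000.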
template<typename T>
inline T* low_physical_to_virtual(T* physical)
{
    return (T*)(((u8*)physical) + 0xc0000000);
}

inline u32 low_physical_to_virtual(u32 physical)
{
    return physical + 0xc0000000;
}

template<typename T>
inline T* virtual_to_low_physical(T* physical)
{
    return (T*)(((u8*)physical) - 0xc0000000);
}

inline u32 virtual_to_low_physical(u32 physical)
{
    return physical - 0xc0000000;
}

class KBuffer;
class SynthFSInode;

#define MM MemoryManager::the()
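
// Convenience accessor for the MemoryManager singleton; illustrative use:
//   if (!MM.validate_user_read(process, vaddr, size))
//       return -EFAULT;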

class MemoryManager {
    AK_MAKE_ETERNAL
    friend class PageDirectory;
    friend class PhysicalPage;
    friend class PhysicalRegion;
    friend class Region;
    friend class VMObject;
    friend Optional<KBuffer> procfs$mm(InodeIdentifier);
    friend Optional<KBuffer> procfs$memstat(InodeIdentifier);

public:
    static MemoryManager& the();

    static void initialize();

    PageFaultResponse handle_page_fault(const PageFault&);

    void enter_process_paging_scope(Process&);
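
    // These checks are meant to verify that a caller-supplied address range is
    // actually mapped and accessible with the requested rights before the kernel
    // touches it (e.g. when copying syscall arguments); see the corresponding
    // definitions for the exact semantics.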
    bool validate_user_stack(const Process&, VirtualAddress) const;
    bool validate_user_read(const Process&, VirtualAddress, size_t) const;
    bool validate_user_write(const Process&, VirtualAddress, size_t) const;
    bool validate_kernel_read(const Process&, VirtualAddress, size_t) const;

    enum class ShouldZeroFill {
        No,
        Yes
    };

    RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes);
    RefPtr<PhysicalPage> allocate_supervisor_physical_page();
    void deallocate_user_physical_page(PhysicalPage&&);
    void deallocate_supervisor_physical_page(PhysicalPage&&);
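
    // Illustrative allocation of a single zero-filled user page (the default):
    //   auto page = MM.allocate_user_physical_page();
    //   if (!page)
    //       ... handle out-of-memory ...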

    void map_for_kernel(VirtualAddress, PhysicalAddress, bool cache_disabled = false);

    OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool should_commit = true, bool cacheable = true);
    OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = false);
    OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = false);
    OwnPtr<Region> allocate_user_accessible_kernel_region(size_t, const StringView& name, u8 access, bool cacheable = false);
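
    // Illustrative use of the region allocators above (assuming the Region::Access
    // bits from Region.h are what the `access` argument expects):
    //   auto region = MM.allocate_kernel_region(PAGE_ROUND_UP(size), "Example", Region::Access::Read | Region::Access::Write);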

    unsigned user_physical_pages() const { return m_user_physical_pages; }
    unsigned user_physical_pages_used() const { return m_user_physical_pages_used; }
    unsigned super_physical_pages() const { return m_super_physical_pages; }
    unsigned super_physical_pages_used() const { return m_super_physical_pages_used; }

    template<typename Callback>
    static void for_each_vmobject(Callback callback)
    {
        for (auto& vmobject : MM.m_vmobjects) {
            if (callback(vmobject) == IterationDecision::Break)
                break;
        }
    }
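    // The callback must return an IterationDecision; illustrative use:
    //   size_t count = 0;
    //   MM.for_each_vmobject([&](auto&) {
    //       ++count;
    //       return IterationDecision::Continue;
    //   });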

    static Region* region_from_vaddr(Process&, VirtualAddress);
    static const Region* region_from_vaddr(const Process&, VirtualAddress);

    void dump_kernel_regions();

private:
    MemoryManager();
    ~MemoryManager();

    enum class AccessSpace { Kernel, User };
    enum class AccessType { Read, Write };
    template<AccessSpace, AccessType>
    bool validate_range(const Process&, VirtualAddress, size_t) const;

    void register_vmobject(VMObject&);
    void unregister_vmobject(VMObject&);
    void register_region(Region&);
    void unregister_region(Region&);

    void detect_cpu_features();
    void setup_low_1mb();
    void protect_kernel_image();
    void parse_memory_map();

    void flush_entire_tlb();
    void flush_tlb(VirtualAddress);

    static Region* user_region_from_vaddr(Process&, VirtualAddress);
    static Region* kernel_region_from_vaddr(VirtualAddress);
    static Region* region_from_vaddr(VirtualAddress);

    RefPtr<PhysicalPage> find_free_user_physical_page();

    u8* quickmap_page(PhysicalPage&);
    void unquickmap_page();
    PageDirectoryEntry* quickmap_pd(PageDirectory&, size_t pdpt_index);
    PageTableEntry* quickmap_pt(PhysicalAddress);
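    // The quickmap_*() helpers above appear to map a single physical page into a
    // fixed kernel virtual slot so its contents can be accessed briefly;
    // quickmap_page() should be paired with unquickmap_page(), and
    // m_quickmap_in_use below guards against nested use.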

    PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }

    PageTableEntry& ensure_pte(PageDirectory&, VirtualAddress);

    RefPtr<PageDirectory> m_kernel_page_directory;
    RefPtr<PhysicalPage> m_low_page_table;

    unsigned m_user_physical_pages { 0 };
    unsigned m_user_physical_pages_used { 0 };
    unsigned m_super_physical_pages { 0 };
    unsigned m_super_physical_pages_used { 0 };

    NonnullRefPtrVector<PhysicalRegion> m_user_physical_regions;
    NonnullRefPtrVector<PhysicalRegion> m_super_physical_regions;

    InlineLinkedList<Region> m_user_regions;
    InlineLinkedList<Region> m_kernel_regions;
    InlineLinkedList<VMObject> m_vmobjects;

    bool m_quickmap_in_use { false };
};
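
// Presumably an RAII helper around enter_process_paging_scope(): constructing one
// switches to the given process's page tables for the duration of the scope, e.g.
//   {
//       ProcessPagingScope scope(process);
//       ... touch memory mapped in `process` ...
//   }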
struct ProcessPagingScope {
    ProcessPagingScope(Process&);
    ~ProcessPagingScope();
};

template<typename Callback>
void VMObject::for_each_region(Callback callback)
{
    // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
    //        Perhaps VMObject could have a Vector<Region*> with all of its mappers?
    for (auto& region : MM.m_user_regions) {
        if (&region.vmobject() == this)
            callback(region);
    }
    for (auto& region : MM.m_kernel_regions) {
        if (&region.vmobject() == this)
            callback(region);
    }
}
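
// Userspace addresses appear to span from 8 MB up to (but not including) the
// 0xc0000000 kernel base used by the helpers at the top of this file.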
inline bool is_user_address(VirtualAddress vaddr)
{
    return vaddr.get() >= (8 * MB) && vaddr.get() < 0xc0000000;
}