MemoryManager.h 9.2 KB

  1. #pragma once
  2. #include "types.h"
  3. #include "i386.h"
  4. #include <AK/Bitmap.h>
  5. #include <AK/ByteBuffer.h>
  6. #include <AK/Retainable.h>
  7. #include <AK/RetainPtr.h>
  8. #include <AK/Vector.h>
  9. #include <AK/HashTable.h>
  10. #include <AK/AKString.h>
  11. #include <VirtualFileSystem/VirtualFileSystem.h>
  12. #define PAGE_ROUND_UP(x) ((((dword)(x)) + PAGE_SIZE-1) & (~(PAGE_SIZE-1)))
  13. class Process;
  14. extern Process* current;
  15. class SynthFSInode;
  16. enum class PageFaultResponse {
  17. ShouldCrash,
  18. Continue,
  19. };
  20. class PhysicalPage {
  21. AK_MAKE_ETERNAL
  22. friend class MemoryManager;
  23. friend class PageDirectory;
  24. friend class VMObject;
  25. public:
  26. PhysicalAddress paddr() const { return m_paddr; }
  27. void retain()
  28. {
  29. ASSERT(m_retain_count);
  30. ++m_retain_count;
  31. }
  32. void release()
  33. {
  34. ASSERT(m_retain_count);
  35. if (!--m_retain_count)
  36. return_to_freelist();
  37. }
  38. unsigned short retain_count() const { return m_retain_count; }
  39. private:
  40. PhysicalPage(PhysicalAddress paddr, bool supervisor);
  41. ~PhysicalPage() = delete;
  42. void return_to_freelist();
  43. unsigned short m_retain_count { 1 };
  44. bool m_supervisor { false };
  45. PhysicalAddress m_paddr;
  46. };
  47. class PageDirectory : public Retainable<PageDirectory> {
  48. friend class MemoryManager;
  49. public:
  50. static RetainPtr<PageDirectory> create() { return adopt(*new PageDirectory); }
  51. static RetainPtr<PageDirectory> create_at_fixed_address(PhysicalAddress paddr) { return adopt(*new PageDirectory(paddr)); }
  52. ~PageDirectory();
  53. dword cr3() const { return m_directory_page->paddr().get(); }
  54. dword* entries() { return reinterpret_cast<dword*>(cr3()); }
  55. void flush(LinearAddress);
  56. private:
  57. PageDirectory();
  58. explicit PageDirectory(PhysicalAddress);
  59. RetainPtr<PhysicalPage> m_directory_page;
  60. HashMap<unsigned, RetainPtr<PhysicalPage>> m_physical_pages;
  61. };
  62. class VMObject : public Retainable<VMObject> {
  63. public:
  64. static RetainPtr<VMObject> create_file_backed(RetainPtr<Inode>&&, size_t);
  65. static RetainPtr<VMObject> create_anonymous(size_t);
  66. static RetainPtr<VMObject> create_framebuffer_wrapper(PhysicalAddress, size_t);
  67. RetainPtr<VMObject> clone();
  68. ~VMObject();
  69. bool is_anonymous() const { return m_anonymous; }
  70. Inode* inode() { return m_inode.ptr(); }
  71. const Inode* inode() const { return m_inode.ptr(); }
  72. size_t inode_offset() const { return m_inode_offset; }
  73. String name() const { return m_name; }
  74. void set_name(const String& name) { m_name = name; }
  75. size_t page_count() const { return m_size / PAGE_SIZE; }
  76. const Vector<RetainPtr<PhysicalPage>>& physical_pages() const { return m_physical_pages; }
  77. Vector<RetainPtr<PhysicalPage>>& physical_pages() { return m_physical_pages; }
  78. private:
  79. VMObject(RetainPtr<Inode>&&, size_t);
  80. explicit VMObject(VMObject&);
  81. explicit VMObject(size_t);
  82. VMObject(PhysicalAddress, size_t);
  83. String m_name;
  84. bool m_anonymous { false };
  85. Unix::off_t m_inode_offset { 0 };
  86. size_t m_size { 0 };
  87. RetainPtr<Inode> m_inode;
  88. Vector<RetainPtr<PhysicalPage>> m_physical_pages;
  89. };
  90. class Region : public Retainable<Region> {
  91. public:
  92. Region(LinearAddress, size_t, String&&, bool r, bool w, bool cow = false);
  93. Region(LinearAddress, size_t, RetainPtr<VMObject>&&, size_t offset_in_vmo, String&&, bool r, bool w, bool cow = false);
  94. Region(LinearAddress, size_t, RetainPtr<Inode>&&, String&&, bool r, bool w);
  95. ~Region();
  96. const VMObject& vmo() const { return *m_vmo; }
  97. VMObject& vmo() { return *m_vmo; }
  98. void set_shared(bool shared) { m_shared = shared; }
  99. RetainPtr<Region> clone();
  100. bool contains(LinearAddress laddr) const
  101. {
  102. return laddr >= linearAddress && laddr < linearAddress.offset(size);
  103. }
  104. unsigned page_index_from_address(LinearAddress laddr) const
  105. {
  106. return (laddr - linearAddress).get() / PAGE_SIZE;
  107. }
  108. size_t first_page_index() const
  109. {
  110. return m_offset_in_vmo / PAGE_SIZE;
  111. }
  112. size_t last_page_index() const
  113. {
  114. return (first_page_index() + page_count()) - 1;
  115. }
  116. size_t page_count() const
  117. {
  118. return size / PAGE_SIZE;
  119. }
  120. bool page_in();
  121. int commit();
  122. size_t committed() const;
  123. RetainPtr<PageDirectory> m_page_directory;
  124. LinearAddress linearAddress;
  125. size_t size { 0 };
  126. size_t m_offset_in_vmo { 0 };
  127. RetainPtr<VMObject> m_vmo;
  128. String name;
  129. bool is_readable { true };
  130. bool is_writable { true };
  131. bool m_shared { false };
  132. Bitmap cow_map;
  133. };
  134. #define MM MemoryManager::the()
  135. class MemoryManager {
  136. AK_MAKE_ETERNAL
  137. friend class PageDirectory;
  138. friend class PhysicalPage;
  139. friend class Region;
  140. friend class VMObject;
  141. friend ByteBuffer procfs$mm(SynthFSInode&);
  142. public:
  143. static MemoryManager& the() PURE;
  144. static void initialize();
  145. PageFaultResponse handle_page_fault(const PageFault&);
  146. bool map_region(Process&, Region&);
  147. bool unmap_region(Region&);
  148. void populate_page_directory(PageDirectory&);
  149. void enter_process_paging_scope(Process&);
  150. bool validate_user_read(const Process&, LinearAddress) const;
  151. bool validate_user_write(const Process&, LinearAddress) const;
  152. RetainPtr<PhysicalPage> allocate_physical_page();
  153. RetainPtr<PhysicalPage> allocate_supervisor_physical_page();
  154. void remap_region(Process&, Region&);
  155. private:
  156. MemoryManager();
  157. ~MemoryManager();
  158. void register_vmo(VMObject&);
  159. void unregister_vmo(VMObject&);
  160. void register_region(Region&);
  161. void unregister_region(Region&);
  162. void map_region_at_address(PageDirectory&, Region&, LinearAddress, bool user_accessible);
  163. void remap_region_page(Region&, unsigned page_index_in_region, bool user_allowed);
  164. void initialize_paging();
  165. void flush_entire_tlb();
  166. void flush_tlb(LinearAddress);
  167. RetainPtr<PhysicalPage> allocate_page_table(PageDirectory&, unsigned index);
  168. void map_protected(LinearAddress, size_t length);
  169. void create_identity_mapping(PageDirectory&, LinearAddress, size_t length);
  170. void remove_identity_mapping(PageDirectory&, LinearAddress, size_t);
  171. static Region* region_from_laddr(Process&, LinearAddress);
  172. bool copy_on_write(Region&, unsigned page_index_in_region);
  173. bool page_in_from_inode(Region&, unsigned page_index_in_region);
  174. bool zero_page(Region& region, unsigned page_index_in_region);
  175. byte* quickmap_page(PhysicalPage&);
  176. void unquickmap_page();
  177. PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }
  178. struct PageDirectoryEntry {
  179. explicit PageDirectoryEntry(dword* pde) : m_pde(pde) { }
  180. dword* pageTableBase() { return reinterpret_cast<dword*>(raw() & 0xfffff000u); }
  181. void setPageTableBase(dword value)
  182. {
  183. *m_pde &= 0xfff;
  184. *m_pde |= value & 0xfffff000;
  185. }
  186. dword raw() const { return *m_pde; }
  187. dword* ptr() { return m_pde; }
  188. enum Flags {
  189. Present = 1 << 0,
  190. ReadWrite = 1 << 1,
  191. UserSupervisor = 1 << 2,
  192. };
  193. bool is_present() const { return raw() & Present; }
  194. void set_present(bool b) { set_bit(Present, b); }
  195. bool is_user_allowed() const { return raw() & UserSupervisor; }
  196. void set_user_allowed(bool b) { set_bit(UserSupervisor, b); }
  197. bool is_writable() const { return raw() & ReadWrite; }
  198. void set_writable(bool b) { set_bit(ReadWrite, b); }
  199. void set_bit(byte bit, bool value)
  200. {
  201. if (value)
  202. *m_pde |= bit;
  203. else
  204. *m_pde &= ~bit;
  205. }
  206. dword* m_pde;
  207. };
  208. struct PageTableEntry {
  209. explicit PageTableEntry(dword* pte) : m_pte(pte) { }
  210. dword* physical_page_base() { return reinterpret_cast<dword*>(raw() & 0xfffff000u); }
  211. void set_physical_page_base(dword value)
  212. {
  213. *m_pte &= 0xfffu;
  214. *m_pte |= value & 0xfffff000u;
  215. }
  216. dword raw() const { return *m_pte; }
  217. dword* ptr() { return m_pte; }
  218. enum Flags {
  219. Present = 1 << 0,
  220. ReadWrite = 1 << 1,
  221. UserSupervisor = 1 << 2,
  222. };
  223. bool is_present() const { return raw() & Present; }
  224. void set_present(bool b) { set_bit(Present, b); }
  225. bool is_user_allowed() const { return raw() & UserSupervisor; }
  226. void set_user_allowed(bool b) { set_bit(UserSupervisor, b); }
  227. bool is_writable() const { return raw() & ReadWrite; }
  228. void set_writable(bool b) { set_bit(ReadWrite, b); }
  229. void set_bit(byte bit, bool value)
  230. {
  231. if (value)
  232. *m_pte |= bit;
  233. else
  234. *m_pte &= ~bit;
  235. }
  236. dword* m_pte;
  237. };
  238. PageTableEntry ensure_pte(PageDirectory&, LinearAddress);
  239. RetainPtr<PageDirectory> m_kernel_page_directory;
  240. dword* m_page_table_zero;
  241. LinearAddress m_quickmap_addr;
  242. Vector<RetainPtr<PhysicalPage>> m_free_physical_pages;
  243. Vector<RetainPtr<PhysicalPage>> m_free_supervisor_physical_pages;
  244. HashTable<VMObject*> m_vmos;
  245. HashTable<Region*> m_regions;
  246. };
  247. struct ProcessPagingScope {
  248. ProcessPagingScope(Process& process) { MM.enter_process_paging_scope(process); }
  249. ~ProcessPagingScope() { MM.enter_process_paging_scope(*current); }
  250. };