#pragma once

#include "types.h"
#include "i386.h"
#include <AK/Bitmap.h>
#include <AK/ByteBuffer.h>
#include <AK/Retainable.h>
#include <AK/RetainPtr.h>
#include <AK/Vector.h>
#include <AK/HashTable.h>
#include <AK/HashMap.h> // needed for PageDirectory::m_physical_pages below
#include <AK/AKString.h>
#include <Kernel/VirtualFileSystem.h>

#define PAGE_ROUND_UP(x) ((((dword)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1)))
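
// PAGE_ROUND_UP rounds an address or byte count up to the next page
// boundary. For example, assuming PAGE_SIZE == 4096:
//
//     PAGE_ROUND_UP(0)    == 0
//     PAGE_ROUND_UP(1)    == 4096
//     PAGE_ROUND_UP(4096) == 4096
//     PAGE_ROUND_UP(4097) == 8192
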
class Process;
extern Process* current;

class SynthFSInode;

enum class PageFaultResponse {
    ShouldCrash,
    Continue,
};
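
// A PhysicalPage represents a single page frame of physical RAM and is
// manually reference-counted: it is created with a retain count of 1, and
// when release() drops the count to zero the frame is returned to the
// appropriate freelist (user or supervisor) rather than destroyed; note
// the deleted destructor below.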
class PhysicalPage {
    AK_MAKE_ETERNAL
    friend class MemoryManager;
    friend class PageDirectory;
    friend class VMObject;

public:
    PhysicalAddress paddr() const { return m_paddr; }

    void retain()
    {
        ASSERT(m_retain_count);
        ++m_retain_count;
    }

    void release()
    {
        ASSERT(m_retain_count);
        if (!--m_retain_count)
            return_to_freelist();
    }

    unsigned short retain_count() const { return m_retain_count; }

private:
    PhysicalPage(PhysicalAddress paddr, bool supervisor);
    ~PhysicalPage() = delete;

    void return_to_freelist();

    unsigned short m_retain_count { 1 };
    bool m_supervisor { false };
    PhysicalAddress m_paddr;
};
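
// A PageDirectory wraps the physical page backing an x86 page directory.
// cr3() yields the physical address suitable for loading into the CR3
// register, and m_physical_pages keeps the directory's page tables alive,
// keyed by page directory index. Note that entries() reinterprets that
// physical address as a pointer, which is only valid while the directory
// page is identity-mapped.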
class PageDirectory : public Retainable<PageDirectory> {
    friend class MemoryManager;

public:
    static RetainPtr<PageDirectory> create() { return adopt(*new PageDirectory); }
    static RetainPtr<PageDirectory> create_at_fixed_address(PhysicalAddress paddr) { return adopt(*new PageDirectory(paddr)); }
    ~PageDirectory();

    dword cr3() const { return m_directory_page->paddr().get(); }
    dword* entries() { return reinterpret_cast<dword*>(cr3()); }

    void flush(LinearAddress);

private:
    PageDirectory();
    explicit PageDirectory(PhysicalAddress);

    RetainPtr<PhysicalPage> m_directory_page;
    HashMap<unsigned, RetainPtr<PhysicalPage>> m_physical_pages;
};
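
// A VMObject is a reference-counted collection of physical pages that
// Regions map into address spaces. It is either anonymous (pages
// materialized on demand and zero-filled) or backed by an Inode at
// m_inode_offset (pages paged in from the file); create_framebuffer_wrapper()
// wraps pre-existing physical memory such as a linear framebuffer.
// clone() produces a copy for copy-on-write use.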
class VMObject : public Retainable<VMObject> {
    friend class MemoryManager;

public:
    static RetainPtr<VMObject> create_file_backed(RetainPtr<Inode>&&, size_t);
    static RetainPtr<VMObject> create_anonymous(size_t);
    static RetainPtr<VMObject> create_framebuffer_wrapper(PhysicalAddress, size_t);
    RetainPtr<VMObject> clone();
    ~VMObject();

    bool is_anonymous() const { return m_anonymous; }

    Inode* inode() { return m_inode.ptr(); }
    const Inode* inode() const { return m_inode.ptr(); }
    size_t inode_offset() const { return m_inode_offset; }

    String name() const { return m_name; }
    void set_name(const String& name) { m_name = name; }

    size_t page_count() const { return m_size / PAGE_SIZE; }

    const Vector<RetainPtr<PhysicalPage>>& physical_pages() const { return m_physical_pages; }
    Vector<RetainPtr<PhysicalPage>>& physical_pages() { return m_physical_pages; }

private:
    VMObject(RetainPtr<Inode>&&, size_t);
    explicit VMObject(VMObject&);
    explicit VMObject(size_t);
    VMObject(PhysicalAddress, size_t);

    String m_name;
    bool m_anonymous { false };
    off_t m_inode_offset { 0 };
    size_t m_size { 0 };
    RetainPtr<Inode> m_inode;
    Vector<RetainPtr<PhysicalPage>> m_physical_pages;
    Lock m_paging_lock;
};
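
// A Region is a contiguous range of linear address space mapped into a
// single page directory. It views a window of its VMObject starting at
// m_offset_in_vmo; m_cow_map records which pages in that window still
// need a private copy on the next write fault.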
class Region : public Retainable<Region> {
    friend class MemoryManager;

public:
    Region(LinearAddress, size_t, String&&, bool r, bool w, bool cow = false);
    Region(LinearAddress, size_t, RetainPtr<VMObject>&&, size_t offset_in_vmo, String&&, bool r, bool w, bool cow = false);
    Region(LinearAddress, size_t, RetainPtr<Inode>&&, String&&, bool r, bool w);
    ~Region();

    LinearAddress laddr() const { return m_laddr; }
    size_t size() const { return m_size; }
    bool is_readable() const { return m_readable; }
    bool is_writable() const { return m_writable; }

    String name() const { return m_name; }
    void set_name(String&& name) { m_name = move(name); }

    const VMObject& vmo() const { return *m_vmo; }
    VMObject& vmo() { return *m_vmo; }

    void set_shared(bool shared) { m_shared = shared; }

    RetainPtr<Region> clone();

    bool contains(LinearAddress laddr) const
    {
        return laddr >= m_laddr && laddr < m_laddr.offset(size());
    }

    unsigned page_index_from_address(LinearAddress laddr) const
    {
        return (laddr - m_laddr).get() / PAGE_SIZE;
    }

    size_t first_page_index() const
    {
        return m_offset_in_vmo / PAGE_SIZE;
    }

    size_t last_page_index() const
    {
        return (first_page_index() + page_count()) - 1;
    }

    size_t page_count() const
    {
        return m_size / PAGE_SIZE;
    }

    bool page_in();
    int commit();
    size_t committed() const;

    PageDirectory* page_directory() { return m_page_directory.ptr(); }

    void set_page_directory(PageDirectory& page_directory)
    {
        ASSERT(!m_page_directory || m_page_directory.ptr() == &page_directory);
        m_page_directory = page_directory;
    }

    void release_page_directory()
    {
        ASSERT(m_page_directory);
        m_page_directory.clear();
    }

    const Bitmap& cow_map() const { return m_cow_map; }

private:
    RetainPtr<PageDirectory> m_page_directory;
    LinearAddress m_laddr;
    size_t m_size { 0 };
    size_t m_offset_in_vmo { 0 };
    RetainPtr<VMObject> m_vmo;
    String m_name;
    bool m_readable { true };
    bool m_writable { true };
    bool m_shared { false };
    Bitmap m_cow_map;
};

#define MM MemoryManager::the()
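
// The MemoryManager singleton (conventionally reached through the MM macro
// above) owns the kernel page directory, the physical page freelists, and
// the global VMObject/Region registries, and serves as the kernel's page
// fault handler.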
class MemoryManager {
    AK_MAKE_ETERNAL
    friend class PageDirectory;
    friend class PhysicalPage;
    friend class Region;
    friend class VMObject;
    friend ByteBuffer procfs$mm(SynthFSInode&);

public:
    static MemoryManager& the() PURE;

    static void initialize();

    PageFaultResponse handle_page_fault(const PageFault&);

    bool map_region(Process&, Region&);
    bool unmap_region(Region&);

    void populate_page_directory(PageDirectory&);

    void enter_process_paging_scope(Process&);

    bool validate_user_read(const Process&, LinearAddress) const;
    bool validate_user_write(const Process&, LinearAddress) const;

    enum class ShouldZeroFill { No, Yes };

    RetainPtr<PhysicalPage> allocate_physical_page(ShouldZeroFill);
    RetainPtr<PhysicalPage> allocate_supervisor_physical_page();

    void remap_region(Process&, Region&);

    size_t ram_size() const { return m_ram_size; }

private:
    MemoryManager();
    ~MemoryManager();

    void register_vmo(VMObject&);
    void unregister_vmo(VMObject&);
    void register_region(Region&);
    void unregister_region(Region&);

    void map_region_at_address(PageDirectory&, Region&, LinearAddress, bool user_accessible);
    void remap_region_page(Region&, unsigned page_index_in_region, bool user_allowed);

    void initialize_paging();
    void flush_entire_tlb();
    void flush_tlb(LinearAddress);

    RetainPtr<PhysicalPage> allocate_page_table(PageDirectory&, unsigned index);

    void map_protected(LinearAddress, size_t length);

    void create_identity_mapping(PageDirectory&, LinearAddress, size_t length);
    void remove_identity_mapping(PageDirectory&, LinearAddress, size_t);

    static Region* region_from_laddr(Process&, LinearAddress);
    static const Region* region_from_laddr(const Process&, LinearAddress);

    bool copy_on_write(Region&, unsigned page_index_in_region);
    bool page_in_from_inode(Region&, unsigned page_index_in_region);
    bool zero_page(Region& region, unsigned page_index_in_region);

    byte* quickmap_page(PhysicalPage&);
    void unquickmap_page();

    PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }
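
    // PageDirectoryEntry and PageTableEntry below are thin views over a raw
    // 32-bit x86 PDE/PTE: bits 31:12 hold the physical base address, and the
    // low bits hold the Present, ReadWrite and UserSupervisor flags.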
    struct PageDirectoryEntry {
        explicit PageDirectoryEntry(dword* pde) : m_pde(pde) { }

        dword* page_table_base() { return reinterpret_cast<dword*>(raw() & 0xfffff000u); }
        void set_page_table_base(dword value)
        {
            *m_pde &= 0xfffu;
            *m_pde |= value & 0xfffff000u;
        }

        dword raw() const { return *m_pde; }
        dword* ptr() { return m_pde; }

        enum Flags {
            Present = 1 << 0,
            ReadWrite = 1 << 1,
            UserSupervisor = 1 << 2,
        };

        bool is_present() const { return raw() & Present; }
        void set_present(bool b) { set_bit(Present, b); }

        bool is_user_allowed() const { return raw() & UserSupervisor; }
        void set_user_allowed(bool b) { set_bit(UserSupervisor, b); }

        bool is_writable() const { return raw() & ReadWrite; }
        void set_writable(bool b) { set_bit(ReadWrite, b); }

        void set_bit(byte bit, bool value)
        {
            if (value)
                *m_pde |= bit;
            else
                *m_pde &= ~bit;
        }

        dword* m_pde;
    };

    struct PageTableEntry {
        explicit PageTableEntry(dword* pte) : m_pte(pte) { }

        dword* physical_page_base() { return reinterpret_cast<dword*>(raw() & 0xfffff000u); }
        void set_physical_page_base(dword value)
        {
            *m_pte &= 0xfffu;
            *m_pte |= value & 0xfffff000u;
        }

        dword raw() const { return *m_pte; }
        dword* ptr() { return m_pte; }

        enum Flags {
            Present = 1 << 0,
            ReadWrite = 1 << 1,
            UserSupervisor = 1 << 2,
        };

        bool is_present() const { return raw() & Present; }
        void set_present(bool b) { set_bit(Present, b); }

        bool is_user_allowed() const { return raw() & UserSupervisor; }
        void set_user_allowed(bool b) { set_bit(UserSupervisor, b); }

        bool is_writable() const { return raw() & ReadWrite; }
        void set_writable(bool b) { set_bit(ReadWrite, b); }

        void set_bit(byte bit, bool value)
        {
            if (value)
                *m_pte |= bit;
            else
                *m_pte &= ~bit;
        }

        dword* m_pte;
    };

    PageTableEntry ensure_pte(PageDirectory&, LinearAddress);

    RetainPtr<PageDirectory> m_kernel_page_directory;
    dword* m_page_table_zero;

    LinearAddress m_quickmap_addr;

    Vector<RetainPtr<PhysicalPage>> m_free_physical_pages;
    Vector<RetainPtr<PhysicalPage>> m_free_supervisor_physical_pages;

    HashTable<VMObject*> m_vmos;
    HashTable<Region*> m_regions;

    size_t m_ram_size { 0 };
    bool m_quickmap_in_use { false };
};
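
// RAII helper that switches to another process's paging scope (its page
// directory) for the duration of a block and re-enters the current
// process's scope on destruction. A minimal usage sketch (hypothetical
// call site):
//
//     {
//         ProcessPagingScope scope(other_process);
//         // ... inspect or modify other_process's address space here ...
//     }   // destructor re-enters current's paging scope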
struct ProcessPagingScope {
    ProcessPagingScope(Process& process) { MM.enter_process_paging_scope(process); }
    ~ProcessPagingScope() { MM.enter_process_paging_scope(*current); }
};
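
// End-to-end sketch (hypothetical call site, not part of this header): a
// caller such as Process would typically create an anonymous VMObject, wrap
// it in a Region, and ask MM to map it; "laddr", "size" and "stack" are
// placeholder values here.
//
//     auto vmo = VMObject::create_anonymous(PAGE_ROUND_UP(size));
//     auto region = adopt(*new Region(laddr, PAGE_ROUND_UP(size), move(vmo), 0, "stack", true, true));
//     MM.map_region(process, *region);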