// MemoryManager.h

#pragma once

#include "types.h"
#include "i386.h"
#include <AK/Bitmap.h>
#include <AK/ByteBuffer.h>
#include <AK/Retainable.h>
#include <AK/RetainPtr.h>
#include <AK/Vector.h>
#include <AK/HashMap.h>
#include <AK/HashTable.h>
#include <AK/AKString.h>
#include <AK/Badge.h>
#include <Kernel/VirtualFileSystem.h>

// Round x up to the next page boundary (PAGE_SIZE must be a power of two).
#define PAGE_ROUND_UP(x) ((((dword)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1)))
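
// A quick sanity check of the macro, assuming the usual PAGE_SIZE of 0x1000
// (the asserts are illustrative):
//
//     static_assert(PAGE_ROUND_UP(0x0fff) == 0x1000, "rounds up within a page");
//     static_assert(PAGE_ROUND_UP(0x1000) == 0x1000, "boundary is unchanged");
//     static_assert(PAGE_ROUND_UP(0x1001) == 0x2000, "rounds past a boundary");
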
class Process;
extern Process* current;

class SynthFSInode;

enum class PageFaultResponse {
    ShouldCrash,
    Continue,
};
class PhysicalPage {
    AK_MAKE_ETERNAL
    friend class MemoryManager;
    friend class PageDirectory;
    friend class VMObject;
public:
    PhysicalAddress paddr() const { return m_paddr; }

    void retain()
    {
        ASSERT(m_retain_count);
        ++m_retain_count;
    }

    void release()
    {
        ASSERT(m_retain_count);
        if (!--m_retain_count)
            return_to_freelist();
    }

    unsigned short retain_count() const { return m_retain_count; }

private:
    PhysicalPage(PhysicalAddress paddr, bool supervisor);
    ~PhysicalPage() = delete;

    void return_to_freelist();

    unsigned short m_retain_count { 1 };
    bool m_supervisor { false };
    PhysicalAddress m_paddr;
};
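
// A PhysicalPage is never destroyed (note the deleted destructor): when its
// retain count drops to zero, it returns itself to the allocator's freelist.
// A minimal sketch of the refcounting contract, assuming a page obtained
// from the MemoryManager:
//
//     auto page = MM.allocate_physical_page(MemoryManager::ShouldZeroFill::No);
//     page->retain();  // take an extra manual reference
//     page->release(); // drop it again; at refcount 0 the page is freelisted
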
class PageDirectory : public Retainable<PageDirectory> {
    friend class MemoryManager;
public:
    static RetainPtr<PageDirectory> create() { return adopt(*new PageDirectory); }
    static RetainPtr<PageDirectory> create_at_fixed_address(PhysicalAddress paddr) { return adopt(*new PageDirectory(paddr)); }
    ~PageDirectory();

    dword cr3() const { return m_directory_page->paddr().get(); }
    dword* entries() { return reinterpret_cast<dword*>(cr3()); }

    void flush(LinearAddress);

private:
    PageDirectory();
    explicit PageDirectory(PhysicalAddress);

    RetainPtr<PhysicalPage> m_directory_page;
    HashMap<unsigned, RetainPtr<PhysicalPage>> m_physical_pages;
};
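
// cr3() returns the physical address of the directory page, which is the
// value the CPU expects in the CR3 control register. A hedged sketch of how
// a directory could be installed (illustrative only; the real switching code
// lives with the scheduler, not in this header):
//
//     auto directory = PageDirectory::create();
//     MM.populate_page_directory(*directory);
//     asm volatile("movl %%eax, %%cr3" ::"a"(directory->cr3()) : "memory");
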
class VMObject : public Retainable<VMObject> {
    friend class MemoryManager;
public:
    static RetainPtr<VMObject> create_file_backed(RetainPtr<Inode>&&);
    static RetainPtr<VMObject> create_anonymous(size_t);
    static RetainPtr<VMObject> create_framebuffer_wrapper(PhysicalAddress, size_t);
    RetainPtr<VMObject> clone();
    ~VMObject();

    bool is_anonymous() const { return m_anonymous; }

    Inode* inode() { return m_inode.ptr(); }
    const Inode* inode() const { return m_inode.ptr(); }
    size_t inode_offset() const { return m_inode_offset; }

    String name() const { return m_name; }
    void set_name(const String& name) { m_name = name; }

    size_t page_count() const { return m_size / PAGE_SIZE; }

    const Vector<RetainPtr<PhysicalPage>>& physical_pages() const { return m_physical_pages; }
    Vector<RetainPtr<PhysicalPage>>& physical_pages() { return m_physical_pages; }

    void inode_contents_changed(Badge<Inode>, off_t, size_t, const byte*);
    void inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size);

private:
    VMObject(RetainPtr<Inode>&&);
    explicit VMObject(VMObject&);
    explicit VMObject(size_t);
    VMObject(PhysicalAddress, size_t);

    template<typename Callback> void for_each_region(Callback);

    String m_name;
    bool m_anonymous { false };
    off_t m_inode_offset { 0 };
    size_t m_size { 0 };
    RetainPtr<Inode> m_inode;
    Vector<RetainPtr<PhysicalPage>> m_physical_pages;
    Lock m_paging_lock;
};
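
// A VMObject owns the set of physical pages backing a mapping; a Region
// (below) maps a window of linear address space onto it. A minimal sketch,
// assuming a four-page anonymous object:
//
//     auto vmo = VMObject::create_anonymous(4 * PAGE_SIZE);
//     vmo->set_name("scratch buffer");
//     ASSERT(vmo->page_count() == 4);
//     // Entries in physical_pages() start out null; the MemoryManager
//     // fills them in lazily, e.g. on the first page fault.
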
class Region : public Retainable<Region> {
    friend class MemoryManager;
public:
    Region(LinearAddress, size_t, String&&, bool r, bool w, bool cow = false);
    Region(LinearAddress, size_t, RetainPtr<VMObject>&&, size_t offset_in_vmo, String&&, bool r, bool w, bool cow = false);
    Region(LinearAddress, size_t, RetainPtr<Inode>&&, String&&, bool r, bool w);
    ~Region();

    LinearAddress laddr() const { return m_laddr; }
    size_t size() const { return m_size; }
    bool is_readable() const { return m_readable; }
    bool is_writable() const { return m_writable; }
    String name() const { return m_name; }
    void set_name(String&& name) { m_name = move(name); }

    const VMObject& vmo() const { return *m_vmo; }
    VMObject& vmo() { return *m_vmo; }

    void set_shared(bool shared) { m_shared = shared; }

    RetainPtr<Region> clone();

    bool contains(LinearAddress laddr) const
    {
        return laddr >= m_laddr && laddr < m_laddr.offset(size());
    }

    unsigned page_index_from_address(LinearAddress laddr) const
    {
        return (laddr - m_laddr).get() / PAGE_SIZE;
    }

    size_t first_page_index() const
    {
        return m_offset_in_vmo / PAGE_SIZE;
    }

    size_t last_page_index() const
    {
        return (first_page_index() + page_count()) - 1;
    }

    size_t page_count() const
    {
        return m_size / PAGE_SIZE;
    }

    bool page_in();
    int commit();

    size_t amount_resident() const;
    size_t amount_shared() const;

    PageDirectory* page_directory() { return m_page_directory.ptr(); }

    void set_page_directory(PageDirectory& page_directory)
    {
        ASSERT(!m_page_directory || m_page_directory.ptr() == &page_directory);
        m_page_directory = page_directory;
    }

    void release_page_directory()
    {
        ASSERT(m_page_directory);
        m_page_directory.clear();
    }

    const Bitmap& cow_map() const { return m_cow_map; }

private:
    RetainPtr<PageDirectory> m_page_directory;
    LinearAddress m_laddr;
    size_t m_size { 0 };
    size_t m_offset_in_vmo { 0 };
    RetainPtr<VMObject> m_vmo;
    String m_name;
    bool m_readable { true };
    bool m_writable { true };
    bool m_shared { false };
    Bitmap m_cow_map;
};
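
// A Region is a contiguous range of linear addresses mapping a window onto a
// VMObject. A hedged sketch of the page-index arithmetic, assuming a
// two-page region starting one page into a three-page VMObject (the address
// and name are illustrative):
//
//     auto region = adopt(*new Region(LinearAddress(0x10000000), 2 * PAGE_SIZE,
//         VMObject::create_anonymous(3 * PAGE_SIZE), PAGE_SIZE, "example",
//         /* readable */ true, /* writable */ true));
//     // region->first_page_index() == 1
//     // region->last_page_index()  == 2
//     // region->page_count()       == 2
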
#define MM MemoryManager::the()

class MemoryManager {
    AK_MAKE_ETERNAL
    friend class PageDirectory;
    friend class PhysicalPage;
    friend class Region;
    friend class VMObject;
    friend ByteBuffer procfs$mm(InodeIdentifier);
public:
    static MemoryManager& the() PURE;

    static void initialize();

    PageFaultResponse handle_page_fault(const PageFault&);

    bool map_region(Process&, Region&);
    bool unmap_region(Region&);

    void populate_page_directory(PageDirectory&);

    void enter_process_paging_scope(Process&);

    bool validate_user_read(const Process&, LinearAddress) const;
    bool validate_user_write(const Process&, LinearAddress) const;

    enum class ShouldZeroFill { No, Yes };

    RetainPtr<PhysicalPage> allocate_physical_page(ShouldZeroFill);
    RetainPtr<PhysicalPage> allocate_supervisor_physical_page();

    void remap_region(PageDirectory&, Region&);

    size_t ram_size() const { return m_ram_size; }

private:
    MemoryManager();
    ~MemoryManager();

    void register_vmo(VMObject&);
    void unregister_vmo(VMObject&);
    void register_region(Region&);
    void unregister_region(Region&);

    void map_region_at_address(PageDirectory&, Region&, LinearAddress, bool user_accessible);
    void remap_region_page(Region&, unsigned page_index_in_region, bool user_allowed);

    void initialize_paging();
    void flush_entire_tlb();
    void flush_tlb(LinearAddress);

    RetainPtr<PhysicalPage> allocate_page_table(PageDirectory&, unsigned index);

    void map_protected(LinearAddress, size_t length);

    void create_identity_mapping(PageDirectory&, LinearAddress, size_t length);
    void remove_identity_mapping(PageDirectory&, LinearAddress, size_t);

    static Region* region_from_laddr(Process&, LinearAddress);
    static const Region* region_from_laddr(const Process&, LinearAddress);

    bool copy_on_write(Region&, unsigned page_index_in_region);
    bool page_in_from_inode(Region&, unsigned page_index_in_region);
    bool zero_page(Region& region, unsigned page_index_in_region);

    byte* quickmap_page(PhysicalPage&);
    void unquickmap_page();

    PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }
    struct PageDirectoryEntry {
        explicit PageDirectoryEntry(dword* pde) : m_pde(pde) { }

        dword* page_table_base() { return reinterpret_cast<dword*>(raw() & 0xfffff000u); }
        void set_page_table_base(dword value)
        {
            *m_pde &= 0xfffu;
            *m_pde |= value & 0xfffff000u;
        }

        dword raw() const { return *m_pde; }
        dword* ptr() { return m_pde; }

        enum Flags {
            Present = 1 << 0,
            ReadWrite = 1 << 1,
            UserSupervisor = 1 << 2,
        };

        bool is_present() const { return raw() & Present; }
        void set_present(bool b) { set_bit(Present, b); }

        bool is_user_allowed() const { return raw() & UserSupervisor; }
        void set_user_allowed(bool b) { set_bit(UserSupervisor, b); }

        bool is_writable() const { return raw() & ReadWrite; }
        void set_writable(bool b) { set_bit(ReadWrite, b); }

        void set_bit(byte bit, bool value)
        {
            if (value)
                *m_pde |= bit;
            else
                *m_pde &= ~bit;
        }

        dword* m_pde;
    };
    struct PageTableEntry {
        explicit PageTableEntry(dword* pte) : m_pte(pte) { }

        dword* physical_page_base() { return reinterpret_cast<dword*>(raw() & 0xfffff000u); }
        void set_physical_page_base(dword value)
        {
            *m_pte &= 0xfffu;
            *m_pte |= value & 0xfffff000u;
        }

        dword raw() const { return *m_pte; }
        dword* ptr() { return m_pte; }

        enum Flags {
            Present = 1 << 0,
            ReadWrite = 1 << 1,
            UserSupervisor = 1 << 2,
        };

        bool is_present() const { return raw() & Present; }
        void set_present(bool b) { set_bit(Present, b); }

        bool is_user_allowed() const { return raw() & UserSupervisor; }
        void set_user_allowed(bool b) { set_bit(UserSupervisor, b); }

        bool is_writable() const { return raw() & ReadWrite; }
        void set_writable(bool b) { set_bit(ReadWrite, b); }

        void set_bit(byte bit, bool value)
        {
            if (value)
                *m_pte |= bit;
            else
                *m_pte &= ~bit;
        }

        dword* m_pte;
    };
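
    // Both wrappers edit a raw 32-bit entry in place: the top 20 bits hold
    // the physical frame address and the low 12 bits hold the flags. A hedged
    // sketch of composing a present, writable, user-accessible mapping
    // (illustrative only; real callers go through ensure_pte() below):
    //
    //     dword raw_entry = 0;
    //     PageTableEntry pte(&raw_entry);
    //     pte.set_physical_page_base(page->paddr().get());
    //     pte.set_present(true);
    //     pte.set_writable(true);
    //     pte.set_user_allowed(true);
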
    PageTableEntry ensure_pte(PageDirectory&, LinearAddress);

    RetainPtr<PageDirectory> m_kernel_page_directory;
    dword* m_page_table_zero;

    LinearAddress m_quickmap_addr;

    Vector<RetainPtr<PhysicalPage>> m_free_physical_pages;
    Vector<RetainPtr<PhysicalPage>> m_free_supervisor_physical_pages;

    HashTable<VMObject*> m_vmos;
    HashTable<Region*> m_regions;

    size_t m_ram_size { 0 };
    bool m_quickmap_in_use { false };
};
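
// The MM macro above is the conventional way to reach the singleton. A
// minimal usage sketch, assuming initialize() has already run at boot (the
// error path belongs to a hypothetical caller):
//
//     auto page = MM.allocate_physical_page(MemoryManager::ShouldZeroFill::Yes);
//     if (!page)
//         return -ENOMEM; // out of physical pages
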
struct ProcessPagingScope {
    ProcessPagingScope(Process& process) { MM.enter_process_paging_scope(process); }
    ~ProcessPagingScope() { MM.enter_process_paging_scope(*current); }
};
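
// ProcessPagingScope is an RAII helper: its constructor switches to the given
// process's page tables, and its destructor switches back to the paging scope
// of `current`. A hedged usage sketch (hypothetical caller):
//
//     {
//         ProcessPagingScope scope(other_process);
//         // ... inspect or modify other_process's address space here ...
//     } // scope's destructor re-enters current's paging scope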