MemoryManager.h

#pragma once

#include "types.h"
#include "i386.h"
#include <AK/Bitmap.h>
#include <AK/ByteBuffer.h>
#include <AK/Retainable.h>
#include <AK/RetainPtr.h>
#include <AK/Vector.h>
#include <AK/HashMap.h>
#include <AK/HashTable.h>
#include <AK/AKString.h>
#include <AK/Badge.h>
#include <AK/Weakable.h>
#include <Kernel/VirtualFileSystem.h>

#define PAGE_ROUND_UP(x) ((((dword)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1)))
class SynthFSInode;

enum class PageFaultResponse {
    ShouldCrash,
    Continue,
};
// A manually reference-counted descriptor for one physical page of RAM.
// On final release, a page either returns to the allocator's freelist or
// is destroyed, depending on m_may_return_to_freelist.
class PhysicalPage {
    friend class MemoryManager;
    friend class PageDirectory;
    friend class VMObject;

public:
    PhysicalAddress paddr() const { return m_paddr; }

    void retain()
    {
        ASSERT(m_retain_count);
        ++m_retain_count;
    }

    void release()
    {
        ASSERT(m_retain_count);
        if (!--m_retain_count) {
            if (m_may_return_to_freelist)
                return_to_freelist();
            else
                delete this;
        }
    }

    static Retained<PhysicalPage> create_eternal(PhysicalAddress, bool supervisor);
    static Retained<PhysicalPage> create(PhysicalAddress, bool supervisor);

    unsigned short retain_count() const { return m_retain_count; }

private:
    PhysicalPage(PhysicalAddress paddr, bool supervisor, bool may_return_to_freelist = true);
    ~PhysicalPage() { }

    void return_to_freelist();

    unsigned short m_retain_count { 1 };
    bool m_may_return_to_freelist { true };
    bool m_supervisor { false };
    PhysicalAddress m_paddr;
};
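
// Usage sketch (illustrative, not part of this header): PhysicalPage is meant
// to be held through RetainPtr/Retained, which call retain()/release()
// automatically:
//
//     RetainPtr<PhysicalPage> page = MM.allocate_physical_page(MemoryManager::ShouldZeroFill::Yes);
//     if (page)
//         dbgprintf("allocated physical page at P%x\n", page->paddr().get());
//     // When `page` goes out of scope, release() runs; a freelist-eligible
//     // page returns to the allocator instead of being deleted.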
// Wraps one physical page used as an x86 page directory, plus the page
// tables allocated for it, keyed by directory index.
class PageDirectory : public Retainable<PageDirectory> {
    friend class MemoryManager;

public:
    static Retained<PageDirectory> create() { return adopt(*new PageDirectory); }
    static Retained<PageDirectory> create_at_fixed_address(PhysicalAddress paddr) { return adopt(*new PageDirectory(paddr)); }
    ~PageDirectory();

    dword cr3() const { return m_directory_page->paddr().get(); }
    dword* entries() { return reinterpret_cast<dword*>(cr3()); }

    void flush(LinearAddress);

private:
    PageDirectory();
    explicit PageDirectory(PhysicalAddress);

    RetainPtr<PhysicalPage> m_directory_page;
    HashMap<unsigned, RetainPtr<PhysicalPage>> m_physical_pages;
};
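
// Activation sketch (an assumption about the .cpp side, mirroring what
// enter_process_paging_scope() presumably does): switching address spaces
// means loading the directory's physical address into the CR3 register:
//
//     asm volatile("movl %%eax, %%cr3" ::"a"(page_directory.cr3()) : "memory");
//
// Note that entries() casts a *physical* address straight to a pointer, so it
// is only valid while that physical memory is identity-mapped.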
// A VMObject is a set of physical pages that one or more Regions can map:
// anonymous memory, a file (inode) backing, or a fixed physical range
// (e.g. memory-mapped hardware).
class VMObject : public Retainable<VMObject>, public Weakable<VMObject> {
    friend class MemoryManager;

public:
    static Retained<VMObject> create_file_backed(RetainPtr<Inode>&&);
    static Retained<VMObject> create_anonymous(size_t);
    static Retained<VMObject> create_for_physical_range(PhysicalAddress, size_t);
    Retained<VMObject> clone();
    ~VMObject();

    bool is_anonymous() const { return m_anonymous; }

    Inode* inode() { return m_inode.ptr(); }
    const Inode* inode() const { return m_inode.ptr(); }
    size_t inode_offset() const { return m_inode_offset; }

    String name() const { return m_name; }
    void set_name(const String& name) { m_name = name; }

    size_t page_count() const { return m_size / PAGE_SIZE; }
    const Vector<RetainPtr<PhysicalPage>>& physical_pages() const { return m_physical_pages; }
    Vector<RetainPtr<PhysicalPage>>& physical_pages() { return m_physical_pages; }

    void inode_contents_changed(Badge<Inode>, off_t, ssize_t, const byte*);
    void inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size);

    size_t size() const { return m_size; }

private:
    VMObject(RetainPtr<Inode>&&);
    explicit VMObject(VMObject&);
    explicit VMObject(size_t);
    VMObject(PhysicalAddress, size_t);

    template<typename Callback> void for_each_region(Callback);

    String m_name;
    bool m_anonymous { false };
    off_t m_inode_offset { 0 };
    size_t m_size { 0 };
    bool m_allow_cpu_caching { true };
    RetainPtr<Inode> m_inode;
    Vector<RetainPtr<PhysicalPage>> m_physical_pages;
    Lock m_paging_lock;
};
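
// Sketch of the VMObject/page relationship (illustrative; population is
// presumably lazy, driven by MemoryManager::zero_page() and
// page_in_from_inode() on page fault):
//
//     auto vmo = VMObject::create_anonymous(4 * PAGE_SIZE);
//     ASSERT(vmo->page_count() == 4);
//     // m_physical_pages holds one RetainPtr slot per page; a null slot
//     // is a page that has not been faulted in (or committed) yet.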
// A Region is one contiguous range of a process's (or the kernel's) linear
// address space, backed by a VMObject at some page-aligned offset.
class Region : public Retainable<Region> {
    friend class MemoryManager;

public:
    Region(LinearAddress, size_t, String&&, bool r, bool w, bool cow = false);
    Region(LinearAddress, size_t, Retained<VMObject>&&, size_t offset_in_vmo, String&&, bool r, bool w, bool cow = false);
    Region(LinearAddress, size_t, RetainPtr<Inode>&&, String&&, bool r, bool w);
    ~Region();

    LinearAddress laddr() const { return m_laddr; }
    size_t size() const { return m_size; }
    bool is_readable() const { return m_readable; }
    bool is_writable() const { return m_writable; }

    String name() const { return m_name; }
    void set_name(String&& name) { m_name = move(name); }

    const VMObject& vmo() const { return *m_vmo; }
    VMObject& vmo() { return *m_vmo; }

    bool is_shared() const { return m_shared; }
    void set_shared(bool shared) { m_shared = shared; }

    bool is_bitmap() const { return m_is_bitmap; }
    void set_is_bitmap(bool b) { m_is_bitmap = b; }

    Retained<Region> clone();

    bool contains(LinearAddress laddr) const
    {
        return laddr >= m_laddr && laddr < m_laddr.offset(size());
    }

    unsigned page_index_from_address(LinearAddress laddr) const
    {
        return (laddr - m_laddr).get() / PAGE_SIZE;
    }

    size_t first_page_index() const
    {
        return m_offset_in_vmo / PAGE_SIZE;
    }

    size_t last_page_index() const
    {
        return (first_page_index() + page_count()) - 1;
    }

    size_t page_count() const
    {
        return m_size / PAGE_SIZE;
    }

    bool page_in();
    int commit();

    size_t amount_resident() const;
    size_t amount_shared() const;

    PageDirectory* page_directory() { return m_page_directory.ptr(); }

    void set_page_directory(PageDirectory& page_directory)
    {
        ASSERT(!m_page_directory || m_page_directory.ptr() == &page_directory);
        m_page_directory = page_directory;
    }

    void release_page_directory()
    {
        ASSERT(m_page_directory);
        m_page_directory.clear();
    }

    const Bitmap& cow_map() const { return m_cow_map; }

    void set_writable(bool b) { m_writable = b; }

private:
    RetainPtr<PageDirectory> m_page_directory;
    LinearAddress m_laddr;
    size_t m_size { 0 };
    size_t m_offset_in_vmo { 0 };
    Retained<VMObject> m_vmo;
    String m_name;
    bool m_readable { true };
    bool m_writable { true };
    bool m_shared { false };
    bool m_is_bitmap { false };
    Bitmap m_cow_map;
};
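
// Worked example of the index arithmetic above (values are hypothetical):
// for a Region at laddr 0x20000000 of size 0x3000 with m_offset_in_vmo
// 0x1000 (and PAGE_SIZE 0x1000), a fault at 0x20001234 gives
//
//     page_index_from_address(0x20001234) == 0x1234 / PAGE_SIZE == 1
//     first_page_index() == 1, page_count() == 3, last_page_index() == 3
//
// so the faulting page is VMObject page first_page_index() + 1 == 2.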
#define MM MemoryManager::the()

// The MemoryManager singleton owns the physical page allocator, the kernel
// page directory, and page fault handling (copy-on-write, paging in from
// inodes, and zero-fill).
class MemoryManager {
    AK_MAKE_ETERNAL
    friend class PageDirectory;
    friend class PhysicalPage;
    friend class Region;
    friend class VMObject;
    friend ByteBuffer procfs$mm(InodeIdentifier);
    friend ByteBuffer procfs$memstat(InodeIdentifier);

public:
    [[gnu::pure]] static MemoryManager& the();

    static void initialize();

    PageFaultResponse handle_page_fault(const PageFault&);

    bool map_region(Process&, Region&);
    bool unmap_region(Region&);

    void populate_page_directory(PageDirectory&);

    void enter_process_paging_scope(Process&);
    void enter_kernel_paging_scope();

    bool validate_user_read(const Process&, LinearAddress) const;
    bool validate_user_write(const Process&, LinearAddress) const;

    enum class ShouldZeroFill { No, Yes };

    RetainPtr<PhysicalPage> allocate_physical_page(ShouldZeroFill);
    RetainPtr<PhysicalPage> allocate_supervisor_physical_page();

    void remap_region(PageDirectory&, Region&);

    size_t ram_size() const { return m_ram_size; }
    int user_physical_pages_in_existence() const { return s_user_physical_pages_in_existence; }
    int super_physical_pages_in_existence() const { return s_super_physical_pages_in_existence; }

    void map_for_kernel(LinearAddress, PhysicalAddress);

private:
    MemoryManager();
    ~MemoryManager();

    void register_vmo(VMObject&);
    void unregister_vmo(VMObject&);
    void register_region(Region&);
    void unregister_region(Region&);

    void map_region_at_address(PageDirectory&, Region&, LinearAddress, bool user_accessible);
    void remap_region_page(Region&, unsigned page_index_in_region, bool user_allowed);

    void initialize_paging();
    void flush_entire_tlb();
    void flush_tlb(LinearAddress);

    RetainPtr<PhysicalPage> allocate_page_table(PageDirectory&, unsigned index);

    void map_protected(LinearAddress, size_t length);

    void create_identity_mapping(PageDirectory&, LinearAddress, size_t length);
    void remove_identity_mapping(PageDirectory&, LinearAddress, size_t);

    static Region* region_from_laddr(Process&, LinearAddress);
    static const Region* region_from_laddr(const Process&, LinearAddress);

    bool copy_on_write(Region&, unsigned page_index_in_region);
    bool page_in_from_inode(Region&, unsigned page_index_in_region);
    bool zero_page(Region& region, unsigned page_index_in_region);

    byte* quickmap_page(PhysicalPage&);
    void unquickmap_page();

    PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }
    struct PageDirectoryEntry {
        explicit PageDirectoryEntry(dword* pde) : m_pde(pde) { }

        dword* page_table_base() { return reinterpret_cast<dword*>(raw() & 0xfffff000u); }
        void set_page_table_base(dword value)
        {
            *m_pde &= 0xfffu;
            *m_pde |= value & 0xfffff000u;
        }

        dword raw() const { return *m_pde; }
        dword* ptr() { return m_pde; }

        enum Flags {
            Present = 1 << 0,
            ReadWrite = 1 << 1,
            UserSupervisor = 1 << 2,
            WriteThrough = 1 << 3,
            CacheDisabled = 1 << 4,
        };

        bool is_present() const { return raw() & Present; }
        void set_present(bool b) { set_bit(Present, b); }

        bool is_user_allowed() const { return raw() & UserSupervisor; }
        void set_user_allowed(bool b) { set_bit(UserSupervisor, b); }

        bool is_writable() const { return raw() & ReadWrite; }
        void set_writable(bool b) { set_bit(ReadWrite, b); }

        bool is_write_through() const { return raw() & WriteThrough; }
        void set_write_through(bool b) { set_bit(WriteThrough, b); }

        bool is_cache_disabled() const { return raw() & CacheDisabled; }
        void set_cache_disabled(bool b) { set_bit(CacheDisabled, b); }

        void set_bit(byte bit, bool value)
        {
            if (value)
                *m_pde |= bit;
            else
                *m_pde &= ~bit;
        }

        dword* m_pde;
    };
    struct PageTableEntry {
        explicit PageTableEntry(dword* pte) : m_pte(pte) { }

        dword* physical_page_base() { return reinterpret_cast<dword*>(raw() & 0xfffff000u); }
        void set_physical_page_base(dword value)
        {
            *m_pte &= 0xfffu;
            *m_pte |= value & 0xfffff000u;
        }

        dword raw() const { return *m_pte; }
        dword* ptr() { return m_pte; }

        enum Flags {
            Present = 1 << 0,
            ReadWrite = 1 << 1,
            UserSupervisor = 1 << 2,
            WriteThrough = 1 << 3,
            CacheDisabled = 1 << 4,
        };

        bool is_present() const { return raw() & Present; }
        void set_present(bool b) { set_bit(Present, b); }

        bool is_user_allowed() const { return raw() & UserSupervisor; }
        void set_user_allowed(bool b) { set_bit(UserSupervisor, b); }

        bool is_writable() const { return raw() & ReadWrite; }
        void set_writable(bool b) { set_bit(ReadWrite, b); }

        bool is_write_through() const { return raw() & WriteThrough; }
        void set_write_through(bool b) { set_bit(WriteThrough, b); }

        bool is_cache_disabled() const { return raw() & CacheDisabled; }
        void set_cache_disabled(bool b) { set_bit(CacheDisabled, b); }

        void set_bit(byte bit, bool value)
        {
            if (value)
                *m_pte |= bit;
            else
                *m_pte &= ~bit;
        }

        dword* m_pte;
    };
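
    // Walk sketch (standard 32-bit x86 paging without PAE; ensure_pte()
    // below presumably performs this walk): a linear address splits as
    //
    //     directory index = laddr >> 22            (top 10 bits)
    //     table index     = (laddr >> 12) & 0x3ff  (middle 10 bits)
    //     page offset     = laddr & 0xfff          (low 12 bits)
    //
    // so the PageDirectoryEntry at the directory index yields the page
    // table, and the PageTableEntry at the table index yields the page.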
    static unsigned s_user_physical_pages_in_existence;
    static unsigned s_super_physical_pages_in_existence;

    PageTableEntry ensure_pte(PageDirectory&, LinearAddress);

    RetainPtr<PageDirectory> m_kernel_page_directory;
    dword* m_page_table_zero;

    LinearAddress m_quickmap_addr;

    Vector<Retained<PhysicalPage>> m_free_physical_pages;
    Vector<Retained<PhysicalPage>> m_free_supervisor_physical_pages;

    HashTable<VMObject*> m_vmos;
    HashTable<Region*> m_regions;

    size_t m_ram_size { 0 };
    bool m_quickmap_in_use { false };
};
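
// Quickmap sketch (illustrative; based on the quickmap_page()/unquickmap_page()
// pair above): a single reserved kernel address, m_quickmap_addr, is remapped
// so the kernel can briefly touch any physical page, e.g. when zero-filling:
//
//     byte* ptr = quickmap_page(physical_page); // map page at m_quickmap_addr
//     memset(ptr, 0, PAGE_SIZE);                // operate through the window
//     unquickmap_page();                        // release the single window
//
// m_quickmap_in_use guards against nested use of the one window.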
struct ProcessPagingScope {
    ProcessPagingScope(Process&);
    ~ProcessPagingScope();
};

struct KernelPagingScope {
    KernelPagingScope();
    ~KernelPagingScope();
};
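
// RAII usage sketch (illustrative): these guards enter another paging scope
// on construction, via enter_process_paging_scope() / enter_kernel_paging_scope(),
// and presumably re-enter the current process's scope on destruction, so a
// caller can briefly work in another address space:
//
//     {
//         ProcessPagingScope scope(other_process);
//         // ... touch other_process's mappings here ...
//     } // destructor restores the current process's paging scope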