MemoryManager.h

#pragma once

#include "types.h"
#include "i386.h"
#include <AK/Bitmap.h>
#include <AK/ByteBuffer.h>
#include <AK/Retainable.h>
#include <AK/RetainPtr.h>
#include <AK/Vector.h>
#include <AK/HashMap.h>   // added: PageDirectory::m_physical_pages below uses HashMap
#include <AK/HashTable.h>
#include <AK/AKString.h>
#include <AK/Badge.h>
#include <AK/Weakable.h>
#include <Kernel/Lock.h>  // added: VMObject::m_paging_lock below uses Lock
#include <Kernel/VirtualFileSystem.h>

#define PAGE_ROUND_UP(x) ((((dword)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1)))
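
// A worked example of the macro, assuming the usual x86 PAGE_SIZE of 4096
// (0x1000): PAGE_ROUND_UP(0x1234) == (0x1234 + 0xfff) & ~0xfff == 0x2000,
// while an already-aligned address is unchanged:
// PAGE_ROUND_UP(0x2000) == 0x2000.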

class Process;
extern Process* current;

class SynthFSInode;

enum class PageFaultResponse {
    ShouldCrash,
    Continue,
};

class PhysicalPage {
    friend class MemoryManager;
    friend class PageDirectory;
    friend class VMObject;

public:
    PhysicalAddress paddr() const { return m_paddr; }

    void retain()
    {
        ASSERT(m_retain_count);
        ++m_retain_count;
    }

    void release()
    {
        ASSERT(m_retain_count);
        if (!--m_retain_count) {
            if (m_may_return_to_freelist)
                return_to_freelist();
            else
                delete this;
        }
    }

    static Retained<PhysicalPage> create_eternal(PhysicalAddress, bool supervisor);
    static Retained<PhysicalPage> create(PhysicalAddress, bool supervisor);

    unsigned short retain_count() const { return m_retain_count; }

private:
    PhysicalPage(PhysicalAddress paddr, bool supervisor, bool may_return_to_freelist = true);
    ~PhysicalPage() { }

    void return_to_freelist();

    unsigned short m_retain_count { 1 };
    bool m_may_return_to_freelist { true };
    bool m_supervisor { false };
    PhysicalAddress m_paddr;
};
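
// PhysicalPage is an intrusively refcounted handle to one physical frame: it
// starts with a retain count of 1, and on the final release() it either
// returns the frame to the allocator's freelist or deletes itself. A minimal
// usage sketch (hypothetical caller, not part of this header):
//
//     RetainPtr<PhysicalPage> page = MM.allocate_physical_page(MemoryManager::ShouldZeroFill::Yes);
//     if (!page)
//         return -ENOMEM;                         // allocation can fail when RAM is exhausted
//     dbgprintf("got frame at P%x\n", page->paddr().get());
//     // dropping the RetainPtr releases the page back to the freelist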

class PageDirectory : public Retainable<PageDirectory> {
    friend class MemoryManager;

public:
    static Retained<PageDirectory> create() { return adopt(*new PageDirectory); }
    static Retained<PageDirectory> create_at_fixed_address(PhysicalAddress paddr) { return adopt(*new PageDirectory(paddr)); }
    ~PageDirectory();

    dword cr3() const { return m_directory_page->paddr().get(); }
    dword* entries() { return reinterpret_cast<dword*>(cr3()); }

    void flush(LinearAddress);

private:
    PageDirectory();
    explicit PageDirectory(PhysicalAddress);

    RetainPtr<PhysicalPage> m_directory_page;
    HashMap<unsigned, RetainPtr<PhysicalPage>> m_physical_pages;
};
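
// A PageDirectory owns the physical page holding its directory entries plus,
// in m_physical_pages, any page tables allocated for it. Note that entries()
// reinterprets the *physical* address from cr3() as a pointer, which assumes
// the directory's page is identity mapped. Activating a directory is a matter
// of loading cr3() into the CR3 register, roughly (inline assembly sketch,
// not from this header):
//
//     asm volatile("movl %%eax, %%cr3" ::"a"(page_directory.cr3()) : "memory");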

class VMObject : public Retainable<VMObject>, public Weakable<VMObject> {
    friend class MemoryManager;

public:
    static Retained<VMObject> create_file_backed(RetainPtr<Inode>&&);
    static Retained<VMObject> create_anonymous(size_t);
    static Retained<VMObject> create_for_physical_range(PhysicalAddress, size_t);
    Retained<VMObject> clone();
    ~VMObject();

    bool is_anonymous() const { return m_anonymous; }

    Inode* inode() { return m_inode.ptr(); }
    const Inode* inode() const { return m_inode.ptr(); }
    size_t inode_offset() const { return m_inode_offset; }

    String name() const { return m_name; }
    void set_name(const String& name) { m_name = name; }

    size_t page_count() const { return m_size / PAGE_SIZE; }
    const Vector<RetainPtr<PhysicalPage>>& physical_pages() const { return m_physical_pages; }
    Vector<RetainPtr<PhysicalPage>>& physical_pages() { return m_physical_pages; }

    void inode_contents_changed(Badge<Inode>, off_t, ssize_t, const byte*);
    void inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size);

    size_t size() const { return m_size; }

private:
    VMObject(RetainPtr<Inode>&&);
    explicit VMObject(VMObject&);
    explicit VMObject(size_t);
    VMObject(PhysicalAddress, size_t);

    template<typename Callback>
    void for_each_region(Callback);

    String m_name;
    bool m_anonymous { false };
    off_t m_inode_offset { 0 };
    size_t m_size { 0 };
    bool m_allow_cpu_caching { true };
    RetainPtr<Inode> m_inode;
    Vector<RetainPtr<PhysicalPage>> m_physical_pages;
    Lock m_paging_lock;
};
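
// A VMObject is the sharable backing store behind one or more Regions: a
// vector of physical pages that is either anonymous (zero-fill, allocated on
// demand), a window into an Inode, or a raw physical range (e.g. a
// memory-mapped device). Null slots in physical_pages() are pages not yet
// faulted in. A minimal sketch of creating each flavor (hypothetical values
// and inode, not from this header):
//
//     auto anon = VMObject::create_anonymous(4 * PAGE_SIZE);           // 4 zero-fill pages
//     auto file = VMObject::create_file_backed(some_inode.copy_ref()); // hypothetical inode handle
//     auto mmio = VMObject::create_for_physical_range(PhysicalAddress(0xb8000), PAGE_SIZE);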

class Region : public Retainable<Region> {
    friend class MemoryManager;

public:
    Region(LinearAddress, size_t, String&&, bool r, bool w, bool cow = false);
    Region(LinearAddress, size_t, Retained<VMObject>&&, size_t offset_in_vmo, String&&, bool r, bool w, bool cow = false);
    Region(LinearAddress, size_t, RetainPtr<Inode>&&, String&&, bool r, bool w);
    ~Region();

    LinearAddress laddr() const { return m_laddr; }
    size_t size() const { return m_size; }
    bool is_readable() const { return m_readable; }
    bool is_writable() const { return m_writable; }

    String name() const { return m_name; }
    void set_name(String&& name) { m_name = move(name); }

    const VMObject& vmo() const { return *m_vmo; }
    VMObject& vmo() { return *m_vmo; }

    bool is_shared() const { return m_shared; }
    void set_shared(bool shared) { m_shared = shared; }

    bool is_bitmap() const { return m_is_bitmap; }
    void set_is_bitmap(bool b) { m_is_bitmap = b; }

    Retained<Region> clone();

    bool contains(LinearAddress laddr) const
    {
        return laddr >= m_laddr && laddr < m_laddr.offset(size());
    }

    unsigned page_index_from_address(LinearAddress laddr) const
    {
        return (laddr - m_laddr).get() / PAGE_SIZE;
    }

    size_t first_page_index() const
    {
        return m_offset_in_vmo / PAGE_SIZE;
    }

    size_t last_page_index() const
    {
        return (first_page_index() + page_count()) - 1;
    }

    size_t page_count() const
    {
        return m_size / PAGE_SIZE;
    }

    bool page_in();
    int commit();

    size_t amount_resident() const;
    size_t amount_shared() const;

    PageDirectory* page_directory() { return m_page_directory.ptr(); }

    void set_page_directory(PageDirectory& page_directory)
    {
        ASSERT(!m_page_directory || m_page_directory.ptr() == &page_directory);
        m_page_directory = page_directory;
    }

    void release_page_directory()
    {
        ASSERT(m_page_directory);
        m_page_directory.clear();
    }

    const Bitmap& cow_map() const { return m_cow_map; }

private:
    RetainPtr<PageDirectory> m_page_directory;
    LinearAddress m_laddr;
    size_t m_size { 0 };
    size_t m_offset_in_vmo { 0 };
    Retained<VMObject> m_vmo;
    String m_name;
    bool m_readable { true };
    bool m_writable { true };
    bool m_shared { false };
    bool m_is_bitmap { false };
    Bitmap m_cow_map;
};
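
// A Region maps a contiguous range of linear addresses onto a slice of a
// VMObject, so two index spaces are in play: page indices within the region
// and page indices within the VMObject, offset by first_page_index(). A
// worked example with hypothetical numbers: a region at L0x10000000 of size
// 0x4000 with m_offset_in_vmo == 0x2000 has page_count() == 4,
// first_page_index() == 2 and last_page_index() == 5; a fault at L0x10001234
// gives page_index_from_address() == 1, i.e. VMObject page 3.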

#define MM MemoryManager::the()

class MemoryManager {
    AK_MAKE_ETERNAL
    friend class PageDirectory;
    friend class PhysicalPage;
    friend class Region;
    friend class VMObject;
    friend ByteBuffer procfs$mm(InodeIdentifier);

public:
    [[gnu::pure]] static MemoryManager& the();

    static void initialize();

    PageFaultResponse handle_page_fault(const PageFault&);

    bool map_region(Process&, Region&);
    bool unmap_region(Region&);

    void populate_page_directory(PageDirectory&);

    void enter_process_paging_scope(Process&);

    bool validate_user_read(const Process&, LinearAddress) const;
    bool validate_user_write(const Process&, LinearAddress) const;

    enum class ShouldZeroFill { No, Yes };

    RetainPtr<PhysicalPage> allocate_physical_page(ShouldZeroFill);
    RetainPtr<PhysicalPage> allocate_supervisor_physical_page();

    void remap_region(PageDirectory&, Region&);

    size_t ram_size() const { return m_ram_size; }

private:
    MemoryManager();
    ~MemoryManager();

    void register_vmo(VMObject&);
    void unregister_vmo(VMObject&);
    void register_region(Region&);
    void unregister_region(Region&);

    void map_region_at_address(PageDirectory&, Region&, LinearAddress, bool user_accessible);
    void remap_region_page(Region&, unsigned page_index_in_region, bool user_allowed);

    void initialize_paging();
    void flush_entire_tlb();
    void flush_tlb(LinearAddress);

    RetainPtr<PhysicalPage> allocate_page_table(PageDirectory&, unsigned index);

    void map_protected(LinearAddress, size_t length);

    void create_identity_mapping(PageDirectory&, LinearAddress, size_t length);
    void remove_identity_mapping(PageDirectory&, LinearAddress, size_t);

    static Region* region_from_laddr(Process&, LinearAddress);
    static const Region* region_from_laddr(const Process&, LinearAddress);

    bool copy_on_write(Region&, unsigned page_index_in_region);
    bool page_in_from_inode(Region&, unsigned page_index_in_region);
    bool zero_page(Region& region, unsigned page_index_in_region);

    byte* quickmap_page(PhysicalPage&);
    void unquickmap_page();

    PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }

    struct PageDirectoryEntry {
        explicit PageDirectoryEntry(dword* pde) : m_pde(pde) { }

        dword* page_table_base() { return reinterpret_cast<dword*>(raw() & 0xfffff000u); }
        void set_page_table_base(dword value)
        {
            *m_pde &= 0xfffu;
            *m_pde |= value & 0xfffff000u;
        }

        dword raw() const { return *m_pde; }
        dword* ptr() { return m_pde; }

        enum Flags {
            Present = 1 << 0,
            ReadWrite = 1 << 1,
            UserSupervisor = 1 << 2,
            WriteThrough = 1 << 3,
            CacheDisabled = 1 << 4,
        };

        bool is_present() const { return raw() & Present; }
        void set_present(bool b) { set_bit(Present, b); }

        bool is_user_allowed() const { return raw() & UserSupervisor; }
        void set_user_allowed(bool b) { set_bit(UserSupervisor, b); }

        bool is_writable() const { return raw() & ReadWrite; }
        void set_writable(bool b) { set_bit(ReadWrite, b); }

        bool is_write_through() const { return raw() & WriteThrough; }
        void set_write_through(bool b) { set_bit(WriteThrough, b); }

        bool is_cache_disabled() const { return raw() & CacheDisabled; }
        void set_cache_disabled(bool b) { set_bit(CacheDisabled, b); }

        void set_bit(byte bit, bool value)
        {
            if (value)
                *m_pde |= bit;
            else
                *m_pde &= ~bit;
        }

        dword* m_pde;
    };

    struct PageTableEntry {
        explicit PageTableEntry(dword* pte) : m_pte(pte) { }

        dword* physical_page_base() { return reinterpret_cast<dword*>(raw() & 0xfffff000u); }
        void set_physical_page_base(dword value)
        {
            *m_pte &= 0xfffu;
            *m_pte |= value & 0xfffff000u;
        }

        dword raw() const { return *m_pte; }
        dword* ptr() { return m_pte; }

        enum Flags {
            Present = 1 << 0,
            ReadWrite = 1 << 1,
            UserSupervisor = 1 << 2,
            WriteThrough = 1 << 3,
            CacheDisabled = 1 << 4,
        };

        bool is_present() const { return raw() & Present; }
        void set_present(bool b) { set_bit(Present, b); }

        bool is_user_allowed() const { return raw() & UserSupervisor; }
        void set_user_allowed(bool b) { set_bit(UserSupervisor, b); }

        bool is_writable() const { return raw() & ReadWrite; }
        void set_writable(bool b) { set_bit(ReadWrite, b); }

        bool is_write_through() const { return raw() & WriteThrough; }
        void set_write_through(bool b) { set_bit(WriteThrough, b); }

        bool is_cache_disabled() const { return raw() & CacheDisabled; }
        void set_cache_disabled(bool b) { set_bit(CacheDisabled, b); }

        void set_bit(byte bit, bool value)
        {
            if (value)
                *m_pte |= bit;
            else
                *m_pte &= ~bit;
        }

        dword* m_pte;
    };

    PageTableEntry ensure_pte(PageDirectory&, LinearAddress);

    RetainPtr<PageDirectory> m_kernel_page_directory;
    dword* m_page_table_zero;

    LinearAddress m_quickmap_addr;

    Vector<Retained<PhysicalPage>> m_free_physical_pages;
    Vector<Retained<PhysicalPage>> m_free_supervisor_physical_pages;

    HashTable<VMObject*> m_vmos;
    HashTable<Region*> m_regions;

    size_t m_ram_size { 0 };
    bool m_quickmap_in_use { false };
};
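
// MM is the global entry point: judging by the private helpers above,
// handle_page_fault() finds the faulting Region via region_from_laddr() and
// resolves the fault with zero_page(), page_in_from_inode() or
// copy_on_write(). A sketch of wiring an INT 14 handler to it (hypothetical
// call site and signatures, not from this header):
//
//     void page_fault_handler(RegisterDump& regs)   // hypothetical signature
//     {
//         dword faulting_address;
//         asm("movl %%cr2, %%eax" : "=a"(faulting_address));
//         auto response = MM.handle_page_fault(PageFault(regs.exception_code, LinearAddress(faulting_address)));
//         if (response == PageFaultResponse::ShouldCrash)
//             current->crash();                     // hypothetical: kill the offending process
//     }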

struct ProcessPagingScope {
    ProcessPagingScope(Process& process) { MM.enter_process_paging_scope(process); }
    ~ProcessPagingScope() { MM.enter_process_paging_scope(*current); }
};
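
// ProcessPagingScope is an RAII guard: it switches into another process's
// paging scope for the duration of a block, then re-enters the current
// process's address space on destruction. Typical use (hypothetical example,
// not from this header):
//
//     {
//         ProcessPagingScope scope(other_process);
//         // other_process's userspace mappings are addressable here
//     } // back in current's address space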
  307. };