#include <Kernel/VM/MemoryManager.h>
#include <AK/Assertions.h>
#include <AK/kstdio.h>
#include "i386.h"
#include "StdLib.h"
#include "Process.h"
#include "CMOS.h"

//#define MM_DEBUG
//#define PAGE_FAULT_DEBUG

static MemoryManager* s_the;

unsigned MemoryManager::s_user_physical_pages_in_existence;
unsigned MemoryManager::s_super_physical_pages_in_existence;
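
// NOTE: "MM" is assumed to be a macro (defined in MemoryManager.h) that expands to
//       MemoryManager::the(), so the definition below provides that accessor.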
MemoryManager& MM
{
    return *s_the;
}

MemoryManager::MemoryManager()
{
    // FIXME: This is not the best way to do memory map detection.
    //        Rewrite to use BIOS int 15,e820 once we have VM86 support.
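    // Assumption about the classic PC CMOS layout: registers 0x15/0x16 hold the amount of
    // conventional ("base") memory in KB (low/high byte), and 0x17/0x18 hold the amount of
    // extended memory above 1 MB, also in KB (low/high byte).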
    word base_memory = (CMOS::read(0x16) << 8) | CMOS::read(0x15);
    word ext_memory = (CMOS::read(0x18) << 8) | CMOS::read(0x17);

    kprintf("%u kB base memory\n", base_memory);
    kprintf("%u kB extended memory\n", ext_memory);

    m_ram_size = ext_memory * 1024;

    m_kernel_page_directory = PageDirectory::create_at_fixed_address(PhysicalAddress(0x4000));
    m_page_table_zero = (dword*)0x6000;

    initialize_paging();

    kprintf("MM initialized.\n");
}

MemoryManager::~MemoryManager()
{
}

void MemoryManager::populate_page_directory(PageDirectory& page_directory)
{
    page_directory.m_directory_page = allocate_supervisor_physical_page();
    page_directory.entries()[0] = kernel_page_directory().entries()[0];
    // Defer to the kernel page tables for 0xC0000000-0xFFFFFFFF
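    // Each PDE covers 4 MB of the 4 GB address space, so entry 768 (0xC0000000 >> 22)
    // is the first directory entry for the kernel-only region at 0xC0000000 and above.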
    for (int i = 768; i < 1024; ++i)
        page_directory.entries()[i] = kernel_page_directory().entries()[i];
}

void MemoryManager::initialize_paging()
{
    static_assert(sizeof(MemoryManager::PageDirectoryEntry) == 4);
    static_assert(sizeof(MemoryManager::PageTableEntry) == 4);
    memset(m_page_table_zero, 0, PAGE_SIZE);

#ifdef MM_DEBUG
    dbgprintf("MM: Kernel page directory @ %p\n", kernel_page_directory().cr3());
#endif

#ifdef MM_DEBUG
    dbgprintf("MM: Protect against null dereferences\n");
#endif
    // Make null dereferences crash.
    map_protected(LinearAddress(0), PAGE_SIZE);

#ifdef MM_DEBUG
    dbgprintf("MM: Identity map bottom 4MB\n");
#endif
    // The bottom 4 MB (except for the null page) are identity mapped & supervisor only.
    // Every process shares these mappings.
    create_identity_mapping(kernel_page_directory(), LinearAddress(PAGE_SIZE), (4 * MB) - PAGE_SIZE);

    // Basic memory map:
    // 0                      -> 512 kB      Kernel code. Root page directory & PDE 0.
    // (last page before 1MB)                Used by quickmap_page().
    // 1 MB                   -> 2 MB        kmalloc_eternal() space.
    // 2 MB                   -> 3 MB        kmalloc() space.
    // 3 MB                   -> 4 MB        Supervisor physical pages (available for allocation!)
    // 4 MB                   -> 0xc0000000  Userspace physical pages (available for allocation!)
    // 0xc0000000-0xffffffff                 Kernel-only linear address space

    for (size_t i = (2 * MB); i < (4 * MB); i += PAGE_SIZE)
        m_free_supervisor_physical_pages.append(PhysicalPage::create_eternal(PhysicalAddress(i), true));

    dbgprintf("MM: 4MB-%uMB available for allocation\n", m_ram_size / 1048576);
    for (size_t i = (4 * MB); i < m_ram_size; i += PAGE_SIZE)
        m_free_physical_pages.append(PhysicalPage::create_eternal(PhysicalAddress(i), false));

    m_quickmap_addr = LinearAddress((1 * MB) - PAGE_SIZE);
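    // The quickmap slot is a single reserved linear page (the last page below 1 MB, per the
    // map above) that quickmap_page()/unquickmap_page() temporarily point at an arbitrary
    // physical page when the kernel needs to touch memory that has no other mapping.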
#ifdef MM_DEBUG
    dbgprintf("MM: Quickmap will use P%x\n", m_quickmap_addr.get());
    dbgprintf("MM: Installing page directory\n");
#endif
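    // Point CR3 at the kernel page directory, then turn on paging by setting CR0.PG (bit 31)
    // together with CR0.PE (bit 0); that is what the 0x80000001 mask below does.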
    asm volatile("movl %%eax, %%cr3" :: "a"(kernel_page_directory().cr3()));
    asm volatile(
        "movl %%cr0, %%eax\n"
        "orl $0x80000001, %%eax\n"
        "movl %%eax, %%cr0\n"
        ::: "%eax", "memory");

#ifdef MM_DEBUG
    dbgprintf("MM: Paging initialized.\n");
#endif
}

RetainPtr<PhysicalPage> MemoryManager::allocate_page_table(PageDirectory& page_directory, unsigned index)
{
    ASSERT(!page_directory.m_physical_pages.contains(index));
    auto physical_page = allocate_supervisor_physical_page();
    if (!physical_page)
        return nullptr;
    page_directory.m_physical_pages.set(index, physical_page.copy_ref());
    return physical_page;
}

void MemoryManager::remove_identity_mapping(PageDirectory& page_directory, LinearAddress laddr, size_t size)
{
    InterruptDisabler disabler;
    // FIXME: ASSERT(laddr is 4KB aligned);
    for (dword offset = 0; offset < size; offset += PAGE_SIZE) {
        auto pte_address = laddr.offset(offset);
        auto pte = ensure_pte(page_directory, pte_address);
        pte.set_physical_page_base(0);
        pte.set_user_allowed(false);
        pte.set_present(true);
        pte.set_writable(true);
        flush_tlb(pte_address);
    }
}

auto MemoryManager::ensure_pte(PageDirectory& page_directory, LinearAddress laddr) -> PageTableEntry
{
    ASSERT_INTERRUPTS_DISABLED();
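    // A 32-bit linear address splits 10/10/12 under two-level x86 paging:
    // bits 31-22 select the page directory entry, bits 21-12 select the page table entry,
    // and bits 11-0 are the offset within the page.
    // e.g. laddr 0xC0100000 -> PDE index 768 (0x300), PTE index 256 (0x100), offset 0.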
    dword page_directory_index = (laddr.get() >> 22) & 0x3ff;
    dword page_table_index = (laddr.get() >> 12) & 0x3ff;

    PageDirectoryEntry pde = PageDirectoryEntry(&page_directory.entries()[page_directory_index]);
    if (!pde.is_present()) {
#ifdef MM_DEBUG
        dbgprintf("MM: PDE %u not present (requested for L%x), allocating\n", page_directory_index, laddr.get());
#endif
        if (page_directory_index == 0) {
            ASSERT(&page_directory == m_kernel_page_directory);
            pde.set_page_table_base((dword)m_page_table_zero);
            pde.set_user_allowed(false);
            pde.set_present(true);
            pde.set_writable(true);
        } else {
            //ASSERT(&page_directory != m_kernel_page_directory.ptr());
            auto page_table = allocate_page_table(page_directory, page_directory_index);
#ifdef MM_DEBUG
            dbgprintf("MM: PD K%x (%s) at P%x allocated page table #%u (for L%x) at P%x\n",
                &page_directory,
                &page_directory == m_kernel_page_directory ? "Kernel" : "User",
                page_directory.cr3(),
                page_directory_index,
                laddr.get(),
                page_table->paddr().get());
#endif
            pde.set_page_table_base(page_table->paddr().get());
            pde.set_user_allowed(true);
            pde.set_present(true);
            pde.set_writable(true);
            page_directory.m_physical_pages.set(page_directory_index, move(page_table));
        }
    }
    return PageTableEntry(&pde.page_table_base()[page_table_index]);
}

void MemoryManager::map_protected(LinearAddress laddr, size_t length)
{
    InterruptDisabler disabler;
    // FIXME: ASSERT(linearAddress is 4KB aligned);
    for (dword offset = 0; offset < length; offset += PAGE_SIZE) {
        auto pte_address = laddr.offset(offset);
        auto pte = ensure_pte(kernel_page_directory(), pte_address);
        pte.set_physical_page_base(pte_address.get());
        pte.set_user_allowed(false);
        pte.set_present(false);
        pte.set_writable(false);
        flush_tlb(pte_address);
    }
}

void MemoryManager::create_identity_mapping(PageDirectory& page_directory, LinearAddress laddr, size_t size)
{
    InterruptDisabler disabler;
    ASSERT((laddr.get() & ~PAGE_MASK) == 0);
    for (dword offset = 0; offset < size; offset += PAGE_SIZE) {
        auto pte_address = laddr.offset(offset);
        auto pte = ensure_pte(page_directory, pte_address);
        pte.set_physical_page_base(pte_address.get());
        pte.set_user_allowed(false);
        pte.set_present(true);
        pte.set_writable(true);
        page_directory.flush(pte_address);
    }
}

void MemoryManager::initialize()
{
    s_the = new MemoryManager;
}

Region* MemoryManager::region_from_laddr(Process& process, LinearAddress laddr)
{
    ASSERT_INTERRUPTS_DISABLED();

    if (laddr.get() >= 0xc0000000) {
        for (auto& region : MM.m_kernel_regions) {
            if (region->contains(laddr))
                return region;
        }
    }

    // FIXME: Use a binary search tree (maybe red/black?) or some other more appropriate data structure!
    for (auto& region : process.m_regions) {
        if (region->contains(laddr))
            return region.ptr();
    }
    dbgprintf("%s(%u) Couldn't find region for L%x (CR3=%x)\n", process.name().characters(), process.pid(), laddr.get(), process.page_directory().cr3());
    return nullptr;
}

const Region* MemoryManager::region_from_laddr(const Process& process, LinearAddress laddr)
{
    if (laddr.get() >= 0xc0000000) {
        for (auto& region : MM.m_kernel_regions) {
            if (region->contains(laddr))
                return region;
        }
    }

    // FIXME: Use a binary search tree (maybe red/black?) or some other more appropriate data structure!
    for (auto& region : process.m_regions) {
        if (region->contains(laddr))
            return region.ptr();
    }
    dbgprintf("%s(%u) Couldn't find region for L%x (CR3=%x)\n", process.name().characters(), process.pid(), laddr.get(), process.page_directory().cr3());
    return nullptr;
}

bool MemoryManager::zero_page(Region& region, unsigned page_index_in_region)
{
    ASSERT_INTERRUPTS_DISABLED();
    auto& vmo = region.vmo();
    auto& vmo_page = vmo.physical_pages()[region.first_page_index() + page_index_in_region];
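    // Briefly re-enable interrupts so we can block on the VMO's paging lock,
    // then disable them again before touching the page tables.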
    sti();
    LOCKER(vmo.m_paging_lock);
    cli();

    if (!vmo_page.is_null()) {
#ifdef PAGE_FAULT_DEBUG
        dbgprintf("MM: zero_page() but page already present. Fine with me!\n");
#endif
        remap_region_page(region, page_index_in_region, true);
        return true;
    }

    auto physical_page = allocate_physical_page(ShouldZeroFill::Yes);
#ifdef PAGE_FAULT_DEBUG
    dbgprintf(" >> ZERO P%x\n", physical_page->paddr().get());
#endif
    region.m_cow_map.set(page_index_in_region, false);
    // Store the new page in the same VMO slot we looked up above
    // (region.first_page_index() + page_index_in_region), not at the bare region index.
    vmo_page = move(physical_page);
    remap_region_page(region, page_index_in_region, true);
    return true;
}
bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
{
    ASSERT_INTERRUPTS_DISABLED();
    auto& vmo = region.vmo();
    if (vmo.physical_pages()[page_index_in_region]->retain_count() == 1) {
#ifdef PAGE_FAULT_DEBUG
        dbgprintf(" >> It's a COW page but nobody is sharing it anymore. Remap r/w\n");
#endif
        region.m_cow_map.set(page_index_in_region, false);
        remap_region_page(region, page_index_in_region, true);
        return true;
    }

#ifdef PAGE_FAULT_DEBUG
    dbgprintf(" >> It's a COW page and it's time to COW!\n");
#endif
    auto physical_page_to_copy = move(vmo.physical_pages()[page_index_in_region]);
    auto physical_page = allocate_physical_page(ShouldZeroFill::No);
    byte* dest_ptr = quickmap_page(*physical_page);
    const byte* src_ptr = region.laddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
#ifdef PAGE_FAULT_DEBUG
    dbgprintf(" >> COW P%x <- P%x\n", physical_page->paddr().get(), physical_page_to_copy->paddr().get());
#endif
    memcpy(dest_ptr, src_ptr, PAGE_SIZE);
    vmo.physical_pages()[page_index_in_region] = move(physical_page);
    unquickmap_page();
    region.m_cow_map.set(page_index_in_region, false);
    remap_region_page(region, page_index_in_region, true);
    return true;
}

bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_region)
{
    ASSERT(region.page_directory());

    auto& vmo = region.vmo();
    ASSERT(!vmo.is_anonymous());
    ASSERT(vmo.inode());

    auto& vmo_page = vmo.physical_pages()[region.first_page_index() + page_index_in_region];

    InterruptFlagSaver saver;

    sti();
    LOCKER(vmo.m_paging_lock);
    cli();

    if (!vmo_page.is_null()) {
        dbgprintf("MM: page_in_from_inode() but page already present. Fine with me!\n");
        remap_region_page(region, page_index_in_region, true);
        return true;
    }

#ifdef MM_DEBUG
    dbgprintf("MM: page_in_from_inode ready to read from inode\n");
#endif
    sti();
    byte page_buffer[PAGE_SIZE];
    auto& inode = *vmo.inode();
    auto nread = inode.read_bytes(vmo.inode_offset() + ((region.first_page_index() + page_index_in_region) * PAGE_SIZE), PAGE_SIZE, page_buffer, nullptr);
    if (nread < 0) {
        kprintf("MM: page_in_from_inode had error (%d) while reading!\n", nread);
        return false;
    }
    if (nread < PAGE_SIZE) {
        // If we read less than a page, zero out the rest to avoid leaking uninitialized data.
        memset(page_buffer + nread, 0, PAGE_SIZE - nread);
    }
    cli();
    vmo_page = allocate_physical_page(ShouldZeroFill::No);
    if (vmo_page.is_null()) {
        kprintf("MM: page_in_from_inode was unable to allocate a physical page\n");
        return false;
    }
    remap_region_page(region, page_index_in_region, true);
    byte* dest_ptr = region.laddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
    memcpy(dest_ptr, page_buffer, PAGE_SIZE);
    return true;
}

PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(current);
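    // On x86 the page fault error code's bit 0 distinguishes the two cases handled below:
    // bit 0 clear means the page was not present, bit 0 set means a protection violation
    // (e.g. a write to a read-only COW page). fault.is_not_present() and
    // fault.is_protection_violation() are assumed to wrap that bit.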
#ifdef PAGE_FAULT_DEBUG
    dbgprintf("MM: handle_page_fault(%w) at L%x\n", fault.code(), fault.laddr().get());
#endif
    ASSERT(fault.laddr() != m_quickmap_addr);
    auto* region = region_from_laddr(current->process(), fault.laddr());
    if (!region) {
        kprintf("NP(error) fault at invalid address L%x\n", fault.laddr().get());
        return PageFaultResponse::ShouldCrash;
    }
    auto page_index_in_region = region->page_index_from_address(fault.laddr());
    if (fault.is_not_present()) {
        if (region->vmo().inode()) {
#ifdef PAGE_FAULT_DEBUG
            dbgprintf("NP(inode) fault in Region{%p}[%u]\n", region, page_index_in_region);
#endif
            page_in_from_inode(*region, page_index_in_region);
            return PageFaultResponse::Continue;
        } else {
#ifdef PAGE_FAULT_DEBUG
            dbgprintf("NP(zero) fault in Region{%p}[%u]\n", region, page_index_in_region);
#endif
            zero_page(*region, page_index_in_region);
            return PageFaultResponse::Continue;
        }
    } else if (fault.is_protection_violation()) {
        if (region->m_cow_map.get(page_index_in_region)) {
#ifdef PAGE_FAULT_DEBUG
            dbgprintf("PV(cow) fault in Region{%p}[%u]\n", region, page_index_in_region);
#endif
            bool success = copy_on_write(*region, page_index_in_region);
            ASSERT(success);
            return PageFaultResponse::Continue;
        }
        kprintf("PV(error) fault in Region{%p}[%u] at L%x\n", region, page_index_in_region, fault.laddr().get());
    } else {
        ASSERT_NOT_REACHED();
    }

    return PageFaultResponse::ShouldCrash;
}

RetainPtr<Region> MemoryManager::allocate_kernel_region(size_t size, String&& name)
{
    InterruptDisabler disabler;

    // FIXME: We need a linear address space allocator.
    static dword next_laddr = 0xd0000000;
    ASSERT(!(size % PAGE_SIZE));
    LinearAddress laddr(next_laddr);
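    // Bump the next address by the region size plus 16 KB, presumably to keep an unmapped
    // guard gap between consecutive kernel regions.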
    next_laddr += size + 16384;

    auto region = adopt(*new Region(laddr, size, move(name), true, true, false));
    MM.map_region_at_address(*m_kernel_page_directory, *region, laddr, false);
    return region;
}

RetainPtr<PhysicalPage> MemoryManager::allocate_physical_page(ShouldZeroFill should_zero_fill)
{
    InterruptDisabler disabler;
    if (1 > m_free_physical_pages.size()) {
        kprintf("FUCK! No physical pages available.\n");
        ASSERT_NOT_REACHED();
        return { };
    }
#ifdef MM_DEBUG
    dbgprintf("MM: allocate_physical_page vending P%x (%u remaining)\n", m_free_physical_pages.last()->paddr().get(), m_free_physical_pages.size());
#endif
    auto physical_page = m_free_physical_pages.take_last();
    if (should_zero_fill == ShouldZeroFill::Yes) {
        auto* ptr = (dword*)quickmap_page(*physical_page);
        fast_dword_fill(ptr, 0, PAGE_SIZE / sizeof(dword));
        unquickmap_page();
    }
    return physical_page;
}

RetainPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
{
    InterruptDisabler disabler;
    if (1 > m_free_supervisor_physical_pages.size()) {
        kprintf("FUCK! No physical pages available.\n");
        ASSERT_NOT_REACHED();
        return { };
    }
#ifdef MM_DEBUG
    dbgprintf("MM: allocate_supervisor_physical_page vending P%x (%u remaining)\n", m_free_supervisor_physical_pages.last()->paddr().get(), m_free_supervisor_physical_pages.size());
#endif
    auto physical_page = m_free_supervisor_physical_pages.take_last();
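    // Supervisor physical pages come from the identity-mapped bottom 4 MB, so we can zero
    // them through their physical address directly; user pages (above) need quickmap_page().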
    fast_dword_fill((dword*)physical_page->paddr().as_ptr(), 0, PAGE_SIZE / sizeof(dword));
    return physical_page;
}

void MemoryManager::enter_process_paging_scope(Process& process)
{
    ASSERT(current);
    InterruptDisabler disabler;

    current->tss().cr3 = process.page_directory().cr3();
    asm volatile("movl %%eax, %%cr3" :: "a"(process.page_directory().cr3()) : "memory");
}

void MemoryManager::enter_kernel_paging_scope()
{
    InterruptDisabler disabler;
    asm volatile("movl %%eax, %%cr3" :: "a"(kernel_page_directory().cr3()) : "memory");
}

void MemoryManager::flush_entire_tlb()
{
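    // Reloading CR3 with its current value flushes the entire TLB (all non-global entries);
    // flush_tlb() below uses invlpg to invalidate just the one entry for a given address.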
    asm volatile(
        "mov %%cr3, %%eax\n"
        "mov %%eax, %%cr3\n"
        ::: "%eax", "memory");
}

void MemoryManager::flush_tlb(LinearAddress laddr)
{
    asm volatile("invlpg %0" :: "m"(*(char*)laddr.get()) : "memory");
}

void MemoryManager::map_for_kernel(LinearAddress laddr, PhysicalAddress paddr)
{
    auto pte = ensure_pte(kernel_page_directory(), laddr);
    pte.set_physical_page_base(paddr.get());
    pte.set_present(true);
    pte.set_writable(true);
    pte.set_user_allowed(false);
    flush_tlb(laddr);
}

byte* MemoryManager::quickmap_page(PhysicalPage& physical_page)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(!m_quickmap_in_use);
    m_quickmap_in_use = true;

    auto page_laddr = m_quickmap_addr;
    auto pte = ensure_pte(kernel_page_directory(), page_laddr);
    pte.set_physical_page_base(physical_page.paddr().get());
    pte.set_present(true);
    pte.set_writable(true);
    pte.set_user_allowed(false);
    flush_tlb(page_laddr);
    ASSERT((dword)pte.physical_page_base() == physical_page.paddr().get());
#ifdef MM_DEBUG
    dbgprintf("MM: >> quickmap_page L%x => P%x @ PTE=%p\n", page_laddr, physical_page.paddr().get(), pte.ptr());
#endif
    return page_laddr.as_ptr();
}

void MemoryManager::unquickmap_page()
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(m_quickmap_in_use);
    auto page_laddr = m_quickmap_addr;
    auto pte = ensure_pte(kernel_page_directory(), page_laddr);
#ifdef MM_DEBUG
    auto old_physical_address = pte.physical_page_base();
#endif
    pte.set_physical_page_base(0);
    pte.set_present(false);
    pte.set_writable(false);
    flush_tlb(page_laddr);
#ifdef MM_DEBUG
    dbgprintf("MM: >> unquickmap_page L%x =/> P%x\n", page_laddr, old_physical_address);
#endif
    m_quickmap_in_use = false;
}

void MemoryManager::remap_region_page(Region& region, unsigned page_index_in_region, bool user_allowed)
{
    ASSERT(region.page_directory());
    InterruptDisabler disabler;
    auto page_laddr = region.laddr().offset(page_index_in_region * PAGE_SIZE);
    auto pte = ensure_pte(*region.page_directory(), page_laddr);
    auto& physical_page = region.vmo().physical_pages()[page_index_in_region];
    ASSERT(physical_page);
    pte.set_physical_page_base(physical_page->paddr().get());
    pte.set_present(true); // FIXME: Maybe we should use the is_readable flag here?
    if (region.m_cow_map.get(page_index_in_region))
        pte.set_writable(false);
    else
        pte.set_writable(region.is_writable());
    pte.set_cache_disabled(!region.vmo().m_allow_cpu_caching);
    pte.set_write_through(!region.vmo().m_allow_cpu_caching);
    pte.set_user_allowed(user_allowed);
    region.page_directory()->flush(page_laddr);
#ifdef MM_DEBUG
    dbgprintf("MM: >> remap_region_page (PD=%x, PTE=P%x) '%s' L%x => P%x (@%p)\n", region.page_directory()->cr3(), pte.ptr(), region.name().characters(), page_laddr.get(), physical_page->paddr().get(), physical_page.ptr());
#endif
}

void MemoryManager::remap_region(PageDirectory& page_directory, Region& region)
{
    InterruptDisabler disabler;
    ASSERT(region.page_directory() == &page_directory);
    map_region_at_address(page_directory, region, region.laddr(), true);
}

void MemoryManager::map_region_at_address(PageDirectory& page_directory, Region& region, LinearAddress laddr, bool user_allowed)
{
    InterruptDisabler disabler;
    region.set_page_directory(page_directory);
    auto& vmo = region.vmo();
#ifdef MM_DEBUG
    dbgprintf("MM: map_region_at_address will map VMO pages %u - %u (VMO page count: %u)\n", region.first_page_index(), region.last_page_index(), vmo.page_count());
#endif
    for (size_t i = 0; i < region.page_count(); ++i) {
        auto page_laddr = laddr.offset(i * PAGE_SIZE);
        auto pte = ensure_pte(page_directory, page_laddr);
        auto& physical_page = vmo.physical_pages()[region.first_page_index() + i];
        if (physical_page) {
            pte.set_physical_page_base(physical_page->paddr().get());
            pte.set_present(true); // FIXME: Maybe we should use the is_readable flag here?
            // FIXME: It seems wrong that the *region* cow map is essentially using *VMO* relative indices.
            if (region.m_cow_map.get(region.first_page_index() + i))
                pte.set_writable(false);
            else
                pte.set_writable(region.is_writable());
            pte.set_cache_disabled(!region.vmo().m_allow_cpu_caching);
            pte.set_write_through(!region.vmo().m_allow_cpu_caching);
        } else {
            pte.set_physical_page_base(0);
            pte.set_present(false);
            pte.set_writable(region.is_writable());
        }
        pte.set_user_allowed(user_allowed);
        page_directory.flush(page_laddr);
#ifdef MM_DEBUG
        dbgprintf("MM: >> map_region_at_address (PD=%x) '%s' L%x => P%x (@%p)\n", &page_directory, region.name().characters(), page_laddr, physical_page ? physical_page->paddr().get() : 0, physical_page.ptr());
#endif
    }
}

bool MemoryManager::unmap_region(Region& region)
{
    ASSERT(region.page_directory());
    InterruptDisabler disabler;
    for (size_t i = 0; i < region.page_count(); ++i) {
        auto laddr = region.laddr().offset(i * PAGE_SIZE);
        auto pte = ensure_pte(*region.page_directory(), laddr);
        pte.set_physical_page_base(0);
        pte.set_present(false);
        pte.set_writable(false);
        pte.set_user_allowed(false);
        region.page_directory()->flush(laddr);
#ifdef MM_DEBUG
        auto& physical_page = region.vmo().physical_pages()[region.first_page_index() + i];
        dbgprintf("MM: >> Unmapped L%x => P%x <<\n", laddr, physical_page ? physical_page->paddr().get() : 0);
#endif
    }
    region.release_page_directory();
    return true;
}

bool MemoryManager::map_region(Process& process, Region& region)
{
    map_region_at_address(process.page_directory(), region, region.laddr(), true);
    return true;
}

bool MemoryManager::validate_user_read(const Process& process, LinearAddress laddr) const
{
    auto* region = region_from_laddr(process, laddr);
    return region && region->is_readable();
}

bool MemoryManager::validate_user_write(const Process& process, LinearAddress laddr) const
{
    auto* region = region_from_laddr(process, laddr);
    return region && region->is_writable();
}

void MemoryManager::register_vmo(VMObject& vmo)
{
    InterruptDisabler disabler;
    m_vmos.set(&vmo);
}

void MemoryManager::unregister_vmo(VMObject& vmo)
{
    InterruptDisabler disabler;
    m_vmos.remove(&vmo);
}

void MemoryManager::register_region(Region& region)
{
    InterruptDisabler disabler;
    if (region.laddr().get() >= 0xc0000000)
        m_kernel_regions.set(&region);
    else
        m_user_regions.set(&region);
}

void MemoryManager::unregister_region(Region& region)
{
    InterruptDisabler disabler;
    if (region.laddr().get() >= 0xc0000000)
        m_kernel_regions.remove(&region);
    else
        m_user_regions.remove(&region);
}

ProcessPagingScope::ProcessPagingScope(Process& process)
{
    ASSERT(current);
    MM.enter_process_paging_scope(process);
}

ProcessPagingScope::~ProcessPagingScope()
{
    MM.enter_process_paging_scope(current->process());
}

KernelPagingScope::KernelPagingScope()
{
    ASSERT(current);
    MM.enter_kernel_paging_scope();
}

KernelPagingScope::~KernelPagingScope()
{
    MM.enter_process_paging_scope(current->process());
}