#include "CMOS.h"
#include "Process.h"
#include "StdLib.h"
#include <AK/Assertions.h>
#include <AK/kstdio.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/FileSystem/Inode.h>
#include <Kernel/Multiboot.h>
#include <Kernel/VM/MemoryManager.h>

//#define MM_DEBUG
//#define PAGE_FAULT_DEBUG

static MemoryManager* s_the;
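
// NOTE: "MM" is expected to be a macro (presumably defined in MemoryManager.h
// as MemoryManager::the()), so the block below defines the singleton accessor.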
MemoryManager& MM
{
    return *s_the;
}

MemoryManager::MemoryManager()
{
    m_kernel_page_directory = PageDirectory::create_at_fixed_address(PhysicalAddress(0x4000));
    m_page_table_zero = (PageTableEntry*)0x6000;
    m_page_table_one = (PageTableEntry*)0x7000;
    initialize_paging();

    kprintf("MM initialized.\n");
}

MemoryManager::~MemoryManager()
{
}
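
// Every new page directory starts out sharing the kernel's mappings:
// PDEs 0 and 1 cover the identity-mapped low-memory range, and PDEs
// 768-1023 cover the kernel-only half at 0xc0000000 and above.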
void MemoryManager::populate_page_directory(PageDirectory& page_directory)
{
    page_directory.m_directory_page = allocate_supervisor_physical_page();
    page_directory.entries()[0].copy_from({}, kernel_page_directory().entries()[0]);
    page_directory.entries()[1].copy_from({}, kernel_page_directory().entries()[1]);
    // Defer to the kernel page tables for 0xC0000000-0xFFFFFFFF
    for (int i = 768; i < 1024; ++i)
        page_directory.entries()[i].copy_from({}, kernel_page_directory().entries()[i]);
}

void MemoryManager::initialize_paging()
{
    memset(m_page_table_zero, 0, PAGE_SIZE);
    memset(m_page_table_one, 0, PAGE_SIZE);

#ifdef MM_DEBUG
    dbgprintf("MM: Kernel page directory @ %p\n", kernel_page_directory().cr3());
#endif

#ifdef MM_DEBUG
    dbgprintf("MM: Protect against null dereferences\n");
#endif
    // Make null dereferences crash.
    map_protected(VirtualAddress(0), PAGE_SIZE);

#ifdef MM_DEBUG
    dbgprintf("MM: Identity map bottom 5MB\n");
#endif
    // The bottom 5 MB (except for the null page) are identity mapped & supervisor only.
    // Every process shares these mappings.
    create_identity_mapping(kernel_page_directory(), VirtualAddress(PAGE_SIZE), (5 * MB) - PAGE_SIZE);

    // Basic memory map:
    // 0                     -> 512 kB       Kernel code. Root page directory & PDE 0.
    // (last page before 1MB)                Used by quickmap_page().
    // 1 MB                  -> 3 MB         kmalloc_eternal() space.
    // 3 MB                  -> 4 MB         kmalloc() space.
    // 4 MB                  -> 5 MB         Supervisor physical pages (available for allocation!)
    // 5 MB                  -> 0xc0000000   Userspace physical pages (available for allocation!)
    // 0xc0000000            -> 0xffffffff   Kernel-only virtual address space

    m_quickmap_addr = VirtualAddress((1 * MB) - PAGE_SIZE);
#ifdef MM_DEBUG
    dbgprintf("MM: Quickmap will use %p\n", m_quickmap_addr.get());
#endif

    RefPtr<PhysicalRegion> region;
    bool region_is_super = false;

    for (auto* mmap = (multiboot_memory_map_t*)multiboot_info_ptr->mmap_addr; (unsigned long)mmap < multiboot_info_ptr->mmap_addr + multiboot_info_ptr->mmap_length; mmap = (multiboot_memory_map_t*)((unsigned long)mmap + mmap->size + sizeof(mmap->size))) {
        kprintf("MM: Multiboot mmap: base_addr = 0x%x%08x, length = 0x%x%08x, type = 0x%x\n",
            (u32)(mmap->addr >> 32),
            (u32)(mmap->addr & 0xffffffff),
            (u32)(mmap->len >> 32),
            (u32)(mmap->len & 0xffffffff),
            (u32)mmap->type);

        if (mmap->type != MULTIBOOT_MEMORY_AVAILABLE)
            continue;

        // FIXME: Maybe make use of stuff below the 1MB mark?
        if (mmap->addr < (1 * MB))
            continue;

#ifdef MM_DEBUG
        kprintf("MM: considering memory at %p - %p\n",
            (u32)mmap->addr, (u32)(mmap->addr + mmap->len));
#endif
        // Coalesce physically contiguous pages into the current region; start
        // a new region when there's a gap or the supervisor/user kind changes.
        for (size_t page_base = mmap->addr; page_base < (mmap->addr + mmap->len); page_base += PAGE_SIZE) {
            auto addr = PhysicalAddress(page_base);

            if (page_base < 4 * MB) {
                // nothing
            } else if (page_base >= 4 * MB && page_base < 5 * MB) {
                if (region.is_null() || !region_is_super || region->upper().offset(PAGE_SIZE) != addr) {
                    m_super_physical_regions.append(PhysicalRegion::create(addr, addr));
                    region = &m_super_physical_regions.last();
                    region_is_super = true;
                } else {
                    region->expand(region->lower(), addr);
                }
            } else {
                if (region.is_null() || region_is_super || region->upper().offset(PAGE_SIZE) != addr) {
                    m_user_physical_regions.append(PhysicalRegion::create(addr, addr));
                    region = &m_user_physical_regions.last();
                    region_is_super = false;
                } else {
                    region->expand(region->lower(), addr);
                }
            }
        }
    }

    for (auto& region : m_super_physical_regions)
        m_super_physical_pages += region.finalize_capacity();

    for (auto& region : m_user_physical_regions)
        m_user_physical_pages += region.finalize_capacity();

#ifdef MM_DEBUG
    dbgprintf("MM: Installing page directory\n");
#endif
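
    // Load the kernel page directory into CR3, then enable paging by setting
    // CR0.PG (bit 31) together with CR0.PE (bit 0); 0x80000001 is exactly
    // those two bits.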
    asm volatile("movl %%eax, %%cr3" ::"a"(kernel_page_directory().cr3()));
    asm volatile(
        "movl %%cr0, %%eax\n"
        "orl $0x80000001, %%eax\n"
        "movl %%eax, %%cr0\n" ::
        : "%eax", "memory");

#ifdef MM_DEBUG
    dbgprintf("MM: Paging initialized.\n");
#endif
}

RefPtr<PhysicalPage> MemoryManager::allocate_page_table(PageDirectory& page_directory, unsigned index)
{
    ASSERT(!page_directory.m_physical_pages.contains(index));
    auto physical_page = allocate_supervisor_physical_page();
    if (!physical_page)
        return nullptr;
    page_directory.m_physical_pages.set(index, physical_page);
    return physical_page;
}

void MemoryManager::remove_identity_mapping(PageDirectory& page_directory, VirtualAddress vaddr, size_t size)
{
    InterruptDisabler disabler;
    // FIXME: ASSERT(vaddr is 4KB aligned);
    for (u32 offset = 0; offset < size; offset += PAGE_SIZE) {
        auto pte_address = vaddr.offset(offset);
        auto& pte = ensure_pte(page_directory, pte_address);
        // Removing a mapping means marking the PTE non-present and read-only.
        pte.set_physical_page_base(0);
        pte.set_user_allowed(false);
        pte.set_present(false);
        pte.set_writable(false);
        flush_tlb(pte_address);
    }
}

PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
{
    ASSERT_INTERRUPTS_DISABLED();
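
    // On i386 a virtual address splits 10/10/12: the top 10 bits select the
    // page directory entry, the next 10 bits select the page table entry,
    // and the low 12 bits are the offset within the 4 KB page.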
    u32 page_directory_index = (vaddr.get() >> 22) & 0x3ff;
    u32 page_table_index = (vaddr.get() >> 12) & 0x3ff;

    PageDirectoryEntry& pde = page_directory.entries()[page_directory_index];
    if (!pde.is_present()) {
#ifdef MM_DEBUG
        dbgprintf("MM: PDE %u not present (requested for L%x), allocating\n", page_directory_index, vaddr.get());
#endif
        if (page_directory_index == 0) {
            ASSERT(&page_directory == m_kernel_page_directory);
            pde.set_page_table_base((u32)m_page_table_zero);
            pde.set_user_allowed(false);
            pde.set_present(true);
            pde.set_writable(true);
        } else if (page_directory_index == 1) {
            ASSERT(&page_directory == m_kernel_page_directory);
            pde.set_page_table_base((u32)m_page_table_one);
            pde.set_user_allowed(false);
            pde.set_present(true);
            pde.set_writable(true);
        } else {
            //ASSERT(&page_directory != m_kernel_page_directory.ptr());
            auto page_table = allocate_page_table(page_directory, page_directory_index);
#ifdef MM_DEBUG
            dbgprintf("MM: PD K%x (%s) at P%x allocated page table #%u (for L%x) at P%x\n",
                &page_directory,
                &page_directory == m_kernel_page_directory ? "Kernel" : "User",
                page_directory.cr3(),
                page_directory_index,
                vaddr.get(),
                page_table->paddr().get());
#endif
            pde.set_page_table_base(page_table->paddr().get());
            pde.set_user_allowed(true);
            pde.set_present(true);
            pde.set_writable(true);
            page_directory.m_physical_pages.set(page_directory_index, move(page_table));
        }
    }
    return pde.page_table_base()[page_table_index];
}

void MemoryManager::map_protected(VirtualAddress vaddr, size_t length)
{
    InterruptDisabler disabler;
    ASSERT(vaddr.is_page_aligned());
    for (u32 offset = 0; offset < length; offset += PAGE_SIZE) {
        auto pte_address = vaddr.offset(offset);
        auto& pte = ensure_pte(kernel_page_directory(), pte_address);
        pte.set_physical_page_base(pte_address.get());
        pte.set_user_allowed(false);
        pte.set_present(false);
        pte.set_writable(false);
        flush_tlb(pte_address);
    }
}

void MemoryManager::create_identity_mapping(PageDirectory& page_directory, VirtualAddress vaddr, size_t size)
{
    InterruptDisabler disabler;
    ASSERT((vaddr.get() & ~PAGE_MASK) == 0);
    for (u32 offset = 0; offset < size; offset += PAGE_SIZE) {
        auto pte_address = vaddr.offset(offset);
        auto& pte = ensure_pte(page_directory, pte_address);
        pte.set_physical_page_base(pte_address.get());
        pte.set_user_allowed(false);
        pte.set_present(true);
        pte.set_writable(true);
        page_directory.flush(pte_address);
    }
}

void MemoryManager::initialize()
{
    s_the = new MemoryManager;
}

Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
{
    ASSERT(vaddr.get() >= 0xc0000000);
    for (auto& region : MM.m_kernel_regions) {
        if (region->contains(vaddr))
            return region;
    }
    return nullptr;
}

Region* MemoryManager::user_region_from_vaddr(Process& process, VirtualAddress vaddr)
{
    // FIXME: Use a binary search tree (maybe red/black?) or some other more appropriate data structure!
    for (auto& region : process.m_regions) {
        if (region.contains(vaddr))
            return &region;
    }
    dbg() << process << " Couldn't find user region for " << vaddr;
    return nullptr;
}

Region* MemoryManager::region_from_vaddr(Process& process, VirtualAddress vaddr)
{
    ASSERT_INTERRUPTS_DISABLED();
    if (vaddr.get() >= 0xc0000000)
        return kernel_region_from_vaddr(vaddr);
    return user_region_from_vaddr(process, vaddr);
}

const Region* MemoryManager::region_from_vaddr(const Process& process, VirtualAddress vaddr)
{
    if (vaddr.get() >= 0xc0000000)
        return kernel_region_from_vaddr(vaddr);
    return user_region_from_vaddr(const_cast<Process&>(process), vaddr);
}
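
// Demand-zero: called on a not-present fault in an anonymous region.
// Allocates a zero-filled physical page and maps it at the faulting page.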
bool MemoryManager::zero_page(Region& region, unsigned page_index_in_region)
{
    ASSERT_INTERRUPTS_DISABLED();
    auto& vmo = region.vmo();
    auto& vmo_page = vmo.physical_pages()[region.first_page_index() + page_index_in_region];
    sti();
    LOCKER(vmo.m_paging_lock);
    cli();
    if (!vmo_page.is_null()) {
#ifdef PAGE_FAULT_DEBUG
        dbgprintf("MM: zero_page() but page already present. Fine with me!\n");
#endif
        remap_region_page(region, page_index_in_region);
        return true;
    }
    auto physical_page = allocate_user_physical_page(ShouldZeroFill::Yes);
#ifdef PAGE_FAULT_DEBUG
    dbgprintf(" >> ZERO P%x\n", physical_page->paddr().get());
#endif
    region.set_should_cow(page_index_in_region, false);
    // Store the new page in the same (VMO-relative) slot we null-checked above.
    vmo_page = move(physical_page);
    remap_region_page(region, page_index_in_region);
    return true;
}
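
// Copy-on-write: if we hold the last reference to the page, just remap it
// writable; otherwise copy the page contents into a fresh physical page
// (via the quickmap slot) and map that instead.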
bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
{
    ASSERT_INTERRUPTS_DISABLED();
    auto& vmo = region.vmo();
    // Physical pages live in the VMO, so index them VMO-relative.
    auto& vmo_page = vmo.physical_pages()[region.first_page_index() + page_index_in_region];
    if (vmo_page->ref_count() == 1) {
#ifdef PAGE_FAULT_DEBUG
        dbgprintf(" >> It's a COW page but nobody is sharing it anymore. Remap r/w\n");
#endif
        region.set_should_cow(page_index_in_region, false);
        remap_region_page(region, page_index_in_region);
        return true;
    }

#ifdef PAGE_FAULT_DEBUG
    dbgprintf(" >> It's a COW page and it's time to COW!\n");
#endif
    auto physical_page_to_copy = move(vmo_page);
    auto physical_page = allocate_user_physical_page(ShouldZeroFill::No);
    u8* dest_ptr = quickmap_page(*physical_page);
    const u8* src_ptr = region.vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
#ifdef PAGE_FAULT_DEBUG
    dbgprintf(" >> COW P%x <- P%x\n", physical_page->paddr().get(), physical_page_to_copy->paddr().get());
#endif
    memcpy(dest_ptr, src_ptr, PAGE_SIZE);
    vmo_page = move(physical_page);
    unquickmap_page();
    region.set_should_cow(page_index_in_region, false);
    remap_region_page(region, page_index_in_region);
    return true;
}
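
// Page-in from inode: read the backing file contents into a stack buffer
// (with interrupts enabled, since disk I/O may block), then allocate a
// physical page and copy the buffer into the new mapping.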
bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_region)
{
    ASSERT(region.page_directory());

    auto& vmo = region.vmo();
    ASSERT(!vmo.is_anonymous());
    ASSERT(vmo.inode());

    auto& vmo_page = vmo.physical_pages()[region.first_page_index() + page_index_in_region];

    InterruptFlagSaver saver;

    sti();
    LOCKER(vmo.m_paging_lock);
    cli();

    if (!vmo_page.is_null()) {
        dbgprintf("MM: page_in_from_inode() but page already present. Fine with me!\n");
        remap_region_page(region, page_index_in_region);
        return true;
    }

#ifdef MM_DEBUG
    dbgprintf("MM: page_in_from_inode ready to read from inode\n");
#endif
    sti();
    u8 page_buffer[PAGE_SIZE];
    auto& inode = *vmo.inode();
    auto nread = inode.read_bytes(vmo.inode_offset() + ((region.first_page_index() + page_index_in_region) * PAGE_SIZE), PAGE_SIZE, page_buffer, nullptr);
    if (nread < 0) {
        kprintf("MM: page_in_from_inode had error (%d) while reading!\n", nread);
        return false;
    }
    if (nread < PAGE_SIZE) {
        // If we read less than a page, zero out the rest to avoid leaking uninitialized data.
        memset(page_buffer + nread, 0, PAGE_SIZE - nread);
    }
    cli();
    vmo_page = allocate_user_physical_page(ShouldZeroFill::No);
    if (vmo_page.is_null()) {
        kprintf("MM: page_in_from_inode was unable to allocate a physical page\n");
        return false;
    }
    remap_region_page(region, page_index_in_region);
    u8* dest_ptr = region.vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
    memcpy(dest_ptr, page_buffer, PAGE_SIZE);
    return true;
}
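
// Page fault dispatch: a not-present fault above 0xc0000000 may just mean the
// current page directory hasn't picked up a newer kernel PDE yet; otherwise
// resolve the fault against the owning region (inode page-in, demand-zero,
// or copy-on-write).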
PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(current);
#ifdef PAGE_FAULT_DEBUG
    dbgprintf("MM: handle_page_fault(%w) at L%x\n", fault.code(), fault.vaddr().get());
#endif
    ASSERT(fault.vaddr() != m_quickmap_addr);
    if (fault.type() == PageFault::Type::PageNotPresent && fault.vaddr().get() >= 0xc0000000) {
        u32 page_directory_index = (fault.vaddr().get() >> 22) & 0x3ff;
        auto& kernel_pde = kernel_page_directory().entries()[page_directory_index];
        if (kernel_pde.is_present()) {
            dbgprintf("NP(kernel): copying new kernel mapping for L%x into current page directory\n", fault.vaddr().get());
            auto* current_page_directory = reinterpret_cast<PageDirectoryEntry*>(cpu_cr3());
            current_page_directory[page_directory_index].copy_from({}, kernel_pde);
            flush_tlb(fault.vaddr().page_base());
            return PageFaultResponse::Continue;
        }
    }
    auto* region = region_from_vaddr(current->process(), fault.vaddr());
    if (!region) {
        kprintf("NP(error) fault at invalid address L%x\n", fault.vaddr().get());
        return PageFaultResponse::ShouldCrash;
    }
    auto page_index_in_region = region->page_index_from_address(fault.vaddr());
    if (fault.type() == PageFault::Type::PageNotPresent) {
        if (region->vmo().inode()) {
#ifdef PAGE_FAULT_DEBUG
            dbgprintf("NP(inode) fault in Region{%p}[%u]\n", region, page_index_in_region);
#endif
            page_in_from_inode(*region, page_index_in_region);
            return PageFaultResponse::Continue;
        }
#ifdef PAGE_FAULT_DEBUG
        dbgprintf("NP(zero) fault in Region{%p}[%u]\n", region, page_index_in_region);
#endif
        zero_page(*region, page_index_in_region);
        return PageFaultResponse::Continue;
    }
    ASSERT(fault.type() == PageFault::Type::ProtectionViolation);
    if (fault.access() == PageFault::Access::Write && region->should_cow(page_index_in_region)) {
#ifdef PAGE_FAULT_DEBUG
        dbgprintf("PV(cow) fault in Region{%p}[%u]\n", region, page_index_in_region);
#endif
        bool success = copy_on_write(*region, page_index_in_region);
        ASSERT(success);
        return PageFaultResponse::Continue;
    }
    kprintf("PV(error) fault in Region{%p}[%u] at L%x\n", region, page_index_in_region, fault.vaddr().get());
    return PageFaultResponse::ShouldCrash;
}
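
// Kernel regions get their virtual range from the kernel page directory's
// range allocator and are committed (backed by physical pages) immediately.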
RefPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, bool user_accessible)
{
    InterruptDisabler disabler;
    ASSERT(!(size % PAGE_SIZE));
    auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
    ASSERT(range.is_valid());
    RefPtr<Region> region;
    if (user_accessible)
        region = Region::create_user_accessible(range, name, PROT_READ | PROT_WRITE | PROT_EXEC, false);
    else
        region = Region::create_kernel_only(range, name, PROT_READ | PROT_WRITE | PROT_EXEC, false);
    MM.map_region_at_address(*m_kernel_page_directory, *region, range.base());
    // FIXME: It would be cool if these could zero-fill on demand instead.
    region->commit();
    return region;
}

RefPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name)
{
    return allocate_kernel_region(size, name, true);
}

void MemoryManager::deallocate_user_physical_page(PhysicalPage&& page)
{
    for (auto& region : m_user_physical_regions) {
        if (!region.contains(page)) {
            kprintf(
                "MM: deallocate_user_physical_page: %p not in %p -> %p\n",
                page.paddr(), region.lower().get(), region.upper().get());
            continue;
        }
        region.return_page(move(page));
        --m_user_physical_pages_used;
        return;
    }

    kprintf("MM: deallocate_user_physical_page couldn't figure out region for user page @ %p\n", page.paddr());
    ASSERT_NOT_REACHED();
}
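
// First-fit scan over the user physical regions; stop at the first region
// that can hand us a free page.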
RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill)
{
    InterruptDisabler disabler;
    RefPtr<PhysicalPage> page;
    for (auto& region : m_user_physical_regions) {
        page = region.take_free_page(false);
        if (!page.is_null())
            break;
    }
    if (!page) {
        if (m_user_physical_regions.is_empty()) {
            kprintf("MM: no user physical regions available (?)\n");
        }
        kprintf("MM: no user physical pages available\n");
        ASSERT_NOT_REACHED();
        return {};
    }
#ifdef MM_DEBUG
    dbgprintf("MM: allocate_user_physical_page vending P%p\n", page->paddr().get());
#endif
    if (should_zero_fill == ShouldZeroFill::Yes) {
        auto* ptr = (u32*)quickmap_page(*page);
        fast_u32_fill(ptr, 0, PAGE_SIZE / sizeof(u32));
        unquickmap_page();
    }
    ++m_user_physical_pages_used;
    return page;
}

void MemoryManager::deallocate_supervisor_physical_page(PhysicalPage&& page)
{
    for (auto& region : m_super_physical_regions) {
        if (!region.contains(page)) {
            kprintf(
                "MM: deallocate_supervisor_physical_page: %p not in %p -> %p\n",
                page.paddr(), region.lower().get(), region.upper().get());
            continue;
        }
        region.return_page(move(page));
        --m_super_physical_pages_used;
        return;
    }

    kprintf("MM: deallocate_supervisor_physical_page couldn't figure out region for super page @ %p\n", page.paddr());
    ASSERT_NOT_REACHED();
}

RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
{
    InterruptDisabler disabler;
    RefPtr<PhysicalPage> page;
    for (auto& region : m_super_physical_regions) {
        page = region.take_free_page(true);
        if (!page.is_null())
            break;
    }
    if (!page) {
        if (m_super_physical_regions.is_empty()) {
            kprintf("MM: no super physical regions available (?)\n");
        }
        kprintf("MM: no super physical pages available\n");
        ASSERT_NOT_REACHED();
        return {};
    }
#ifdef MM_DEBUG
    dbgprintf("MM: allocate_supervisor_physical_page vending P%p\n", page->paddr().get());
#endif
    // Supervisor pages are identity mapped, so we can zero them directly.
    fast_u32_fill((u32*)page->paddr().as_ptr(), 0, PAGE_SIZE / sizeof(u32));
    ++m_super_physical_pages_used;
    return page;
}

void MemoryManager::enter_process_paging_scope(Process& process)
{
    ASSERT(current);
    InterruptDisabler disabler;

    current->tss().cr3 = process.page_directory().cr3();
    asm volatile("movl %%eax, %%cr3" ::"a"(process.page_directory().cr3())
                 : "memory");
}
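
// Rewriting CR3 with its own value invalidates all (non-global) TLB entries.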
void MemoryManager::flush_entire_tlb()
{
    asm volatile(
        "mov %%cr3, %%eax\n"
        "mov %%eax, %%cr3\n" ::
        : "%eax", "memory");
}
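
// INVLPG invalidates the TLB entry for just the page containing vaddr.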
void MemoryManager::flush_tlb(VirtualAddress vaddr)
{
    asm volatile("invlpg %0"
        :
        : "m"(*(char*)vaddr.get())
        : "memory");
}

void MemoryManager::map_for_kernel(VirtualAddress vaddr, PhysicalAddress paddr)
{
    auto& pte = ensure_pte(kernel_page_directory(), vaddr);
    pte.set_physical_page_base(paddr.get());
    pte.set_present(true);
    pte.set_writable(true);
    pte.set_user_allowed(false);
    flush_tlb(vaddr);
}
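
// The quickmap slot is a single fixed kernel virtual page (just below 1 MB)
// that can be pointed at any physical page for a brief, interrupts-disabled
// window; unquickmap_page() tears the mapping down again.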
u8* MemoryManager::quickmap_page(PhysicalPage& physical_page)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(!m_quickmap_in_use);
    m_quickmap_in_use = true;

    auto page_vaddr = m_quickmap_addr;
    auto& pte = ensure_pte(kernel_page_directory(), page_vaddr);
    pte.set_physical_page_base(physical_page.paddr().get());
    pte.set_present(true);
    pte.set_writable(true);
    pte.set_user_allowed(false);
    flush_tlb(page_vaddr);
    ASSERT((u32)pte.physical_page_base() == physical_page.paddr().get());
#ifdef MM_DEBUG
    dbgprintf("MM: >> quickmap_page L%x => P%x @ PTE=%p\n", page_vaddr, physical_page.paddr().get(), pte.ptr());
#endif
    return page_vaddr.as_ptr();
}

void MemoryManager::unquickmap_page()
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(m_quickmap_in_use);
    auto page_vaddr = m_quickmap_addr;
    auto& pte = ensure_pte(kernel_page_directory(), page_vaddr);
#ifdef MM_DEBUG
    auto old_physical_address = pte.physical_page_base();
#endif
    pte.set_physical_page_base(0);
    pte.set_present(false);
    pte.set_writable(false);
    flush_tlb(page_vaddr);
#ifdef MM_DEBUG
    dbgprintf("MM: >> unquickmap_page L%x =/> P%x\n", page_vaddr, old_physical_address);
#endif
    m_quickmap_in_use = false;
}
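
// Refresh the PTE for a single page of an already-mapped region, e.g. after
// demand-zero, page-in, or CoW has swapped in a new physical page.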
void MemoryManager::remap_region_page(Region& region, unsigned page_index_in_region)
{
    ASSERT(region.page_directory());
    InterruptDisabler disabler;
    auto page_vaddr = region.vaddr().offset(page_index_in_region * PAGE_SIZE);
    auto& pte = ensure_pte(*region.page_directory(), page_vaddr);
    // Index the VMO's physical pages VMO-relative, matching map_region_at_address().
    auto& physical_page = region.vmo().physical_pages()[region.first_page_index() + page_index_in_region];
    ASSERT(physical_page);
    pte.set_physical_page_base(physical_page->paddr().get());
    pte.set_present(true); // FIXME: Maybe we should use the is_readable flag here?
    if (region.should_cow(page_index_in_region))
        pte.set_writable(false);
    else
        pte.set_writable(region.is_writable());
    pte.set_cache_disabled(!region.vmo().m_allow_cpu_caching);
    pte.set_write_through(!region.vmo().m_allow_cpu_caching);
    pte.set_user_allowed(region.is_user_accessible());
    region.page_directory()->flush(page_vaddr);
#ifdef MM_DEBUG
    dbgprintf("MM: >> remap_region_page (PD=%x, PTE=P%x) '%s' L%x => P%x (@%p)\n", region.page_directory()->cr3(), pte.ptr(), region.name().characters(), page_vaddr.get(), physical_page->paddr().get(), physical_page.ptr());
#endif
}

void MemoryManager::remap_region(PageDirectory& page_directory, Region& region)
{
    InterruptDisabler disabler;
    ASSERT(region.page_directory() == &page_directory);
    map_region_at_address(page_directory, region, region.vaddr());
}
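
// Map every page of a region into the given page directory. Pages without a
// backing physical page are left non-present, to be filled in on page fault.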
void MemoryManager::map_region_at_address(PageDirectory& page_directory, Region& region, VirtualAddress vaddr)
{
    InterruptDisabler disabler;
    region.set_page_directory(page_directory);
    auto& vmo = region.vmo();
#ifdef MM_DEBUG
    dbgprintf("MM: map_region_at_address will map VMO pages %u - %u (VMO page count: %u)\n", region.first_page_index(), region.last_page_index(), vmo.page_count());
#endif
    for (size_t i = 0; i < region.page_count(); ++i) {
        auto page_vaddr = vaddr.offset(i * PAGE_SIZE);
        auto& pte = ensure_pte(page_directory, page_vaddr);
        auto& physical_page = vmo.physical_pages()[region.first_page_index() + i];
        if (physical_page) {
            pte.set_physical_page_base(physical_page->paddr().get());
            pte.set_present(true); // FIXME: Maybe we should use the is_readable flag here?
            // FIXME: It seems wrong that the *region* cow map is essentially using *VMO* relative indices.
            if (region.should_cow(region.first_page_index() + i))
                pte.set_writable(false);
            else
                pte.set_writable(region.is_writable());
            pte.set_cache_disabled(!region.vmo().m_allow_cpu_caching);
            pte.set_write_through(!region.vmo().m_allow_cpu_caching);
        } else {
            pte.set_physical_page_base(0);
            pte.set_present(false);
            pte.set_writable(region.is_writable());
        }
        pte.set_user_allowed(region.is_user_accessible());
        page_directory.flush(page_vaddr);
#ifdef MM_DEBUG
        dbgprintf("MM: >> map_region_at_address (PD=%x) '%s' L%x => P%x (@%p)\n", &page_directory, region.name().characters(), page_vaddr, physical_page ? physical_page->paddr().get() : 0, physical_page.ptr());
#endif
    }
}

bool MemoryManager::unmap_region(Region& region)
{
    ASSERT(region.page_directory());
    InterruptDisabler disabler;
    for (size_t i = 0; i < region.page_count(); ++i) {
        auto vaddr = region.vaddr().offset(i * PAGE_SIZE);
        auto& pte = ensure_pte(*region.page_directory(), vaddr);
        pte.set_physical_page_base(0);
        pte.set_present(false);
        pte.set_writable(false);
        pte.set_user_allowed(false);
        region.page_directory()->flush(vaddr);
#ifdef MM_DEBUG
        auto& physical_page = region.vmo().physical_pages()[region.first_page_index() + i];
        dbgprintf("MM: >> Unmapped L%x => P%x <<\n", vaddr, physical_page ? physical_page->paddr().get() : 0);
#endif
    }
    region.release_page_directory();
    return true;
}

bool MemoryManager::map_region(Process& process, Region& region)
{
    map_region_at_address(process.page_directory(), region, region.vaddr());
    return true;
}

bool MemoryManager::validate_user_read(const Process& process, VirtualAddress vaddr) const
{
    auto* region = region_from_vaddr(process, vaddr);
    return region && region->is_readable();
}

bool MemoryManager::validate_user_write(const Process& process, VirtualAddress vaddr) const
{
    auto* region = region_from_vaddr(process, vaddr);
    return region && region->is_writable();
}

void MemoryManager::register_vmo(VMObject& vmo)
{
    InterruptDisabler disabler;
    m_vmos.set(&vmo);
}

void MemoryManager::unregister_vmo(VMObject& vmo)
{
    InterruptDisabler disabler;
    m_vmos.remove(&vmo);
}

void MemoryManager::register_region(Region& region)
{
    InterruptDisabler disabler;
    if (region.vaddr().get() >= 0xc0000000)
        m_kernel_regions.set(&region);
    else
        m_user_regions.set(&region);
}

void MemoryManager::unregister_region(Region& region)
{
    InterruptDisabler disabler;
    if (region.vaddr().get() >= 0xc0000000)
        m_kernel_regions.remove(&region);
    else
        m_user_regions.remove(&region);
}

ProcessPagingScope::ProcessPagingScope(Process& process)
{
    ASSERT(current);
    MM.enter_process_paging_scope(process);
}

ProcessPagingScope::~ProcessPagingScope()
{
    MM.enter_process_paging_scope(current->process());
}