MemoryManager.cpp

#include "CMOS.h"
#include "Process.h"
#include "StdLib.h"
#include <AK/Assertions.h>
#include <AK/kstdio.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/FileSystem/Inode.h>
#include <Kernel/Multiboot.h>
#include <Kernel/VM/AnonymousVMObject.h>
#include <Kernel/VM/InodeVMObject.h>
#include <Kernel/VM/MemoryManager.h>

//#define MM_DEBUG
//#define PAGE_FAULT_DEBUG

static MemoryManager* s_the;

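// NOTE: `MM` below looks like a variable definition, but it is a macro;
// assuming the matching MemoryManager.h still defines
// `#define MM MemoryManager::the()`, the definition below expands to
// `MemoryManager& MemoryManager::the()`.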
MemoryManager& MM
{
    return *s_the;
}

MemoryManager::MemoryManager()
{
    m_kernel_page_directory = PageDirectory::create_at_fixed_address(PhysicalAddress(0x4000));
    m_page_table_zero = (PageTableEntry*)0x6000;
    m_page_table_one = (PageTableEntry*)0x7000;
    initialize_paging();

    kprintf("MM initialized.\n");
}

MemoryManager::~MemoryManager()
{
}

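// PDE 0 and PDE 1 cover the identity-mapped low memory every process shares.
// Each PDE maps 4 MB of virtual address space, so entry 768 is the first one
// at or above 0xC0000000 (768 * 4 MB == 3 GB), where the kernel-only address
// space begins.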
void MemoryManager::populate_page_directory(PageDirectory& page_directory)
{
    page_directory.m_directory_page = allocate_supervisor_physical_page();
    page_directory.entries()[0].copy_from({}, kernel_page_directory().entries()[0]);
    page_directory.entries()[1].copy_from({}, kernel_page_directory().entries()[1]);
    // Defer to the kernel page tables for 0xC0000000-0xFFFFFFFF
    for (int i = 768; i < 1024; ++i)
        page_directory.entries()[i].copy_from({}, kernel_page_directory().entries()[i]);
}

void MemoryManager::initialize_paging()
{
    memset(m_page_table_zero, 0, PAGE_SIZE);
    memset(m_page_table_one, 0, PAGE_SIZE);

#ifdef MM_DEBUG
    dbgprintf("MM: Kernel page directory @ %p\n", kernel_page_directory().cr3());
#endif

#ifdef MM_DEBUG
    dbgprintf("MM: Protect against null dereferences\n");
#endif
    // Make null dereferences crash.
    map_protected(VirtualAddress(0), PAGE_SIZE);

#ifdef MM_DEBUG
    dbgprintf("MM: Identity map bottom 5MB\n");
#endif
    // The bottom 5 MB (except for the null page) are identity mapped & supervisor only.
    // Every process shares these mappings.
    create_identity_mapping(kernel_page_directory(), VirtualAddress(PAGE_SIZE), (5 * MB) - PAGE_SIZE);

    // Basic memory map:
    // 0          -> 512 kB         Kernel code. Root page directory & PDE 0.
    // (last page before 1 MB)      Used by quickmap_page().
    // 1 MB       -> 3 MB           kmalloc_eternal() space.
    // 3 MB       -> 4 MB           kmalloc() space.
    // 4 MB       -> 5 MB           Supervisor physical pages (available for allocation!)
    // 5 MB       -> 0xc0000000     Userspace physical pages (available for allocation!)
    // 0xc0000000 -> 0xffffffff     Kernel-only virtual address space

    m_quickmap_addr = VirtualAddress((1 * MB) - PAGE_SIZE);
#ifdef MM_DEBUG
    dbgprintf("MM: Quickmap will use %p\n", m_quickmap_addr.get());
#endif

    RefPtr<PhysicalRegion> region;
    bool region_is_super = false;

    for (auto* mmap = (multiboot_memory_map_t*)multiboot_info_ptr->mmap_addr; (unsigned long)mmap < multiboot_info_ptr->mmap_addr + multiboot_info_ptr->mmap_length; mmap = (multiboot_memory_map_t*)((unsigned long)mmap + mmap->size + sizeof(mmap->size))) {
        kprintf("MM: Multiboot mmap: base_addr = 0x%x%08x, length = 0x%x%08x, type = 0x%x\n",
            (u32)(mmap->addr >> 32),
            (u32)(mmap->addr & 0xffffffff),
            (u32)(mmap->len >> 32),
            (u32)(mmap->len & 0xffffffff),
            (u32)mmap->type);

        if (mmap->type != MULTIBOOT_MEMORY_AVAILABLE)
            continue;

        // FIXME: Maybe make use of stuff below the 1MB mark?
        if (mmap->addr < (1 * MB))
            continue;

        if ((mmap->addr + mmap->len) > 0xffffffff)
            continue;

        auto diff = (u32)mmap->addr % PAGE_SIZE;
        if (diff != 0) {
            kprintf("MM: got an unaligned region base from the bootloader; correcting %p by %d bytes\n", mmap->addr, diff);
            diff = PAGE_SIZE - diff;
            mmap->addr += diff;
            mmap->len -= diff;
        }
        if ((mmap->len % PAGE_SIZE) != 0) {
            kprintf("MM: got an unaligned region length from the bootloader; correcting %d by %d bytes\n", mmap->len, mmap->len % PAGE_SIZE);
            mmap->len -= mmap->len % PAGE_SIZE;
        }
        if (mmap->len < PAGE_SIZE) {
            kprintf("MM: memory region from bootloader is too small; we want >= %d bytes, but got %d bytes\n", PAGE_SIZE, mmap->len);
            continue;
        }

#ifdef MM_DEBUG
        kprintf("MM: considering memory at %p - %p\n",
            (u32)mmap->addr, (u32)(mmap->addr + mmap->len));
#endif
        for (size_t page_base = mmap->addr; page_base < (mmap->addr + mmap->len); page_base += PAGE_SIZE) {
            auto addr = PhysicalAddress(page_base);

            if (page_base < 4 * MB) {
                // nothing
            } else if (page_base >= 4 * MB && page_base < 5 * MB) {
                if (region.is_null() || !region_is_super || region->upper().offset(PAGE_SIZE) != addr) {
                    m_super_physical_regions.append(PhysicalRegion::create(addr, addr));
                    region = m_super_physical_regions.last();
                    region_is_super = true;
                } else {
                    region->expand(region->lower(), addr);
                }
            } else {
                if (region.is_null() || region_is_super || region->upper().offset(PAGE_SIZE) != addr) {
                    m_user_physical_regions.append(PhysicalRegion::create(addr, addr));
                    region = m_user_physical_regions.last();
                    region_is_super = false;
                } else {
                    region->expand(region->lower(), addr);
                }
            }
        }
    }

    for (auto& region : m_super_physical_regions)
        m_super_physical_pages += region.finalize_capacity();

    for (auto& region : m_user_physical_regions)
        m_user_physical_pages += region.finalize_capacity();

#ifdef MM_DEBUG
    dbgprintf("MM: Installing page directory\n");
#endif

    asm volatile("movl %%eax, %%cr3" ::"a"(kernel_page_directory().cr3()));
    // Set CR0.PE (bit 0, protected mode) and CR0.PG (bit 31, paging).
    asm volatile(
        "movl %%cr0, %%eax\n"
        "orl $0x80000001, %%eax\n"
        "movl %%eax, %%cr0\n" ::
        : "%eax", "memory");

#ifdef MM_DEBUG
    dbgprintf("MM: Paging initialized.\n");
#endif
}

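// An i386 virtual address splits into a 10-bit page directory index, a
// 10-bit page table index, and a 12-bit page offset. For example, V0xC0123456
// decomposes into PD index 0x300 (768), PT index 0x123, and offset 0x456.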
PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
{
    ASSERT_INTERRUPTS_DISABLED();
    u32 page_directory_index = (vaddr.get() >> 22) & 0x3ff;
    u32 page_table_index = (vaddr.get() >> 12) & 0x3ff;

    PageDirectoryEntry& pde = page_directory.entries()[page_directory_index];
    if (!pde.is_present()) {
#ifdef MM_DEBUG
        dbgprintf("MM: PDE %u not present (requested for V%p), allocating\n", page_directory_index, vaddr.get());
#endif
        if (page_directory_index == 0) {
            ASSERT(&page_directory == m_kernel_page_directory);
            pde.set_page_table_base((u32)m_page_table_zero);
            pde.set_user_allowed(false);
            pde.set_present(true);
            pde.set_writable(true);
        } else if (page_directory_index == 1) {
            ASSERT(&page_directory == m_kernel_page_directory);
            pde.set_page_table_base((u32)m_page_table_one);
            pde.set_user_allowed(false);
            pde.set_present(true);
            pde.set_writable(true);
        } else {
            //ASSERT(&page_directory != m_kernel_page_directory.ptr());
            auto page_table = allocate_supervisor_physical_page();
#ifdef MM_DEBUG
            dbgprintf("MM: PD K%p (%s) at P%p allocated page table #%u (for V%p) at P%p\n",
                &page_directory,
                &page_directory == m_kernel_page_directory ? "Kernel" : "User",
                page_directory.cr3(),
                page_directory_index,
                vaddr.get(),
                page_table->paddr().get());
#endif
            pde.set_page_table_base(page_table->paddr().get());
            pde.set_user_allowed(true);
            pde.set_present(true);
            pde.set_writable(true);
            page_directory.m_physical_pages.set(page_directory_index, move(page_table));
        }
    }
    return pde.page_table_base()[page_table_index];
}

void MemoryManager::map_protected(VirtualAddress vaddr, size_t length)
{
    InterruptDisabler disabler;
    ASSERT(vaddr.is_page_aligned());
    for (u32 offset = 0; offset < length; offset += PAGE_SIZE) {
        auto pte_address = vaddr.offset(offset);
        auto& pte = ensure_pte(kernel_page_directory(), pte_address);
        pte.set_physical_page_base(pte_address.get());
        pte.set_user_allowed(false);
        pte.set_present(false);
        pte.set_writable(false);
        flush_tlb(pte_address);
    }
}

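// Identity mapping: each virtual page in [vaddr, vaddr + size) is backed by
// the physical page at the same address.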
void MemoryManager::create_identity_mapping(PageDirectory& page_directory, VirtualAddress vaddr, size_t size)
{
    InterruptDisabler disabler;
    ASSERT((vaddr.get() & ~PAGE_MASK) == 0);
    for (u32 offset = 0; offset < size; offset += PAGE_SIZE) {
        auto pte_address = vaddr.offset(offset);
        auto& pte = ensure_pte(page_directory, pte_address);
        pte.set_physical_page_base(pte_address.get());
        pte.set_user_allowed(false);
        pte.set_present(true);
        pte.set_writable(true);
        page_directory.flush(pte_address);
    }
}

void MemoryManager::initialize()
{
    s_the = new MemoryManager;
}

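// Virtual addresses at or above 0xc0000000 belong to the kernel-only address
// space (see the memory map in initialize_paging()); everything below is
// looked up in the current process's regions.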
Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
{
    if (vaddr.get() < 0xc0000000)
        return nullptr;
    for (auto& region : MM.m_kernel_regions) {
        if (region.contains(vaddr))
            return &region;
    }
    return nullptr;
}

Region* MemoryManager::user_region_from_vaddr(Process& process, VirtualAddress vaddr)
{
    // FIXME: Use a binary search tree (maybe red/black?) or some other more appropriate data structure!
    for (auto& region : process.m_regions) {
        if (region.contains(vaddr))
            return &region;
    }
    dbg() << process << " Couldn't find user region for " << vaddr;
    return nullptr;
}

Region* MemoryManager::region_from_vaddr(Process& process, VirtualAddress vaddr)
{
    ASSERT_INTERRUPTS_DISABLED();
    if (auto* region = kernel_region_from_vaddr(vaddr))
        return region;
    return user_region_from_vaddr(process, vaddr);
}

const Region* MemoryManager::region_from_vaddr(const Process& process, VirtualAddress vaddr)
{
    if (auto* region = kernel_region_from_vaddr(vaddr))
        return region;
    return user_region_from_vaddr(const_cast<Process&>(process), vaddr);
}

bool MemoryManager::zero_page(Region& region, unsigned page_index_in_region)
{
    ASSERT_INTERRUPTS_DISABLED();
    auto& vmo = region.vmobject();
    auto& vmo_page = vmo.physical_pages()[region.first_page_index() + page_index_in_region];

    // Acquiring the paging lock may block, so briefly re-enable interrupts around it.
    sti();
    LOCKER(vmo.m_paging_lock);
    cli();

    if (!vmo_page.is_null()) {
#ifdef PAGE_FAULT_DEBUG
        dbgprintf("MM: zero_page() but page already present. Fine with me!\n");
#endif
        remap_region_page(region, page_index_in_region);
        return true;
    }
    auto physical_page = allocate_user_physical_page(ShouldZeroFill::Yes);
#ifdef PAGE_FAULT_DEBUG
    dbgprintf(" >> ZERO P%p\n", physical_page->paddr().get());
#endif
    vmo.physical_pages()[page_index_in_region] = move(physical_page);
    remap_region_page(region, page_index_in_region);
    return true;
}

bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
{
    ASSERT_INTERRUPTS_DISABLED();
    auto& vmo = region.vmobject();
    if (vmo.physical_pages()[page_index_in_region]->ref_count() == 1) {
#ifdef PAGE_FAULT_DEBUG
        dbgprintf(" >> It's a COW page but nobody is sharing it anymore. Remap r/w\n");
#endif
        region.set_should_cow(page_index_in_region, false);
        remap_region_page(region, page_index_in_region);
        return true;
    }

#ifdef PAGE_FAULT_DEBUG
    dbgprintf(" >> It's a COW page and it's time to COW!\n");
#endif
    auto physical_page_to_copy = move(vmo.physical_pages()[page_index_in_region]);
    auto physical_page = allocate_user_physical_page(ShouldZeroFill::No);
    u8* dest_ptr = quickmap_page(*physical_page);
    const u8* src_ptr = region.vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
#ifdef PAGE_FAULT_DEBUG
    dbgprintf(" >> COW P%p <- P%p\n", physical_page->paddr().get(), physical_page_to_copy->paddr().get());
#endif
    memcpy(dest_ptr, src_ptr, PAGE_SIZE);
    vmo.physical_pages()[page_index_in_region] = move(physical_page);
    unquickmap_page();
    region.set_should_cow(page_index_in_region, false);
    remap_region_page(region, page_index_in_region);
    return true;
}

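// Demand paging for file-backed regions: the inode read below can block, so
// it runs with interrupts enabled and goes through a stack buffer before
// being copied into the freshly allocated and mapped page.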
bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_region)
{
    ASSERT(region.page_directory());
    auto& vmo = region.vmobject();
    ASSERT(vmo.is_inode());

    auto& inode_vmobject = static_cast<InodeVMObject&>(vmo);
    auto& vmo_page = inode_vmobject.physical_pages()[region.first_page_index() + page_index_in_region];

    InterruptFlagSaver saver;

    sti();
    LOCKER(vmo.m_paging_lock);
    cli();

    if (!vmo_page.is_null()) {
#ifdef PAGE_FAULT_DEBUG
        dbgprintf("MM: page_in_from_inode() but page already present. Fine with me!\n");
#endif
        remap_region_page(region, page_index_in_region);
        return true;
    }

#ifdef MM_DEBUG
    dbgprintf("MM: page_in_from_inode ready to read from inode\n");
#endif
    sti();
    u8 page_buffer[PAGE_SIZE];
    auto& inode = inode_vmobject.inode();
    auto nread = inode.read_bytes((region.first_page_index() + page_index_in_region) * PAGE_SIZE, PAGE_SIZE, page_buffer, nullptr);
    if (nread < 0) {
        kprintf("MM: page_in_from_inode had error (%d) while reading!\n", nread);
        return false;
    }
    if (nread < PAGE_SIZE) {
        // If we read less than a page, zero out the rest to avoid leaking uninitialized data.
        memset(page_buffer + nread, 0, PAGE_SIZE - nread);
    }
    cli();
    vmo_page = allocate_user_physical_page(ShouldZeroFill::No);
    if (vmo_page.is_null()) {
        kprintf("MM: page_in_from_inode was unable to allocate a physical page\n");
        return false;
    }
    remap_region_page(region, page_index_in_region);
    u8* dest_ptr = region.vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
    memcpy(dest_ptr, page_buffer, PAGE_SIZE);
    return true;
}

Region* MemoryManager::region_from_vaddr(VirtualAddress vaddr)
{
    if (auto* region = kernel_region_from_vaddr(vaddr))
        return region;
    auto page_directory = PageDirectory::find_by_pdb(cpu_cr3());
    if (!page_directory)
        return nullptr;
    ASSERT(page_directory->process());
    return user_region_from_vaddr(*page_directory->process(), vaddr);
}

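// Faults are handled in two classes: PageNotPresent (demand zero-fill, or
// page-in from an inode for file-backed regions) and ProtectionViolation
// (copy-on-write on a write access). A not-present fault above 0xc0000000
// may also just mean this page directory hasn't picked up a kernel PDE
// created after it was; that case is fixed up by copying the kernel's entry.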
PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(current);
#ifdef PAGE_FAULT_DEBUG
    dbgprintf("MM: handle_page_fault(%w) at V%p\n", fault.code(), fault.vaddr().get());
#endif
    ASSERT(fault.vaddr() != m_quickmap_addr);
    if (fault.type() == PageFault::Type::PageNotPresent && fault.vaddr().get() >= 0xc0000000) {
        auto* current_page_directory = reinterpret_cast<PageDirectoryEntry*>(cpu_cr3());
        u32 page_directory_index = (fault.vaddr().get() >> 22) & 0x3ff;
        auto& kernel_pde = kernel_page_directory().entries()[page_directory_index];
        auto& current_pde = current_page_directory[page_directory_index];
        if (kernel_pde.is_present() && !current_pde.is_present()) {
            dbg() << "NP(kernel): Copying new kernel mapping for " << fault.vaddr() << " into current page directory";
            current_pde.copy_from({}, kernel_pde);
            flush_tlb(fault.vaddr().page_base());
            return PageFaultResponse::Continue;
        }
    }
    auto* region = region_from_vaddr(fault.vaddr());
    if (!region) {
        kprintf("NP(error) fault at invalid address V%p\n", fault.vaddr().get());
        return PageFaultResponse::ShouldCrash;
    }
    auto page_index_in_region = region->page_index_from_address(fault.vaddr());
    if (fault.type() == PageFault::Type::PageNotPresent) {
        if (region->vmobject().is_inode()) {
#ifdef PAGE_FAULT_DEBUG
            dbgprintf("NP(inode) fault in Region{%p}[%u]\n", region, page_index_in_region);
#endif
            page_in_from_inode(*region, page_index_in_region);
            return PageFaultResponse::Continue;
        }
#ifdef PAGE_FAULT_DEBUG
        dbgprintf("NP(zero) fault in Region{%p}[%u]\n", region, page_index_in_region);
#endif
        zero_page(*region, page_index_in_region);
        return PageFaultResponse::Continue;
    }
    ASSERT(fault.type() == PageFault::Type::ProtectionViolation);
    if (fault.access() == PageFault::Access::Write && region->should_cow(page_index_in_region)) {
#ifdef PAGE_FAULT_DEBUG
        dbgprintf("PV(cow) fault in Region{%p}[%u]\n", region, page_index_in_region);
#endif
        bool success = copy_on_write(*region, page_index_in_region);
        ASSERT(success);
        return PageFaultResponse::Continue;
    }
    kprintf("PV(error) fault in Region{%p}[%u] at V%p\n", region, page_index_in_region, fault.vaddr().get());
    return PageFaultResponse::ShouldCrash;
}

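// Kernel regions get their virtual range from the kernel page directory's
// range allocator and are mapped immediately; with should_commit set, the
// whole range is backed with physical pages up front (see the FIXME below
// about zero-fill on demand).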
OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, bool user_accessible, bool should_commit)
{
    InterruptDisabler disabler;
    ASSERT(!(size % PAGE_SIZE));
    auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
    ASSERT(range.is_valid());
    OwnPtr<Region> region;
    if (user_accessible)
        region = Region::create_user_accessible(range, name, PROT_READ | PROT_WRITE | PROT_EXEC);
    else
        region = Region::create_kernel_only(range, name, PROT_READ | PROT_WRITE | PROT_EXEC);
    MM.map_region_at_address(*m_kernel_page_directory, *region, range.base());
    // FIXME: It would be cool if these could zero-fill on demand instead.
    if (should_commit)
        region->commit();
    return region;
}

OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name)
{
    return allocate_kernel_region(size, name, true);
}

void MemoryManager::deallocate_user_physical_page(PhysicalPage&& page)
{
    for (auto& region : m_user_physical_regions) {
        if (!region.contains(page)) {
            kprintf(
                "MM: deallocate_user_physical_page: %p not in %p -> %p\n",
                page.paddr(), region.lower().get(), region.upper().get());
            continue;
        }
        region.return_page(move(page));
        --m_user_physical_pages_used;
        return;
    }
    kprintf("MM: deallocate_user_physical_page couldn't figure out region for user page @ %p\n", page.paddr());
    ASSERT_NOT_REACHED();
}

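// First-fit scan over the user physical regions: take a page from the first
// region that has one free. Zero-filling goes through the quickmap, since
// user physical pages have no permanent kernel mapping.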
RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill)
{
    InterruptDisabler disabler;

    RefPtr<PhysicalPage> page;
    for (auto& region : m_user_physical_regions) {
        page = region.take_free_page(false);
        if (!page.is_null())
            break;
    }

    if (!page) {
        if (m_user_physical_regions.is_empty()) {
            kprintf("MM: no user physical regions available (?)\n");
        }
        kprintf("MM: no user physical pages available\n");
        ASSERT_NOT_REACHED();
        return {};
    }

#ifdef MM_DEBUG
    dbgprintf("MM: allocate_user_physical_page vending P%p\n", page->paddr().get());
#endif

    if (should_zero_fill == ShouldZeroFill::Yes) {
        auto* ptr = (u32*)quickmap_page(*page);
        fast_u32_fill(ptr, 0, PAGE_SIZE / sizeof(u32));
        unquickmap_page();
    }

    ++m_user_physical_pages_used;
    return page;
}

void MemoryManager::deallocate_supervisor_physical_page(PhysicalPage&& page)
{
    for (auto& region : m_super_physical_regions) {
        if (!region.contains(page)) {
            kprintf(
                "MM: deallocate_supervisor_physical_page: %p not in %p -> %p\n",
                page.paddr(), region.lower().get(), region.upper().get());
            continue;
        }
        region.return_page(move(page));
        --m_super_physical_pages_used;
        return;
    }
    kprintf("MM: deallocate_supervisor_physical_page couldn't figure out region for super page @ %p\n", page.paddr());
    ASSERT_NOT_REACHED();
}

RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
{
    InterruptDisabler disabler;

    RefPtr<PhysicalPage> page;
    for (auto& region : m_super_physical_regions) {
        page = region.take_free_page(true);
        if (!page.is_null())
            break;
    }

    if (!page) {
        if (m_super_physical_regions.is_empty()) {
            kprintf("MM: no super physical regions available (?)\n");
        }
        kprintf("MM: no super physical pages available\n");
        ASSERT_NOT_REACHED();
        return {};
    }

#ifdef MM_DEBUG
    dbgprintf("MM: allocate_supervisor_physical_page vending P%p\n", page->paddr().get());
#endif

    // Supervisor pages live in the identity-mapped bottom 5 MB, so we can
    // zero them through their physical address directly.
    fast_u32_fill((u32*)page->paddr().as_ptr(), 0, PAGE_SIZE / sizeof(u32));
    ++m_super_physical_pages_used;
    return page;
}

void MemoryManager::enter_process_paging_scope(Process& process)
{
    ASSERT(current);
    InterruptDisabler disabler;

    current->tss().cr3 = process.page_directory().cr3();
    asm volatile("movl %%eax, %%cr3" ::"a"(process.page_directory().cr3())
                 : "memory");
}

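// Reloading CR3 flushes all (non-global) TLB entries; invlpg invalidates
// only the entry for a single virtual address.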
void MemoryManager::flush_entire_tlb()
{
    asm volatile(
        "mov %%cr3, %%eax\n"
        "mov %%eax, %%cr3\n" ::
        : "%eax", "memory");
}

void MemoryManager::flush_tlb(VirtualAddress vaddr)
{
    asm volatile("invlpg %0"
                 :
                 : "m"(*(char*)vaddr.get())
                 : "memory");
}

void MemoryManager::map_for_kernel(VirtualAddress vaddr, PhysicalAddress paddr)
{
    auto& pte = ensure_pte(kernel_page_directory(), vaddr);
    pte.set_physical_page_base(paddr.get());
    pte.set_present(true);
    pte.set_writable(true);
    pte.set_user_allowed(false);
    flush_tlb(vaddr);
}

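// The quickmap is a single reserved virtual page (the last page below 1 MB,
// see initialize_paging()) that can be temporarily pointed at any physical
// page, giving the kernel a window onto memory it has no permanent mapping for.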
u8* MemoryManager::quickmap_page(PhysicalPage& physical_page)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(!m_quickmap_in_use);
    m_quickmap_in_use = true;

    auto page_vaddr = m_quickmap_addr;
    auto& pte = ensure_pte(kernel_page_directory(), page_vaddr);
    pte.set_physical_page_base(physical_page.paddr().get());
    pte.set_present(true);
    pte.set_writable(true);
    pte.set_user_allowed(false);
    flush_tlb(page_vaddr);
    ASSERT((u32)pte.physical_page_base() == physical_page.paddr().get());
#ifdef MM_DEBUG
    dbg() << "MM: >> quickmap_page " << page_vaddr << " => " << physical_page.paddr() << " @ PTE=" << (void*)pte.raw() << " {" << &pte << "}";
#endif
    return page_vaddr.as_ptr();
}

void MemoryManager::unquickmap_page()
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(m_quickmap_in_use);
    auto page_vaddr = m_quickmap_addr;
    auto& pte = ensure_pte(kernel_page_directory(), page_vaddr);
#ifdef MM_DEBUG
    auto old_physical_address = pte.physical_page_base();
#endif
    pte.set_physical_page_base(0);
    pte.set_present(false);
    pte.set_writable(false);
    flush_tlb(page_vaddr);
#ifdef MM_DEBUG
    dbg() << "MM: >> unquickmap_page " << page_vaddr << " =/> " << old_physical_address;
#endif
    m_quickmap_in_use = false;
}

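// Re-points the PTE for one page of a region at its current backing physical
// page and refreshes the flags. Pages still marked copy-on-write stay
// write-protected so the next write faults into copy_on_write().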
void MemoryManager::remap_region_page(Region& region, unsigned page_index_in_region)
{
    ASSERT(region.page_directory());
    InterruptDisabler disabler;
    auto page_vaddr = region.vaddr().offset(page_index_in_region * PAGE_SIZE);
    auto& pte = ensure_pte(*region.page_directory(), page_vaddr);
    auto& physical_page = region.vmobject().physical_pages()[page_index_in_region];
    ASSERT(physical_page);
    pte.set_physical_page_base(physical_page->paddr().get());
    pte.set_present(true); // FIXME: Maybe we should use the is_readable flag here?
    if (region.should_cow(page_index_in_region))
        pte.set_writable(false);
    else
        pte.set_writable(region.is_writable());
    pte.set_user_allowed(region.is_user_accessible());
    region.page_directory()->flush(page_vaddr);
#ifdef MM_DEBUG
    dbg() << "MM: >> remap_region_page (PD=" << region.page_directory()->cr3() << ", PTE=" << (void*)pte.raw() << "{" << &pte << "}) " << region.name() << " " << page_vaddr << " => " << physical_page->paddr() << " (@" << physical_page.ptr() << ")";
#endif
}

void MemoryManager::remap_region(PageDirectory& page_directory, Region& region)
{
    InterruptDisabler disabler;
    ASSERT(region.page_directory() == &page_directory);
    map_region_at_address(page_directory, region, region.vaddr());
}

void MemoryManager::map_region_at_address(PageDirectory& page_directory, Region& region, VirtualAddress vaddr)
{
    InterruptDisabler disabler;
    region.set_page_directory(page_directory);
    auto& vmo = region.vmobject();
#ifdef MM_DEBUG
    dbgprintf("MM: map_region_at_address will map VMO pages %u - %u (VMO page count: %u)\n", region.first_page_index(), region.last_page_index(), vmo.page_count());
#endif
    for (size_t i = 0; i < region.page_count(); ++i) {
        auto page_vaddr = vaddr.offset(i * PAGE_SIZE);
        auto& pte = ensure_pte(page_directory, page_vaddr);
        auto& physical_page = vmo.physical_pages()[region.first_page_index() + i];
        if (physical_page) {
            pte.set_physical_page_base(physical_page->paddr().get());
            pte.set_present(true); // FIXME: Maybe we should use the is_readable flag here?
            // FIXME: It seems wrong that the *region* cow map is essentially using *VMO* relative indices.
            if (region.should_cow(region.first_page_index() + i))
                pte.set_writable(false);
            else
                pte.set_writable(region.is_writable());
        } else {
            pte.set_physical_page_base(0);
            pte.set_present(false);
            pte.set_writable(region.is_writable());
        }
        pte.set_user_allowed(region.is_user_accessible());
        page_directory.flush(page_vaddr);
#ifdef MM_DEBUG
        dbgprintf("MM: >> map_region_at_address (PD=%p) '%s' V%p => P%p (@%p)\n", &page_directory, region.name().characters(), page_vaddr, physical_page ? physical_page->paddr().get() : 0, physical_page.ptr());
#endif
    }
}

bool MemoryManager::unmap_region(Region& region, bool deallocate_range)
{
    ASSERT(region.page_directory());
    InterruptDisabler disabler;
    for (size_t i = 0; i < region.page_count(); ++i) {
        auto vaddr = region.vaddr().offset(i * PAGE_SIZE);
        auto& pte = ensure_pte(*region.page_directory(), vaddr);
        pte.set_physical_page_base(0);
        pte.set_present(false);
        pte.set_writable(false);
        pte.set_user_allowed(false);
        region.page_directory()->flush(vaddr);
#ifdef MM_DEBUG
        auto& physical_page = region.vmobject().physical_pages()[region.first_page_index() + i];
        dbgprintf("MM: >> Unmapped V%p => P%p <<\n", vaddr, physical_page ? physical_page->paddr().get() : 0);
#endif
    }
    if (deallocate_range)
        region.page_directory()->range_allocator().deallocate(region.range());
    region.release_page_directory();
    return true;
}

bool MemoryManager::map_region(Process& process, Region& region)
{
    map_region_at_address(process.page_directory(), region, region.vaddr());
    return true;
}

bool MemoryManager::validate_user_read(const Process& process, VirtualAddress vaddr) const
{
    auto* region = region_from_vaddr(process, vaddr);
    return region && region->is_readable();
}

bool MemoryManager::validate_user_write(const Process& process, VirtualAddress vaddr) const
{
    auto* region = region_from_vaddr(process, vaddr);
    return region && region->is_writable();
}

void MemoryManager::register_vmo(VMObject& vmo)
{
    InterruptDisabler disabler;
    m_vmobjects.append(&vmo);
}

void MemoryManager::unregister_vmo(VMObject& vmo)
{
    InterruptDisabler disabler;
    m_vmobjects.remove(&vmo);
}

void MemoryManager::register_region(Region& region)
{
    InterruptDisabler disabler;
    if (region.vaddr().get() >= 0xc0000000)
        m_kernel_regions.append(&region);
    else
        m_user_regions.append(&region);
}

void MemoryManager::unregister_region(Region& region)
{
    InterruptDisabler disabler;
    if (region.vaddr().get() >= 0xc0000000)
        m_kernel_regions.remove(&region);
    else
        m_user_regions.remove(&region);
}

ProcessPagingScope::ProcessPagingScope(Process& process)
{
    ASSERT(current);
    MM.enter_process_paging_scope(process);
}

ProcessPagingScope::~ProcessPagingScope()
{
    MM.enter_process_paging_scope(current->process());
}