MemoryManager.cpp

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "CMOS.h"
#include "Process.h"
#include <AK/Assertions.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/FileSystem/Inode.h>
#include <Kernel/Multiboot.h>
#include <Kernel/VM/AnonymousVMObject.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PageDirectory.h>
#include <Kernel/VM/PhysicalRegion.h>
#include <Kernel/VM/PurgeableVMObject.h>
#include <Kernel/VM/SharedInodeVMObject.h>
#include <LibBareMetal/StdLib.h>

//#define MM_DEBUG
//#define PAGE_FAULT_DEBUG

extern uintptr_t start_of_kernel_text;
extern uintptr_t start_of_kernel_data;
extern uintptr_t end_of_kernel_bss;

namespace Kernel {

static MemoryManager* s_the;
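
// NOTE: The definition below relies on "MM" being a macro from MemoryManager.h
// that expands to Kernel::MemoryManager::the(); the usage "MM.m_kernel_regions"
// later in this file depends on the same expansion.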
MemoryManager& MM
{
    return *s_the;
}

MemoryManager::MemoryManager()
{
    m_kernel_page_directory = PageDirectory::create_kernel_page_directory();
    parse_memory_map();
    write_cr3(kernel_page_directory().cr3());
    setup_low_identity_mapping();
    protect_kernel_image();
    // A single physical page shared by all zero-filled anonymous mappings.
    m_shared_zero_page = allocate_user_physical_page();
}

MemoryManager::~MemoryManager()
{
}

void MemoryManager::protect_kernel_image()
{
    // Disable writing to the kernel text and rodata segments.
    for (size_t i = (uintptr_t)&start_of_kernel_text; i < (uintptr_t)&start_of_kernel_data; i += PAGE_SIZE) {
        auto& pte = ensure_pte(kernel_page_directory(), VirtualAddress(i));
        pte.set_writable(false);
    }
    if (g_cpu_supports_nx) {
        // Disable execution of the kernel data and bss segments.
        for (size_t i = (uintptr_t)&start_of_kernel_data; i < (uintptr_t)&end_of_kernel_bss; i += PAGE_SIZE) {
            auto& pte = ensure_pte(kernel_page_directory(), VirtualAddress(i));
            pte.set_execute_disabled(true);
        }
    }
}
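
// Identity-map the low 2 MB of physical memory so that early-boot data
// (the multiboot structures, the kernel image loaded at the 1 MB mark)
// stays reachable at its physical address after paging is enabled.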
void MemoryManager::setup_low_identity_mapping()
{
    m_low_page_table = allocate_user_physical_page(ShouldZeroFill::Yes);
    auto* pd_zero = quickmap_pd(kernel_page_directory(), 0);
    pd_zero[1].set_present(false);
    pd_zero[2].set_present(false);
    pd_zero[3].set_present(false);
    auto& pde_zero = pd_zero[0];
    pde_zero.set_page_table_base(m_low_page_table->paddr().get());
    pde_zero.set_present(true);
    pde_zero.set_huge(false);
    pde_zero.set_writable(true);
    pde_zero.set_user_allowed(false);
    if (g_cpu_supports_nx)
        pde_zero.set_execute_disabled(true);
    for (uintptr_t offset = 0; offset < (2 * MB); offset += PAGE_SIZE) {
        auto& page_table_page = m_low_page_table;
        auto& pte = quickmap_pt(page_table_page->paddr())[offset / PAGE_SIZE];
        pte.set_physical_page_base(offset);
        pte.set_user_allowed(false);
        // Leave page 0 non-present so null dereferences fault, and keep the
        // kernel image (1-2 MB) read-only.
        pte.set_present(offset != 0);
        pte.set_writable(offset < (1 * MB));
    }
}
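
// Walk the multiboot-provided memory map and carve the available RAM into
// physical regions: 7-8 MB is reserved for supervisor (kernel) page
// allocation, and everything at or above 8 MB goes to the user page allocator.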
void MemoryManager::parse_memory_map()
{
    RefPtr<PhysicalRegion> region;
    bool region_is_super = false;
    auto* mmap = (multiboot_memory_map_t*)(low_physical_to_virtual(multiboot_info_ptr->mmap_addr));
    for (; (unsigned long)mmap < (low_physical_to_virtual(multiboot_info_ptr->mmap_addr)) + (multiboot_info_ptr->mmap_length); mmap = (multiboot_memory_map_t*)((unsigned long)mmap + mmap->size + sizeof(mmap->size))) {
        klog() << "MM: Multiboot mmap: base_addr = " << String::format("0x%08x", mmap->addr) << ", length = " << String::format("0x%08x", mmap->len) << ", type = 0x" << String::format("%x", mmap->type);
        if (mmap->type != MULTIBOOT_MEMORY_AVAILABLE)
            continue;
        // FIXME: Maybe make use of stuff below the 1MB mark?
        if (mmap->addr < (1 * MB))
            continue;
        if ((mmap->addr + mmap->len) > 0xffffffff)
            continue;
        auto diff = (uintptr_t)mmap->addr % PAGE_SIZE;
        if (diff != 0) {
            klog() << "MM: got an unaligned region base from the bootloader; correcting " << String::format("%p", mmap->addr) << " by " << diff << " bytes";
            diff = PAGE_SIZE - diff;
            mmap->addr += diff;
            mmap->len -= diff;
        }
        if ((mmap->len % PAGE_SIZE) != 0) {
            klog() << "MM: got an unaligned region length from the bootloader; correcting " << mmap->len << " by " << (mmap->len % PAGE_SIZE) << " bytes";
            mmap->len -= mmap->len % PAGE_SIZE;
        }
        if (mmap->len < PAGE_SIZE) {
            klog() << "MM: memory region from bootloader is too small; we want >= " << PAGE_SIZE << " bytes, but got " << mmap->len << " bytes";
            continue;
        }
#ifdef MM_DEBUG
        klog() << "MM: considering memory at " << String::format("%p", (uintptr_t)mmap->addr) << " - " << String::format("%p", (uintptr_t)(mmap->addr + mmap->len));
#endif
        for (size_t page_base = mmap->addr; page_base < (mmap->addr + mmap->len); page_base += PAGE_SIZE) {
            auto addr = PhysicalAddress(page_base);
            if (page_base < 7 * MB) {
                // nothing
            } else if (page_base >= 7 * MB && page_base < 8 * MB) {
                if (region.is_null() || !region_is_super || region->upper().offset(PAGE_SIZE) != addr) {
                    m_super_physical_regions.append(PhysicalRegion::create(addr, addr));
                    region = m_super_physical_regions.last();
                    region_is_super = true;
                } else {
                    region->expand(region->lower(), addr);
                }
            } else {
                if (region.is_null() || region_is_super || region->upper().offset(PAGE_SIZE) != addr) {
                    m_user_physical_regions.append(PhysicalRegion::create(addr, addr));
                    region = m_user_physical_regions.last();
                    region_is_super = false;
                } else {
                    region->expand(region->lower(), addr);
                }
            }
        }
    }
    for (auto& region : m_super_physical_regions)
        m_super_physical_pages += region.finalize_capacity();
    for (auto& region : m_user_physical_regions)
        m_user_physical_pages += region.finalize_capacity();
}
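
// These helpers walk the PAE paging structures for a given virtual address:
// bits 31:30 select one of the four page directories (via the PDPT),
// bits 29:21 select the page directory entry, and bits 20:12 select the
// page table entry.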
const PageTableEntry* MemoryManager::pte(const PageDirectory& page_directory, VirtualAddress vaddr)
{
    ASSERT_INTERRUPTS_DISABLED();
    u32 page_directory_table_index = (vaddr.get() >> 30) & 0x3;
    u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
    u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
    auto* pd = quickmap_pd(const_cast<PageDirectory&>(page_directory), page_directory_table_index);
    const PageDirectoryEntry& pde = pd[page_directory_index];
    if (!pde.is_present())
        return nullptr;
    return &quickmap_pt(PhysicalAddress((uintptr_t)pde.page_table_base()))[page_table_index];
}

PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
{
    ASSERT_INTERRUPTS_DISABLED();
    u32 page_directory_table_index = (vaddr.get() >> 30) & 0x3;
    u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
    u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
    auto* pd = quickmap_pd(page_directory, page_directory_table_index);
    PageDirectoryEntry& pde = pd[page_directory_index];
    if (!pde.is_present()) {
#ifdef MM_DEBUG
        dbg() << "MM: PDE " << page_directory_index << " not present (requested for V" << String::format("%p", vaddr.get()) << "), allocating";
#endif
        auto page_table = allocate_user_physical_page(ShouldZeroFill::Yes);
#ifdef MM_DEBUG
        dbg() << "MM: PD K" << &page_directory << " (" << (&page_directory == m_kernel_page_directory ? "Kernel" : "User") << ") at P" << String::format("%p", page_directory.cr3()) << " allocated page table " << String::format("#%u", page_directory_index) << " (for V" << String::format("%p", vaddr.get()) << ") at P" << String::format("%p", page_table->paddr().get());
#endif
        pde.set_page_table_base(page_table->paddr().get());
        pde.set_user_allowed(true);
        pde.set_present(true);
        pde.set_writable(true);
        pde.set_global(&page_directory == m_kernel_page_directory.ptr());
        page_directory.m_physical_pages.set(page_directory_index, move(page_table));
    }
    return quickmap_pt(PhysicalAddress((uintptr_t)pde.page_table_base()))[page_table_index];
}

void MemoryManager::initialize()
{
    s_the = new MemoryManager;
}

Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
{
    if (vaddr.get() < 0xc0000000)
        return nullptr;
    for (auto& region : MM.m_kernel_regions) {
        if (region.contains(vaddr))
            return &region;
    }
    return nullptr;
}

Region* MemoryManager::user_region_from_vaddr(Process& process, VirtualAddress vaddr)
{
    // FIXME: Use a binary search tree (maybe red/black?) or some other more appropriate data structure!
    for (auto& region : process.m_regions) {
        if (region.contains(vaddr))
            return &region;
    }
#ifdef MM_DEBUG
    dbg() << process << " Couldn't find user region for " << vaddr;
#endif
    return nullptr;
}

Region* MemoryManager::region_from_vaddr(Process& process, VirtualAddress vaddr)
{
    if (auto* region = kernel_region_from_vaddr(vaddr))
        return region;
    return user_region_from_vaddr(process, vaddr);
}

const Region* MemoryManager::region_from_vaddr(const Process& process, VirtualAddress vaddr)
{
    if (auto* region = kernel_region_from_vaddr(vaddr))
        return region;
    return user_region_from_vaddr(const_cast<Process&>(process), vaddr);
}

Region* MemoryManager::region_from_vaddr(VirtualAddress vaddr)
{
    if (auto* region = kernel_region_from_vaddr(vaddr))
        return region;
    auto page_directory = PageDirectory::find_by_cr3(read_cr3());
    if (!page_directory)
        return nullptr;
    ASSERT(page_directory->process());
    return user_region_from_vaddr(*page_directory->process(), vaddr);
}
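
// Central page-fault dispatcher: find the Region covering the faulting address
// and delegate the fault to it; faults at unmapped addresses crash the
// offending process.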
PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(Thread::current);
    if (g_in_irq) {
        dbg() << "BUG! Page fault while handling IRQ! code=" << fault.code() << ", vaddr=" << fault.vaddr();
        dump_kernel_regions();
    }
#ifdef PAGE_FAULT_DEBUG
    dbg() << "MM: handle_page_fault(" << String::format("%w", fault.code()) << ") at V" << String::format("%p", fault.vaddr().get());
#endif
    auto* region = region_from_vaddr(fault.vaddr());
    if (!region) {
        klog() << "NP(error) fault at invalid address V" << String::format("%p", fault.vaddr().get());
        return PageFaultResponse::ShouldCrash;
    }
    return region->handle_fault(fault);
}
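
// Kernel region allocators: carve a virtual range out of the kernel page
// directory's range allocator, back it with a VMObject, and map it into the
// kernel address space.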
OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, u8 access, bool user_accessible, bool should_commit, bool cacheable)
{
    ASSERT(!(size % PAGE_SIZE));
    auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
    if (!range.is_valid())
        return nullptr;
    auto vmobject = AnonymousVMObject::create_with_size(size);
    auto region = allocate_kernel_region_with_vmobject(range, vmobject, name, access, user_accessible, cacheable);
    if (!region)
        return nullptr;
    if (should_commit)
        region->commit();
    return region;
}

OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
{
    ASSERT(!(size % PAGE_SIZE));
    auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
    if (!range.is_valid())
        return nullptr;
    auto vmobject = AnonymousVMObject::create_for_physical_range(paddr, size);
    if (!vmobject)
        return nullptr;
    return allocate_kernel_region_with_vmobject(range, *vmobject, name, access, user_accessible, cacheable);
}

OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name, u8 access, bool cacheable)
{
    return allocate_kernel_region(size, name, access, true, true, cacheable);
}

OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(const Range& range, VMObject& vmobject, const StringView& name, u8 access, bool user_accessible, bool cacheable)
{
    InterruptDisabler disabler;
    OwnPtr<Region> region;
    if (user_accessible)
        region = Region::create_user_accessible(range, vmobject, 0, name, access, cacheable);
    else
        region = Region::create_kernel_only(range, vmobject, 0, name, access, cacheable);
    if (region)
        region->map(kernel_page_directory());
    return region;
}

OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
{
    ASSERT(!(size % PAGE_SIZE));
    auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
    if (!range.is_valid())
        return nullptr;
    return allocate_kernel_region_with_vmobject(range, vmobject, name, access, user_accessible, cacheable);
}

void MemoryManager::deallocate_user_physical_page(PhysicalPage&& page)
{
    for (auto& region : m_user_physical_regions) {
        if (!region.contains(page)) {
            klog() << "MM: deallocate_user_physical_page: " << page.paddr() << " not in " << region.lower() << " -> " << region.upper();
            continue;
        }
        region.return_page(move(page));
        --m_user_physical_pages_used;
        return;
    }
    klog() << "MM: deallocate_user_physical_page couldn't figure out region for user page @ " << page.paddr();
    ASSERT_NOT_REACHED();
}

RefPtr<PhysicalPage> MemoryManager::find_free_user_physical_page()
{
    RefPtr<PhysicalPage> page;
    for (auto& region : m_user_physical_regions) {
        page = region.take_free_page(false);
        if (!page.is_null())
            break;
    }
    return page;
}
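
// Allocate a physical page for user-space use. If nothing is free, try to
// reclaim memory by purging purgeable VMObjects before giving up.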
RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill)
{
    InterruptDisabler disabler;
    RefPtr<PhysicalPage> page = find_free_user_physical_page();
    if (!page) {
        if (m_user_physical_regions.is_empty()) {
            klog() << "MM: no user physical regions available (?)";
        }
        for_each_vmobject([&](auto& vmobject) {
            if (vmobject.is_purgeable()) {
                auto& purgeable_vmobject = static_cast<PurgeableVMObject&>(vmobject);
                int purged_page_count = purgeable_vmobject.purge_with_interrupts_disabled({});
                if (purged_page_count) {
                    klog() << "MM: Purge saved the day! Purged " << purged_page_count << " pages from PurgeableVMObject{" << &purgeable_vmobject << "}";
                    page = find_free_user_physical_page();
                    ASSERT(page);
                    return IterationDecision::Break;
                }
            }
            return IterationDecision::Continue;
        });
        if (!page) {
            klog() << "MM: no user physical pages available";
            ASSERT_NOT_REACHED();
            return {};
        }
    }
#ifdef MM_DEBUG
    dbg() << "MM: allocate_user_physical_page vending P" << String::format("%p", page->paddr().get());
#endif
    if (should_zero_fill == ShouldZeroFill::Yes) {
        auto* ptr = quickmap_page(*page);
        memset(ptr, 0, PAGE_SIZE);
        unquickmap_page();
    }
    ++m_user_physical_pages_used;
    return page;
}

void MemoryManager::deallocate_supervisor_physical_page(PhysicalPage&& page)
{
    for (auto& region : m_super_physical_regions) {
        if (!region.contains(page)) {
            klog() << "MM: deallocate_supervisor_physical_page: " << page.paddr() << " not in " << region.lower() << " -> " << region.upper();
            continue;
        }
        region.return_page(move(page));
        --m_super_physical_pages_used;
        return;
    }
    klog() << "MM: deallocate_supervisor_physical_page couldn't figure out region for super page @ " << page.paddr();
    ASSERT_NOT_REACHED();
}

RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
{
    InterruptDisabler disabler;
    RefPtr<PhysicalPage> page;
    for (auto& region : m_super_physical_regions) {
        page = region.take_free_page(true);
        // Stop at the first region that yields a page; iterating further would
        // overwrite it with null if a later region is exhausted.
        if (!page.is_null())
            break;
    }
    if (!page) {
        if (m_super_physical_regions.is_empty()) {
            klog() << "MM: no super physical regions available (?)";
        }
        klog() << "MM: no super physical pages available";
        ASSERT_NOT_REACHED();
        return {};
    }
#ifdef MM_DEBUG
    dbg() << "MM: allocate_supervisor_physical_page vending P" << String::format("%p", page->paddr().get());
#endif
    // Supervisor pages are permanently mapped at the kernel base, so they can
    // be zeroed through the 0xc0000000 translation without a quickmap.
    fast_u32_fill((u32*)page->paddr().offset(0xc0000000).as_ptr(), 0, PAGE_SIZE / sizeof(u32));
    ++m_super_physical_pages_used;
    return page;
}

void MemoryManager::enter_process_paging_scope(Process& process)
{
    ASSERT(Thread::current);
    InterruptDisabler disabler;
    Thread::current->tss().cr3 = process.page_directory().cr3();
    write_cr3(process.page_directory().cr3());
}

void MemoryManager::flush_entire_tlb()
{
    // Reloading CR3 invalidates all non-global TLB entries.
    write_cr3(read_cr3());
}

void MemoryManager::flush_tlb(VirtualAddress vaddr)
{
#ifdef MM_DEBUG
    dbg() << "MM: Flush page V" << String::format("%p", vaddr.get());
#endif
    asm volatile("invlpg %0"
        :
        : "m"(*(char*)vaddr.get())
        : "memory");
}

extern "C" PageTableEntry boot_pd3_pde1023_pt[1024];
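
// The "quickmap" helpers temporarily map a physical page at a fixed virtual
// address by reusing dedicated slots in the topmost boot page table:
// slot 0 -> 0xffe00000 (quickmap_page), slot 4 -> 0xffe04000 (quickmap_pd),
// slot 8 -> 0xffe08000 (quickmap_pt). Callers must have interrupts disabled.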
PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t pdpt_index)
{
    auto& pte = boot_pd3_pde1023_pt[4];
    auto pd_paddr = directory.m_directory_pages[pdpt_index]->paddr();
    if (pte.physical_page_base() != pd_paddr.as_ptr()) {
#ifdef MM_DEBUG
        dbg() << "quickmap_pd: Mapping P" << (void*)directory.m_directory_pages[pdpt_index]->paddr().as_ptr() << " at 0xffe04000 in pte @ " << &pte;
#endif
        pte.set_physical_page_base(pd_paddr.get());
        pte.set_present(true);
        pte.set_writable(true);
        pte.set_user_allowed(false);
        flush_tlb(VirtualAddress(0xffe04000));
    }
    return (PageDirectoryEntry*)0xffe04000;
}

PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
{
    auto& pte = boot_pd3_pde1023_pt[8];
    if (pte.physical_page_base() != pt_paddr.as_ptr()) {
#ifdef MM_DEBUG
        dbg() << "quickmap_pt: Mapping P" << (void*)pt_paddr.as_ptr() << " at 0xffe08000 in pte @ " << &pte;
#endif
        pte.set_physical_page_base(pt_paddr.get());
        pte.set_present(true);
        pte.set_writable(true);
        pte.set_user_allowed(false);
        flush_tlb(VirtualAddress(0xffe08000));
    }
    return (PageTableEntry*)0xffe08000;
}

u8* MemoryManager::quickmap_page(PhysicalPage& physical_page)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(!m_quickmap_in_use);
    m_quickmap_in_use = true;
    auto& pte = boot_pd3_pde1023_pt[0];
    if (pte.physical_page_base() != physical_page.paddr().as_ptr()) {
#ifdef MM_DEBUG
        dbg() << "quickmap_page: Mapping P" << (void*)physical_page.paddr().as_ptr() << " at 0xffe00000 in pte @ " << &pte;
#endif
        pte.set_physical_page_base(physical_page.paddr().get());
        pte.set_present(true);
        pte.set_writable(true);
        pte.set_user_allowed(false);
        flush_tlb(VirtualAddress(0xffe00000));
    }
    return (u8*)0xffe00000;
}

void MemoryManager::unquickmap_page()
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(m_quickmap_in_use);
    auto& pte = boot_pd3_pde1023_pt[0];
    pte.clear();
    flush_tlb(VirtualAddress(0xffe00000));
    m_quickmap_in_use = false;
}
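
// Validate that the entire [base_vaddr, base_vaddr + size) range is covered by
// regions with the required permissions, one page at a time, rejecting ranges
// that wrap around the address space.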
template<MemoryManager::AccessSpace space, MemoryManager::AccessType access_type>
bool MemoryManager::validate_range(const Process& process, VirtualAddress base_vaddr, size_t size) const
{
    ASSERT(size);
    if (base_vaddr > base_vaddr.offset(size)) {
        dbg() << "Shenanigans! Asked to validate wrappy " << base_vaddr << " size=" << size;
        return false;
    }
    VirtualAddress vaddr = base_vaddr.page_base();
    VirtualAddress end_vaddr = base_vaddr.offset(size - 1).page_base();
    if (end_vaddr < vaddr) {
        dbg() << "Shenanigans! Asked to validate " << base_vaddr << " size=" << size;
        return false;
    }
    const Region* region = nullptr;
    while (vaddr <= end_vaddr) {
        if (!region || !region->contains(vaddr)) {
            if (space == AccessSpace::Kernel)
                region = kernel_region_from_vaddr(vaddr);
            if (!region || !region->contains(vaddr))
                region = user_region_from_vaddr(const_cast<Process&>(process), vaddr);
            if (!region
                || (space == AccessSpace::User && !region->is_user_accessible())
                || (access_type == AccessType::Read && !region->is_readable())
                || (access_type == AccessType::Write && !region->is_writable())) {
                return false;
            }
        }
        vaddr = vaddr.offset(PAGE_SIZE);
    }
    return true;
}

bool MemoryManager::validate_user_stack(const Process& process, VirtualAddress vaddr) const
{
    if (!is_user_address(vaddr))
        return false;
    auto* region = user_region_from_vaddr(const_cast<Process&>(process), vaddr);
    return region && region->is_user_accessible() && region->is_stack();
}

bool MemoryManager::validate_kernel_read(const Process& process, VirtualAddress vaddr, size_t size) const
{
    return validate_range<AccessSpace::Kernel, AccessType::Read>(process, vaddr, size);
}

bool MemoryManager::can_read_without_faulting(const Process& process, VirtualAddress vaddr, size_t size) const
{
    // FIXME: Use the size argument!
    UNUSED_PARAM(size);
    auto* pte = const_cast<MemoryManager*>(this)->pte(process.page_directory(), vaddr);
    if (!pte)
        return false;
    return pte->is_present();
}

bool MemoryManager::validate_user_read(const Process& process, VirtualAddress vaddr, size_t size) const
{
    if (!is_user_address(vaddr))
        return false;
    return validate_range<AccessSpace::User, AccessType::Read>(process, vaddr, size);
}

bool MemoryManager::validate_user_write(const Process& process, VirtualAddress vaddr, size_t size) const
{
    if (!is_user_address(vaddr))
        return false;
    return validate_range<AccessSpace::User, AccessType::Write>(process, vaddr, size);
}

void MemoryManager::register_vmobject(VMObject& vmobject)
{
    InterruptDisabler disabler;
    m_vmobjects.append(&vmobject);
}

void MemoryManager::unregister_vmobject(VMObject& vmobject)
{
    InterruptDisabler disabler;
    m_vmobjects.remove(&vmobject);
}
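
// Regions at or above 0xc0000000 (the kernel virtual base) are tracked in the
// kernel region list; everything below belongs to some process.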
void MemoryManager::register_region(Region& region)
{
    InterruptDisabler disabler;
    if (region.vaddr().get() >= 0xc0000000)
        m_kernel_regions.append(&region);
    else
        m_user_regions.append(&region);
}

void MemoryManager::unregister_region(Region& region)
{
    InterruptDisabler disabler;
    if (region.vaddr().get() >= 0xc0000000)
        m_kernel_regions.remove(&region);
    else
        m_user_regions.remove(&region);
}

void MemoryManager::dump_kernel_regions()
{
    klog() << "Kernel regions:";
    klog() << "BEGIN       END         SIZE        ACCESS  NAME";
    for (auto& region : MM.m_kernel_regions) {
        klog() << String::format("%08x", region.vaddr().get()) << " -- " << String::format("%08x", region.vaddr().offset(region.size() - 1).get()) << " " << String::format("%08x", region.size()) << " " << (region.is_readable() ? 'R' : ' ') << (region.is_writable() ? 'W' : ' ') << (region.is_executable() ? 'X' : ' ') << (region.is_shared() ? 'S' : ' ') << (region.is_stack() ? 'T' : ' ') << (region.vmobject().is_purgeable() ? 'P' : ' ') << " " << region.name().characters();
    }
}

}