/*
 * Copyright (c) 2018-2022, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/StringView.h>
#include <Kernel/Arch/PageDirectory.h>
#include <Kernel/Arch/PageFault.h>
#include <Kernel/Debug.h>
#include <Kernel/FileSystem/Inode.h>
#include <Kernel/InterruptDisabler.h>
#include <Kernel/Memory/AnonymousVMObject.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/Region.h>
#include <Kernel/Memory/SharedInodeVMObject.h>
#include <Kernel/Panic.h>
#include <Kernel/Tasks/Process.h>
#include <Kernel/Tasks/Scheduler.h>
#include <Kernel/Tasks/Thread.h>

namespace Kernel::Memory {

Region::Region()
    : m_range(VirtualRange({}, 0))
{
}

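// Constructor for a region that has not been placed in an address space yet;
// its virtual range is assigned later. In both constructors below, the low
// Read/Write/Execute access bits are also copied into the shifted "has been"
// bits, so the region remembers which protections it was originally given.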
Region::Region(NonnullLockRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
    : m_range(VirtualRange({}, 0))
    , m_offset_in_vmobject(offset_in_vmobject)
    , m_vmobject(move(vmobject))
    , m_name(move(name))
    , m_access(access | ((access & 0x7) << 4))
    , m_shared(shared)
    , m_cacheable(cacheable == Cacheable::Yes)
{
    m_vmobject->add_region(*this);
}

Region::Region(VirtualRange const& range, NonnullLockRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
    : m_range(range)
    , m_offset_in_vmobject(offset_in_vmobject)
    , m_vmobject(move(vmobject))
    , m_name(move(name))
    , m_access(access | ((access & 0x7) << 4))
    , m_shared(shared)
    , m_cacheable(cacheable == Cacheable::Yes)
{
    VERIFY(m_range.base().is_page_aligned());
    VERIFY(m_range.size());
    VERIFY((m_range.size() % PAGE_SIZE) == 0);

    m_vmobject->add_region(*this);
}

Region::~Region()
{
    if (is_writable() && vmobject().is_shared_inode()) {
        // FIXME: This is very aggressive. Find a way to do less work!
        (void)static_cast<SharedInodeVMObject&>(vmobject()).sync();
    }

    m_vmobject->remove_region(*this);

    if (m_page_directory) {
        SpinlockLocker pd_locker(m_page_directory->get_lock());
        if (!is_readable() && !is_writable() && !is_executable()) {
            // If the region is "PROT_NONE", we didn't map it in the first place.
        } else {
            unmap_with_locks_held(ShouldFlushTLB::Yes, pd_locker);
            VERIFY(!m_page_directory);
        }
    }

    if (is_kernel())
        MM.unregister_kernel_region(*this);

    // Extend the lifetime of the region if there are any page faults in progress for this region's pages.
    // Both the removal of regions from the region trees and the fetching of the regions from the tree
    // during the start of page fault handling are serialized under the address space spinlock. This means
    // that once the region is removed no more page faults on this region can start, so this counter will
    // eventually reach 0. And similarly since we can only reach the region destructor once the region was
    // removed from the appropriate region tree, it is guaranteed that any page faults that are still being
    // handled have already increased this counter, and will be allowed to finish before deallocation.
    while (m_in_progress_page_faults)
        Processor::wait_check();
}

ErrorOr<NonnullOwnPtr<Region>> Region::create_unbacked()
{
    return adopt_nonnull_own_or_enomem(new (nothrow) Region);
}

ErrorOr<NonnullOwnPtr<Region>> Region::create_unplaced(NonnullLockRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
{
    return adopt_nonnull_own_or_enomem(new (nothrow) Region(move(vmobject), offset_in_vmobject, move(name), access, cacheable, shared));
}

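// Clones this region for a new address space (e.g. when forking a process).
// Shared regions keep pointing at the same VMObject; private regions get a
// cloned, copy-on-write VMObject, and the parent region is remapped so that
// its own pages also become read-only until the first write fault.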
ErrorOr<NonnullOwnPtr<Region>> Region::try_clone()
{
    VERIFY(Process::has_current());

    if (m_shared) {
        VERIFY(!m_stack);
        if (vmobject().is_inode())
            VERIFY(vmobject().is_shared_inode());

        // Create a new region backed by the same VMObject.

        OwnPtr<KString> region_name;
        if (m_name)
            region_name = TRY(m_name->try_clone());

        auto region = TRY(Region::try_create_user_accessible(
            m_range, vmobject(), m_offset_in_vmobject, move(region_name), access(), m_cacheable ? Cacheable::Yes : Cacheable::No, m_shared));
        region->set_mmap(m_mmap, m_mmapped_from_readable, m_mmapped_from_writable);
        region->set_shared(m_shared);
        region->set_syscall_region(is_syscall_region());
        return region;
    }

    if (vmobject().is_inode())
        VERIFY(vmobject().is_private_inode());

    auto vmobject_clone = TRY(vmobject().try_clone());

    // Set up a COW region. The parent (this) region becomes COW as well!
    if (is_writable())
        remap();

    OwnPtr<KString> clone_region_name;
    if (m_name)
        clone_region_name = TRY(m_name->try_clone());

    auto clone_region = TRY(Region::try_create_user_accessible(
        m_range, move(vmobject_clone), m_offset_in_vmobject, move(clone_region_name), access(), m_cacheable ? Cacheable::Yes : Cacheable::No, m_shared));

    if (m_stack) {
        VERIFY(vmobject().is_anonymous());
        clone_region->set_stack(true);
    }
    clone_region->set_syscall_region(is_syscall_region());
    clone_region->set_mmap(m_mmap, m_mmapped_from_readable, m_mmapped_from_writable);
    return clone_region;
}

void Region::set_vmobject(NonnullLockRefPtr<VMObject>&& obj)
{
    if (m_vmobject.ptr() == obj.ptr())
        return;
    m_vmobject->remove_region(*this);
    m_vmobject = move(obj);
    m_vmobject->add_region(*this);
}

size_t Region::cow_pages() const
{
    if (!vmobject().is_anonymous())
        return 0;
    return static_cast<AnonymousVMObject const&>(vmobject()).cow_pages();
}

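// Memory accounting helpers. A page counts as resident only if it is backed by
// a real physical frame (the shared zero page and lazily committed placeholder
// pages are excluded), and as shared if that frame is referenced from more than
// one place. For non-inode regions, "dirty" simply falls back to "resident".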
size_t Region::amount_dirty() const
{
    if (!vmobject().is_inode())
        return amount_resident();
    return static_cast<InodeVMObject const&>(vmobject()).amount_dirty();
}

size_t Region::amount_resident() const
{
    size_t bytes = 0;
    for (size_t i = 0; i < page_count(); ++i) {
        auto page = physical_page(i);
        if (page && !page->is_shared_zero_page() && !page->is_lazy_committed_page())
            bytes += PAGE_SIZE;
    }
    return bytes;
}

size_t Region::amount_shared() const
{
    size_t bytes = 0;
    for (size_t i = 0; i < page_count(); ++i) {
        auto page = physical_page(i);
        if (page && page->ref_count() > 1 && !page->is_shared_zero_page() && !page->is_lazy_committed_page())
            bytes += PAGE_SIZE;
    }
    return bytes;
}

ErrorOr<NonnullOwnPtr<Region>> Region::try_create_user_accessible(VirtualRange const& range, NonnullLockRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
{
    return adopt_nonnull_own_or_enomem(new (nothrow) Region(range, move(vmobject), offset_in_vmobject, move(name), access, cacheable, shared));
}

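// Copy-on-write state is tracked per physical page by the AnonymousVMObject;
// the region only translates its own page index into a VMObject page index
// (by adding first_page_index()) before querying or updating it.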
bool Region::should_cow(size_t page_index) const
{
    if (!vmobject().is_anonymous())
        return false;
    return static_cast<AnonymousVMObject const&>(vmobject()).should_cow(first_page_index() + page_index, m_shared);
}

ErrorOr<void> Region::set_should_cow(size_t page_index, bool cow)
{
    VERIFY(!m_shared);
    if (vmobject().is_anonymous())
        TRY(static_cast<AnonymousVMObject&>(vmobject()).set_should_cow(first_page_index() + page_index, cow));
    return {};
}

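// Writes the page table entry for a single page of this region. Pages that are
// the shared zero page, a lazily committed placeholder, or marked for CoW are
// deliberately mapped non-writable so that the first write traps into
// handle_fault(), where the real page gets allocated or copied.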
bool Region::map_individual_page_impl(size_t page_index, RefPtr<PhysicalPage> page)
{
    VERIFY(m_page_directory->get_lock().is_locked_by_current_processor());

    auto page_vaddr = vaddr_from_page_index(page_index);

    bool user_allowed = page_vaddr.get() >= USER_RANGE_BASE && is_user_address(page_vaddr);
    if (is_mmap() && !user_allowed) {
        PANIC("About to map mmap'ed page at a kernel address");
    }

    auto* pte = MM.ensure_pte(*m_page_directory, page_vaddr);
    if (!pte)
        return false;

    if (!page || (!is_readable() && !is_writable())) {
        pte->clear();
        return true;
    }

    pte->set_cache_disabled(!m_cacheable);
    pte->set_physical_page_base(page->paddr().get());
    pte->set_present(true);
    if (page->is_shared_zero_page() || page->is_lazy_committed_page() || should_cow(page_index))
        pte->set_writable(false);
    else
        pte->set_writable(is_writable());
    if (Processor::current().has_nx())
        pte->set_execute_disabled(!is_executable());
    if (Processor::current().has_pat())
        pte->set_pat(is_write_combine());
    pte->set_user_allowed(user_allowed);

    return true;
}

bool Region::map_individual_page_impl(size_t page_index)
{
    RefPtr<PhysicalPage> page;
    {
        SpinlockLocker vmobject_locker(vmobject().m_lock);
        page = physical_page(page_index);
    }

    return map_individual_page_impl(page_index, page);
}

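// Called with a VMObject-relative page index (e.g. when the backing VMObject
// replaces a page); returns false if that page does not fall inside this region.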
bool Region::remap_vmobject_page(size_t page_index, NonnullRefPtr<PhysicalPage> physical_page)
{
    SpinlockLocker page_lock(m_page_directory->get_lock());

    // NOTE: `page_index` is a VMObject page index, so first we convert it to a Region page index.
    if (!translate_vmobject_page(page_index))
        return false;

    bool success = map_individual_page_impl(page_index, physical_page);
    MemoryManager::flush_tlb(m_page_directory, vaddr_from_page_index(page_index));
    return success;
}

void Region::unmap(ShouldFlushTLB should_flush_tlb)
{
    if (!m_page_directory)
        return;
    SpinlockLocker pd_locker(m_page_directory->get_lock());
    unmap_with_locks_held(should_flush_tlb, pd_locker);
}

void Region::unmap_with_locks_held(ShouldFlushTLB should_flush_tlb, SpinlockLocker<RecursiveSpinlock<LockRank::None>>&)
{
    if (!m_page_directory)
        return;
    size_t count = page_count();
    for (size_t i = 0; i < count; ++i) {
        auto vaddr = vaddr_from_page_index(i);
        MM.release_pte(*m_page_directory, vaddr, i == count - 1 ? MemoryManager::IsLastPTERelease::Yes : MemoryManager::IsLastPTERelease::No);
    }
    if (should_flush_tlb == ShouldFlushTLB::Yes)
        MemoryManager::flush_tlb(m_page_directory, vaddr(), page_count());
    m_page_directory = nullptr;
}

void Region::set_page_directory(PageDirectory& page_directory)
{
    VERIFY(!m_page_directory || m_page_directory == &page_directory);
    m_page_directory = page_directory;
}

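// Maps every page of the region into the given page directory. If a page table
// cannot be allocated partway through, the pages mapped so far are kept, the
// TLB is flushed for them, and ENOMEM is returned.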
ErrorOr<void> Region::map(PageDirectory& page_directory, ShouldFlushTLB should_flush_tlb)
{
    SpinlockLocker page_lock(page_directory.get_lock());

    // FIXME: Find a better place for this sanity check(?)
    if (is_user() && !is_shared()) {
        VERIFY(!vmobject().is_shared_inode());
    }

    set_page_directory(page_directory);
    size_t page_index = 0;
    while (page_index < page_count()) {
        if (!map_individual_page_impl(page_index))
            break;
        ++page_index;
    }
    if (page_index > 0) {
        if (should_flush_tlb == ShouldFlushTLB::Yes)
            MemoryManager::flush_tlb(m_page_directory, vaddr(), page_index);
        if (page_index == page_count())
            return {};
    }
    return ENOMEM;
}

void Region::remap()
{
    VERIFY(m_page_directory);
    auto result = map(*m_page_directory);
    if (result.is_error())
        TODO();
}

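// Write-combining is expressed through the PAT bit in the PTEs, so it is only
// available when the CPU supports PAT; remap() rewrites the PTEs with the new
// attribute.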
ErrorOr<void> Region::set_write_combine(bool enable)
{
    if (enable && !Processor::current().has_pat()) {
        dbgln("PAT is not supported, implement MTRR fallback if available");
        return Error::from_errno(ENOTSUP);
    }

    m_write_combine = enable;
    remap();
    return {};
}

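// Resets every page of an anonymous region back to the shared zero page; the
// previously allocated physical frames are released as their slots are
// overwritten.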
void Region::clear_to_zero()
{
    VERIFY(vmobject().is_anonymous());
    SpinlockLocker locker(vmobject().m_lock);
    for (auto i = 0u; i < page_count(); ++i) {
        auto& page = physical_page_slot(i);
        VERIFY(page);
        if (page->is_shared_zero_page())
            continue;
        page = MM.shared_zero_page();
    }
}

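// Entry point for page faults that land inside this region. Not-present faults
// are dispatched to the inode, zero-page or committed-page paths; protection
// violations are either CoW faults (handled by copying the page) or genuine
// access violations, which crash the faulting process.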
PageFaultResponse Region::handle_fault(PageFault const& fault)
{
    auto page_index_in_region = page_index_from_address(fault.vaddr());
    if (fault.type() == PageFault::Type::PageNotPresent) {
        if (fault.is_read() && !is_readable()) {
            dbgln("NP(non-readable) fault in Region({})[{}]", this, page_index_in_region);
            return PageFaultResponse::ShouldCrash;
        }
        if (fault.is_write() && !is_writable()) {
            dbgln("NP(non-writable) write fault in Region({})[{}] at {}", this, page_index_in_region, fault.vaddr());
            return PageFaultResponse::ShouldCrash;
        }
        if (vmobject().is_inode()) {
            dbgln_if(PAGE_FAULT_DEBUG, "NP(inode) fault in Region({})[{}]", this, page_index_in_region);
            return handle_inode_fault(page_index_in_region);
        }

        SpinlockLocker vmobject_locker(vmobject().m_lock);
        auto& page_slot = physical_page_slot(page_index_in_region);
        if (page_slot->is_lazy_committed_page()) {
            auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
            VERIFY(m_vmobject->is_anonymous());
            page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page({});
            if (!remap_vmobject_page(page_index_in_vmobject, *page_slot))
                return PageFaultResponse::OutOfMemory;
            return PageFaultResponse::Continue;
        }

        dbgln("BUG! Unexpected NP fault at {}", fault.vaddr());
        dbgln(" - Physical page slot pointer: {:p}", page_slot.ptr());
        if (page_slot) {
            dbgln(" - Physical page: {}", page_slot->paddr());
            dbgln(" - Lazy committed: {}", page_slot->is_lazy_committed_page());
            dbgln(" - Shared zero: {}", page_slot->is_shared_zero_page());
        }
        return PageFaultResponse::ShouldCrash;
    }

    VERIFY(fault.type() == PageFault::Type::ProtectionViolation);
    if (fault.access() == PageFault::Access::Write && is_writable() && should_cow(page_index_in_region)) {
        dbgln_if(PAGE_FAULT_DEBUG, "PV(cow) fault in Region({})[{}] at {}", this, page_index_in_region, fault.vaddr());
        auto phys_page = physical_page(page_index_in_region);
        if (phys_page->is_shared_zero_page() || phys_page->is_lazy_committed_page()) {
            dbgln_if(PAGE_FAULT_DEBUG, "NP(zero) fault in Region({})[{}] at {}", this, page_index_in_region, fault.vaddr());
            return handle_zero_fault(page_index_in_region, *phys_page);
        }
        return handle_cow_fault(page_index_in_region);
    }
    dbgln("PV(error) fault in Region({})[{}] at {}", this, page_index_in_region, fault.vaddr());
    return PageFaultResponse::ShouldCrash;
}

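// Handles the first write to an anonymous page that is still backed by the
// shared zero page or a committed placeholder: allocate a fresh (zero-filled
// or pre-committed) physical page, then install it under the VMObject lock.
// If another thread won the race and installed a page first, we simply remap
// with their page.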
PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region, PhysicalPage& page_in_slot_at_time_of_fault)
{
    VERIFY(vmobject().is_anonymous());

    auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);

    auto current_thread = Thread::current();
    if (current_thread != nullptr)
        current_thread->did_zero_fault();

    RefPtr<PhysicalPage> new_physical_page;

    if (page_in_slot_at_time_of_fault.is_lazy_committed_page()) {
        VERIFY(m_vmobject->is_anonymous());
        new_physical_page = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page({});
        dbgln_if(PAGE_FAULT_DEBUG, " >> ALLOCATED COMMITTED {}", new_physical_page->paddr());
    } else {
        auto page_or_error = MM.allocate_physical_page(MemoryManager::ShouldZeroFill::Yes);
        if (page_or_error.is_error()) {
            dmesgln("MM: handle_zero_fault was unable to allocate a physical page");
            return PageFaultResponse::OutOfMemory;
        }
        new_physical_page = page_or_error.release_value();
        dbgln_if(PAGE_FAULT_DEBUG, " >> ALLOCATED {}", new_physical_page->paddr());
    }

    bool already_handled = false;

    {
        SpinlockLocker locker(vmobject().m_lock);
        auto& page_slot = physical_page_slot(page_index_in_region);
        already_handled = !page_slot.is_null() && !page_slot->is_shared_zero_page() && !page_slot->is_lazy_committed_page();
        if (already_handled) {
            // Someone else already faulted in a new page in this slot. That's fine, we'll just remap with their page.
            new_physical_page = page_slot;
        } else {
            // Install the newly allocated page into the VMObject.
            page_slot = new_physical_page;
        }
    }

    if (!remap_vmobject_page(page_index_in_vmobject, *new_physical_page)) {
        dmesgln("MM: handle_zero_fault was unable to allocate a page table to map {}", new_physical_page);
        return PageFaultResponse::OutOfMemory;
    }
    return PageFaultResponse::Continue;
}

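// Handles a write fault on a CoW page: the AnonymousVMObject resolves the
// copy (or keeps the existing page when nothing else shares it), after which
// the region remaps that slot so the write can proceed.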
PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
{
    auto current_thread = Thread::current();
    if (current_thread)
        current_thread->did_cow_fault();

    if (!vmobject().is_anonymous())
        return PageFaultResponse::ShouldCrash;

    auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);

    auto response = reinterpret_cast<AnonymousVMObject&>(vmobject()).handle_cow_fault(page_index_in_vmobject, vaddr().offset(page_index_in_region * PAGE_SIZE));
    if (!remap_vmobject_page(page_index_in_vmobject, *vmobject().physical_pages()[page_index_in_vmobject]))
        return PageFaultResponse::OutOfMemory;
    return response;
}

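// Handles a not-present fault on an inode-backed page: read the page from the
// inode into a stack buffer (without holding the VMObject lock, since the read
// may block), copy it into a freshly allocated physical page via a quickmap,
// and install it. Another thread may fault the same page in concurrently; in
// that case the page read here is discarded and theirs is remapped instead.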
PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
{
    VERIFY(vmobject().is_inode());
    VERIFY(!g_scheduler_lock.is_locked_by_current_processor());

    auto& inode_vmobject = static_cast<InodeVMObject&>(vmobject());

    auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
    auto& vmobject_physical_page_slot = inode_vmobject.physical_pages()[page_index_in_vmobject];

    {
        // NOTE: The VMObject lock is required when manipulating the VMObject's physical page slot.
        SpinlockLocker locker(inode_vmobject.m_lock);
        if (!vmobject_physical_page_slot.is_null()) {
            dbgln_if(PAGE_FAULT_DEBUG, "handle_inode_fault: Page faulted in by someone else before reading, remapping.");
            if (!remap_vmobject_page(page_index_in_vmobject, *vmobject_physical_page_slot))
                return PageFaultResponse::OutOfMemory;
            return PageFaultResponse::Continue;
        }
    }

    dbgln_if(PAGE_FAULT_DEBUG, "Inode fault in {} page index: {}", name(), page_index_in_region);

    auto current_thread = Thread::current();
    if (current_thread)
        current_thread->did_inode_fault();

    u8 page_buffer[PAGE_SIZE];
    auto& inode = inode_vmobject.inode();

    auto buffer = UserOrKernelBuffer::for_kernel_buffer(page_buffer);
    auto result = inode.read_bytes(page_index_in_vmobject * PAGE_SIZE, PAGE_SIZE, buffer, nullptr);

    if (result.is_error()) {
        dmesgln("handle_inode_fault: Error ({}) while reading from inode", result.error());
        return PageFaultResponse::ShouldCrash;
    }

    auto nread = result.value();
    // Note: If we received 0, it means we are at the end of file or after it,
    // which means we should return bus error.
    if (nread == 0)
        return PageFaultResponse::BusError;

    if (nread < PAGE_SIZE) {
        // If we read less than a page, zero out the rest to avoid leaking uninitialized data.
        memset(page_buffer + nread, 0, PAGE_SIZE - nread);
    }

    // Allocate a new physical page, and copy the read inode contents into it.
    auto new_physical_page_or_error = MM.allocate_physical_page(MemoryManager::ShouldZeroFill::No);
    if (new_physical_page_or_error.is_error()) {
        dmesgln("MM: handle_inode_fault was unable to allocate a physical page");
        return PageFaultResponse::OutOfMemory;
    }
    auto new_physical_page = new_physical_page_or_error.release_value();

    {
        InterruptDisabler disabler;
        u8* dest_ptr = MM.quickmap_page(*new_physical_page);
        memcpy(dest_ptr, page_buffer, PAGE_SIZE);
        MM.unquickmap_page();
    }

    {
        // NOTE: The VMObject lock is required when manipulating the VMObject's physical page slot.
        SpinlockLocker locker(inode_vmobject.m_lock);

        if (!vmobject_physical_page_slot.is_null()) {
            // Someone else faulted in this page while we were reading from the inode.
            // No harm done (other than some duplicate work), remap the page here and return.
            dbgln_if(PAGE_FAULT_DEBUG, "handle_inode_fault: Page faulted in by someone else, remapping.");
            if (!remap_vmobject_page(page_index_in_vmobject, *vmobject_physical_page_slot))
                return PageFaultResponse::OutOfMemory;
            return PageFaultResponse::Continue;
        }

        vmobject_physical_page_slot = new_physical_page;
    }

    if (!remap_vmobject_page(page_index_in_vmobject, *vmobject_physical_page_slot))
        return PageFaultResponse::OutOfMemory;
    return PageFaultResponse::Continue;
}

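// Two accessors with different locking contracts: physical_page() takes the
// VMObject lock itself and returns a copy of the RefPtr, while
// physical_page_slot() returns a mutable reference into the VMObject's page
// array and therefore requires the caller to already hold the lock.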
RefPtr<PhysicalPage> Region::physical_page(size_t index) const
{
    SpinlockLocker vmobject_locker(vmobject().m_lock);
    VERIFY(index < page_count());
    return vmobject().physical_pages()[first_page_index() + index];
}

RefPtr<PhysicalPage>& Region::physical_page_slot(size_t index)
{
    VERIFY(vmobject().m_lock.is_locked_by_current_processor());
    VERIFY(index < page_count());
    return vmobject().physical_pages()[first_page_index() + index];
}

}