/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <AK/Memory.h>
#include <AK/StringView.h>
#include <Kernel/FileSystem/Inode.h>
#include <Kernel/Process.h>
#include <Kernel/Thread.h>
#include <Kernel/VM/AnonymousVMObject.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PageDirectory.h>
#include <Kernel/VM/Region.h>
#include <Kernel/VM/SharedInodeVMObject.h>

//#define MM_DEBUG
//#define PAGE_FAULT_DEBUG

namespace Kernel {
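
// A Region describes one contiguous range of virtual memory, backed by (a
// slice of) a VMObject. The constructor registers the region with the
// MemoryManager so the page fault handler can find it by address.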
Region::Region(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, u8 access, bool cacheable, bool kernel, bool shared)
    : PurgeablePageRanges(vmobject)
    , m_range(range)
    , m_offset_in_vmobject(offset_in_vmobject)
    , m_vmobject(move(vmobject))
    , m_name(name)
    , m_access(access)
    , m_shared(shared)
    , m_cacheable(cacheable)
    , m_kernel(kernel)
{
    register_purgeable_page_ranges();
    MM.register_region(*this);
}

Region::~Region()
{
    unregister_purgeable_page_ranges();

    // Make sure we disable interrupts so we don't get interrupted between unmapping and unregistering.
    // Unmapping the region will give the VM back to the RangeAllocator, so an interrupt handler would
    // find the address<->region mappings in an invalid state there.
    ScopedSpinLock lock(s_mm_lock);
    if (m_page_directory) {
        unmap(ShouldDeallocateVirtualMemoryRange::Yes);
        ASSERT(!m_page_directory);
    }

    MM.unregister_region(*this);
}
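
// Purgeable ("volatile") page tracking only applies to anonymous memory, so
// these hooks only talk to the VMObject when it is an AnonymousVMObject.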
void Region::register_purgeable_page_ranges()
{
    if (m_vmobject->is_anonymous()) {
        auto& vmobject = static_cast<AnonymousVMObject&>(*m_vmobject);
        vmobject.register_purgeable_page_ranges(*this);
    }
}

void Region::unregister_purgeable_page_ranges()
{
    if (m_vmobject->is_anonymous()) {
        auto& vmobject = static_cast<AnonymousVMObject&>(*m_vmobject);
        vmobject.unregister_purgeable_page_ranges(*this);
    }
}
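
// Clones this region into a new owner process (used when forking). Three
// cases are handled:
//  - ZeroedOnFork regions get a fresh zero-filled AnonymousVMObject.
//  - Shared regions get a new Region backed by the very same VMObject.
//  - Everything else becomes copy-on-write: the VMObject is cloned and the
//    parent region is remapped so that both sides fault on the next write.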
OwnPtr<Region> Region::clone(Process& new_owner)
{
    ASSERT(Process::current());

    ScopedSpinLock lock(s_mm_lock);
    if (m_inherit_mode == InheritMode::ZeroedOnFork) {
        ASSERT(m_mmap);
        ASSERT(!m_shared);
        ASSERT(vmobject().is_anonymous());
        auto new_vmobject = AnonymousVMObject::create_with_size(size(), AllocationStrategy::Reserve); // TODO: inherit committed non-volatile areas?
        if (!new_vmobject)
            return {};
        auto zeroed_region = Region::create_user_accessible(&new_owner, m_range, new_vmobject.release_nonnull(), 0, m_name, m_access);
        zeroed_region->copy_purgeable_page_ranges(*this);
        zeroed_region->set_mmap(m_mmap);
        zeroed_region->set_inherit_mode(m_inherit_mode);
        return zeroed_region;
    }

    if (m_shared) {
        ASSERT(!m_stack);
#ifdef MM_DEBUG
        dbg() << "Region::clone(): Sharing " << name() << " (" << vaddr() << ")";
#endif
        if (vmobject().is_inode())
            ASSERT(vmobject().is_shared_inode());

        // Create a new region backed by the same VMObject.
        auto region = Region::create_user_accessible(&new_owner, m_range, m_vmobject, m_offset_in_vmobject, m_name, m_access);
        if (m_vmobject->is_anonymous())
            region->copy_purgeable_page_ranges(*this);
        region->set_mmap(m_mmap);
        region->set_shared(m_shared);
        return region;
    }

    if (vmobject().is_inode())
        ASSERT(vmobject().is_private_inode());

    auto vmobject_clone = vmobject().clone();
    if (!vmobject_clone)
        return {};

#ifdef MM_DEBUG
    dbg() << "Region::clone(): CoWing " << name() << " (" << vaddr() << ")";
#endif

    // Set up a COW region. The parent (this) region becomes COW as well!
    remap();
    auto clone_region = Region::create_user_accessible(&new_owner, m_range, vmobject_clone.release_nonnull(), m_offset_in_vmobject, m_name, m_access);
    if (m_vmobject->is_anonymous())
        clone_region->copy_purgeable_page_ranges(*this);
    if (m_stack) {
        ASSERT(is_readable());
        ASSERT(is_writable());
        ASSERT(vmobject().is_anonymous());
        clone_region->set_stack(true);
    }
    clone_region->set_mmap(m_mmap);
    return clone_region;
}
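
// Replaces the backing VMObject, moving purgeable page range tracking from
// the old object to the new one.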
void Region::set_vmobject(NonnullRefPtr<VMObject>&& obj)
{
    if (m_vmobject.ptr() == obj.ptr())
        return;
    unregister_purgeable_page_ranges();
    m_vmobject = move(obj);
    register_purgeable_page_ranges();
}
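
// Returns whether the given address range is currently marked volatile. The
// virtual address is first translated into an offset within the backing
// VMObject, then into a range of page indices.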
bool Region::is_volatile(VirtualAddress vaddr, size_t size) const
{
    if (!m_vmobject->is_anonymous())
        return false;

    auto offset_in_vmobject = vaddr.get() - (this->vaddr().get() - m_offset_in_vmobject);
    size_t first_page_index = PAGE_ROUND_DOWN(offset_in_vmobject) / PAGE_SIZE;
    size_t last_page_index = PAGE_ROUND_UP(offset_in_vmobject + size) / PAGE_SIZE;
    return is_volatile_range({ first_page_index, last_page_index - first_page_index });
}
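
// Marks an address range volatile or non-volatile. Partial pages are rounded
// conservatively (inward when marking volatile, outward when marking
// non-volatile) so data outside the requested range is never at risk of
// being discarded.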
auto Region::set_volatile(VirtualAddress vaddr, size_t size, bool is_volatile, bool& was_purged) -> SetVolatileError
{
    was_purged = false;
    if (!m_vmobject->is_anonymous())
        return SetVolatileError::NotPurgeable;

    auto offset_in_vmobject = vaddr.get() - (this->vaddr().get() - m_offset_in_vmobject);
    if (is_volatile) {
        // If marking pages as volatile, be prudent by not marking
        // partial pages volatile, to prevent potentially non-volatile
        // data from being discarded. So round up the first page and
        // round down the last page.
        size_t first_page_index = PAGE_ROUND_UP(offset_in_vmobject) / PAGE_SIZE;
        size_t last_page_index = PAGE_ROUND_DOWN(offset_in_vmobject + size) / PAGE_SIZE;
        if (first_page_index < last_page_index)
            add_volatile_range({ first_page_index, last_page_index - first_page_index });
    } else {
        // If marking pages as non-volatile, round down the first page
        // and round up the last page to make sure the beginning and
        // end of the range don't inadvertently get discarded.
        size_t first_page_index = PAGE_ROUND_DOWN(offset_in_vmobject) / PAGE_SIZE;
        size_t last_page_index = PAGE_ROUND_UP(offset_in_vmobject + size) / PAGE_SIZE;
        switch (remove_volatile_range({ first_page_index, last_page_index - first_page_index }, was_purged)) {
        case PurgeablePageRanges::RemoveVolatileError::Success:
        case PurgeablePageRanges::RemoveVolatileError::SuccessNoChange:
            break;
        case PurgeablePageRanges::RemoveVolatileError::OutOfMemory:
            return SetVolatileError::OutOfMemory;
        }
    }
    return SetVolatileError::Success;
}
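
// Accounting helpers, reporting byte counts for this region (presumably for
// introspection interfaces such as /proc).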
size_t Region::cow_pages() const
{
    if (!vmobject().is_anonymous())
        return 0;
    return static_cast<const AnonymousVMObject&>(vmobject()).cow_pages();
}

size_t Region::amount_dirty() const
{
    if (!vmobject().is_inode())
        return amount_resident();
    return static_cast<const InodeVMObject&>(vmobject()).amount_dirty();
}

size_t Region::amount_resident() const
{
    size_t bytes = 0;
    for (size_t i = 0; i < page_count(); ++i) {
        auto* page = physical_page(i);
        if (page && !page->is_shared_zero_page() && !page->is_lazy_committed_page())
            bytes += PAGE_SIZE;
    }
    return bytes;
}

size_t Region::amount_shared() const
{
    size_t bytes = 0;
    for (size_t i = 0; i < page_count(); ++i) {
        auto* page = physical_page(i);
        if (page && page->ref_count() > 1 && !page->is_shared_zero_page() && !page->is_lazy_committed_page())
            bytes += PAGE_SIZE;
    }
    return bytes;
}
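
// Factory functions: user-accessible regions remember their owning process
// via a weak pointer; kernel-only regions are never mapped user-accessible.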
NonnullOwnPtr<Region> Region::create_user_accessible(Process* owner, const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable, bool shared)
{
    auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access, cacheable, false, shared);
    if (owner)
        region->m_owner = owner->make_weak_ptr();
    region->m_user_accessible = true;
    return region;
}

NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable)
{
    auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access, cacheable, true, false);
    region->m_user_accessible = false;
    return region;
}
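
// Copy-on-write state is tracked per-page in the backing AnonymousVMObject,
// so the region-relative page index is translated with first_page_index().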
bool Region::should_cow(size_t page_index) const
{
    if (!vmobject().is_anonymous())
        return false;
    return static_cast<const AnonymousVMObject&>(vmobject()).should_cow(first_page_index() + page_index, m_shared);
}

void Region::set_should_cow(size_t page_index, bool cow)
{
    ASSERT(!m_shared);
    if (vmobject().is_anonymous())
        static_cast<AnonymousVMObject&>(vmobject()).set_should_cow(first_page_index() + page_index, cow);
}
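
// Writes the page table entry for one page of this region. Shared-zero,
// lazily committed, and CoW pages are deliberately mapped non-writable so
// the first write faults and can be resolved in handle_fault().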
bool Region::map_individual_page_impl(size_t page_index)
{
    ASSERT(m_page_directory->get_lock().own_lock());
    auto page_vaddr = vaddr_from_page_index(page_index);
    auto* pte = MM.ensure_pte(*m_page_directory, page_vaddr);
    if (!pte) {
#ifdef MM_DEBUG
        dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << " " << name() << " cannot create PTE for " << page_vaddr;
#endif
        return false;
    }
    auto* page = physical_page(page_index);
    if (!page || (!is_readable() && !is_writable())) {
        pte->clear();
    } else {
        pte->set_cache_disabled(!m_cacheable);
        pte->set_physical_page_base(page->paddr().get());
        pte->set_present(true);
        if (page->is_shared_zero_page() || page->is_lazy_committed_page() || should_cow(page_index))
            pte->set_writable(false);
        else
            pte->set_writable(is_writable());
        if (Processor::current().has_feature(CPUFeature::NX))
            pte->set_execute_disabled(!is_executable());
        pte->set_user_allowed(is_user_accessible());
#ifdef MM_DEBUG
        dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << ", PTE=" << (void*)pte->raw() << "{" << pte << "}) " << name() << " " << page_vaddr << " => " << page->paddr() << " (@" << page << ")";
#endif
    }
    return true;
}
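
// Re-applies the mappings for a range of pages (or a single page), flushing
// the TLB for whatever was actually remapped.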
bool Region::remap_page_range(size_t page_index, size_t page_count)
{
    bool success = true;
    ScopedSpinLock lock(s_mm_lock);
    ASSERT(m_page_directory);
    ScopedSpinLock page_lock(m_page_directory->get_lock());
    size_t index = page_index;
    while (index < page_index + page_count) {
        if (!map_individual_page_impl(index)) {
            success = false;
            break;
        }
        index++;
    }
    if (index > page_index)
        MM.flush_tlb(vaddr_from_page_index(page_index), index - page_index);
    return success;
}

bool Region::remap_page(size_t page_index, bool with_flush)
{
    ScopedSpinLock lock(s_mm_lock);
    ASSERT(m_page_directory);
    ScopedSpinLock page_lock(m_page_directory->get_lock());
    ASSERT(physical_page(page_index));
    bool success = map_individual_page_impl(page_index);
    if (with_flush)
        MM.flush_tlb(vaddr_from_page_index(page_index));
    return success;
}
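
// Tears down all page table entries for this region and optionally gives the
// virtual range back to the range allocator it came from.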
void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
{
    ScopedSpinLock lock(s_mm_lock);
    if (!m_page_directory)
        return;
    ScopedSpinLock page_lock(m_page_directory->get_lock());
    size_t count = page_count();
    for (size_t i = 0; i < count; ++i) {
        auto vaddr = vaddr_from_page_index(i);
        MM.release_pte(*m_page_directory, vaddr, i == count - 1);
#ifdef MM_DEBUG
        auto* page = physical_page(i);
        dbg() << "MM: >> Unmapped " << vaddr << " => P" << String::format("%p", page ? page->paddr().get() : 0) << " <<";
#endif
    }
    MM.flush_tlb(vaddr(), page_count());
    if (deallocate_range == ShouldDeallocateVirtualMemoryRange::Yes) {
        if (m_page_directory->range_allocator().contains(range()))
            m_page_directory->range_allocator().deallocate(range());
        else
            m_page_directory->identity_range_allocator().deallocate(range());
    }
    m_page_directory = nullptr;
}

void Region::set_page_directory(PageDirectory& page_directory)
{
    ASSERT(!m_page_directory || m_page_directory == &page_directory);
    ASSERT(s_mm_lock.own_lock());
    m_page_directory = page_directory;
}
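
// Maps every page of the region into the given page directory. Returns true
// only if all pages were mapped; on partial failure the TLB is still flushed
// for the pages that did get mapped.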
bool Region::map(PageDirectory& page_directory)
{
    ScopedSpinLock lock(s_mm_lock);
    ScopedSpinLock page_lock(page_directory.get_lock());
    set_page_directory(page_directory);
#ifdef MM_DEBUG
    dbg() << "MM: Region::map() will map VMO pages " << first_page_index() << " - " << last_page_index() << " (VMO page count: " << vmobject().page_count() << ")";
#endif
    size_t page_index = 0;
    while (page_index < page_count()) {
        if (!map_individual_page_impl(page_index))
            break;
        ++page_index;
    }
    if (page_index > 0) {
        MM.flush_tlb(vaddr(), page_index);
        return page_index == page_count();
    }
    return false;
}

void Region::remap()
{
    ASSERT(m_page_directory);
    map(*m_page_directory);
}
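
// Central page fault dispatch for this region. Not-present faults are routed
// to the inode, lazy-commit, or zero-page handlers; a write protection
// violation on a CoW page triggers the copy-on-write handler. Anything else
// is a genuine access violation and crashes the faulting process.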
PageFaultResponse Region::handle_fault(const PageFault& fault)
{
    ScopedSpinLock lock(s_mm_lock);
    auto page_index_in_region = page_index_from_address(fault.vaddr());
    if (fault.type() == PageFault::Type::PageNotPresent) {
        if (fault.is_read() && !is_readable()) {
            dbg() << "NP(non-readable) fault in Region{" << this << "}[" << page_index_in_region << "]";
            return PageFaultResponse::ShouldCrash;
        }
        if (fault.is_write() && !is_writable()) {
            dbg() << "NP(non-writable) write fault in Region{" << this << "}[" << page_index_in_region << "] at " << fault.vaddr();
            return PageFaultResponse::ShouldCrash;
        }
        if (vmobject().is_inode()) {
#ifdef PAGE_FAULT_DEBUG
            dbg() << "NP(inode) fault in Region{" << this << "}[" << page_index_in_region << "]";
#endif
            return handle_inode_fault(page_index_in_region);
        }
        auto& page_slot = physical_page_slot(page_index_in_region);
        if (page_slot->is_lazy_committed_page()) {
            page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page(page_index_in_region);
            remap_page(page_index_in_region);
            return PageFaultResponse::Continue;
        }
#ifdef MAP_SHARED_ZERO_PAGE_LAZILY
        if (fault.is_read()) {
            page_slot = MM.shared_zero_page();
            remap_page(page_index_in_region);
            return PageFaultResponse::Continue;
        }
        return handle_zero_fault(page_index_in_region);
#else
        dbg() << "BUG! Unexpected NP fault at " << fault.vaddr();
        return PageFaultResponse::ShouldCrash;
#endif
    }
    ASSERT(fault.type() == PageFault::Type::ProtectionViolation);
    if (fault.access() == PageFault::Access::Write && is_writable() && should_cow(page_index_in_region)) {
#ifdef PAGE_FAULT_DEBUG
        dbg() << "PV(cow) fault in Region{" << this << "}[" << page_index_in_region << "] at " << fault.vaddr();
#endif
        auto* phys_page = physical_page(page_index_in_region);
        if (phys_page->is_shared_zero_page() || phys_page->is_lazy_committed_page()) {
#ifdef PAGE_FAULT_DEBUG
            dbg() << "NP(zero) fault in Region{" << this << "}[" << page_index_in_region << "] at " << fault.vaddr();
#endif
            return handle_zero_fault(page_index_in_region);
        }
        return handle_cow_fault(page_index_in_region);
    }
    dbg() << "PV(error) fault in Region{" << this << "}[" << page_index_in_region << "] at " << fault.vaddr();
    return PageFaultResponse::ShouldCrash;
}
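
// Resolves a fault on a never-written anonymous page: lazily committed pages
// take a page from the already-committed pool, otherwise a fresh zero-filled
// physical page is allocated on the spot.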
PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(vmobject().is_anonymous());

    LOCKER(vmobject().m_paging_lock);

    auto& page_slot = physical_page_slot(page_index_in_region);
    if (!page_slot.is_null() && !page_slot->is_shared_zero_page() && !page_slot->is_lazy_committed_page()) {
#ifdef PAGE_FAULT_DEBUG
        dbg() << "MM: zero_page() but page already present. Fine with me!";
#endif
        if (!remap_page(page_index_in_region))
            return PageFaultResponse::OutOfMemory;
        return PageFaultResponse::Continue;
    }

    auto current_thread = Thread::current();
    if (current_thread != nullptr)
        current_thread->did_zero_fault();

    if (page_slot->is_lazy_committed_page()) {
        page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page(page_index_in_region);
#ifdef PAGE_FAULT_DEBUG
        dbg() << " >> ALLOCATED COMMITTED " << page_slot->paddr();
#endif
    } else {
        page_slot = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
        if (page_slot.is_null()) {
            klog() << "MM: handle_zero_fault was unable to allocate a physical page";
            return PageFaultResponse::OutOfMemory;
        }
#ifdef PAGE_FAULT_DEBUG
        dbg() << " >> ALLOCATED " << page_slot->paddr();
#endif
    }

    if (!remap_page(page_index_in_region)) {
        klog() << "MM: handle_zero_fault was unable to allocate a page table to map " << page_slot;
        return PageFaultResponse::OutOfMemory;
    }
    return PageFaultResponse::Continue;
}
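
// Resolves a write to a CoW page by letting the AnonymousVMObject break the
// sharing (copying the page contents if anyone else still references it),
// then remapping the page with its normal permissions.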
PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
{
    ASSERT_INTERRUPTS_DISABLED();
    auto current_thread = Thread::current();
    if (current_thread)
        current_thread->did_cow_fault();

    if (!vmobject().is_anonymous())
        return PageFaultResponse::ShouldCrash;

    auto response = static_cast<AnonymousVMObject&>(vmobject()).handle_cow_fault(first_page_index() + page_index_in_region, vaddr().offset(page_index_in_region * PAGE_SIZE));
    if (!remap_page(page_index_in_region))
        return PageFaultResponse::OutOfMemory;
    return response;
}
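
// Resolves a not-present fault on an inode-backed page: the data is read
// from the inode into a stack buffer, a physical page is allocated, and the
// buffer is copied in via a quickmapped pointer before the page is mapped.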
PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(vmobject().is_inode());

    LOCKER(vmobject().m_paging_lock);
    ASSERT_INTERRUPTS_DISABLED();

    auto& inode_vmobject = static_cast<InodeVMObject&>(vmobject());
    auto& vmobject_physical_page_entry = inode_vmobject.physical_pages()[first_page_index() + page_index_in_region];

#ifdef PAGE_FAULT_DEBUG
    dbg() << "Inode fault in " << name() << " page index: " << page_index_in_region;
#endif

    if (!vmobject_physical_page_entry.is_null()) {
#ifdef PAGE_FAULT_DEBUG
        dbg() << "MM: page_in_from_inode() but page already present. Fine with me!";
#endif
        if (!remap_page(page_index_in_region))
            return PageFaultResponse::OutOfMemory;
        return PageFaultResponse::Continue;
    }

    auto current_thread = Thread::current();
    if (current_thread)
        current_thread->did_inode_fault();

#ifdef MM_DEBUG
    dbg() << "MM: page_in_from_inode ready to read from inode";
#endif

    u8 page_buffer[PAGE_SIZE];
    auto& inode = inode_vmobject.inode();
    auto buffer = UserOrKernelBuffer::for_kernel_buffer(page_buffer);
    auto nread = inode.read_bytes((first_page_index() + page_index_in_region) * PAGE_SIZE, PAGE_SIZE, buffer, nullptr);
    if (nread < 0) {
        klog() << "MM: handle_inode_fault had error (" << nread << ") while reading!";
        return PageFaultResponse::ShouldCrash;
    }
    if (nread < PAGE_SIZE) {
        // If we read less than a page, zero out the rest to avoid leaking uninitialized data.
        memset(page_buffer + nread, 0, PAGE_SIZE - nread);
    }

    vmobject_physical_page_entry = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
    if (vmobject_physical_page_entry.is_null()) {
        klog() << "MM: handle_inode_fault was unable to allocate a physical page";
        return PageFaultResponse::OutOfMemory;
    }

    u8* dest_ptr = MM.quickmap_page(*vmobject_physical_page_entry);
    {
        void* fault_at;
        if (!safe_memcpy(dest_ptr, page_buffer, PAGE_SIZE, fault_at)) {
            if ((u8*)fault_at >= dest_ptr && (u8*)fault_at <= dest_ptr + PAGE_SIZE)
                dbg() << " >> inode fault: error copying data to " << vmobject_physical_page_entry->paddr() << "/" << VirtualAddress(dest_ptr) << ", failed at " << VirtualAddress(fault_at);
            else
                ASSERT_NOT_REACHED();
        }
    }
    MM.unquickmap_page();
    remap_page(page_index_in_region);
    return PageFaultResponse::Continue;
}

RefPtr<Process> Region::get_owner()
{
    return m_owner.strong_ref();
}

}