Region.cpp

#include <Kernel/FileSystem/Inode.h>
#include <Kernel/Process.h>
#include <Kernel/Thread.h>
#include <Kernel/VM/AnonymousVMObject.h>
#include <Kernel/VM/InodeVMObject.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/Region.h>

//#define MM_DEBUG
//#define PAGE_FAULT_DEBUG

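// A Region describes a contiguous range of virtual memory in an address
// space, backed by a VMObject: anonymous memory, an inode's cached pages,
// or an existing VMObject at some offset (see the three constructors below.)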
Region::Region(const Range& range, const String& name, u8 access)
    : m_range(range)
    , m_vmobject(AnonymousVMObject::create_with_size(size()))
    , m_name(name)
    , m_access(access)
{
    MM.register_region(*this);
}

Region::Region(const Range& range, NonnullRefPtr<Inode> inode, const String& name, u8 access)
    : m_range(range)
    , m_vmobject(InodeVMObject::create_with_inode(*inode))
    , m_name(name)
    , m_access(access)
{
    MM.register_region(*this);
}

Region::Region(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, u8 access)
    : m_range(range)
    , m_offset_in_vmobject(offset_in_vmobject)
    , m_vmobject(move(vmobject))
    , m_name(name)
    , m_access(access)
{
    MM.register_region(*this);
}

Region::~Region()
{
    // Make sure we disable interrupts so we don't get interrupted between unmapping and unregistering.
    // Unmapping the region will give the VM back to the RangeAllocator, so an interrupt handler would
    // find the address<->region mappings in an invalid state there.
    InterruptDisabler disabler;
    if (m_page_directory) {
        unmap(ShouldDeallocateVirtualMemoryRange::Yes);
        ASSERT(!m_page_directory);
    }
    MM.unregister_region(*this);
}

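// Clone this region for a new address space. Shared and inode-backed regions
// keep pointing at the same VMObject; private anonymous regions get a cloned
// VMObject, and both the parent and the clone become CoW.
//
// A minimal usage sketch (hypothetical caller shape, e.g. during fork()):
//
//     for (auto& region : parent_regions) {
//         auto cloned_region = region.clone();
//         cloned_region->map(child_page_directory);
//         child_regions.append(move(cloned_region));
//     }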
NonnullOwnPtr<Region> Region::clone()
{
    ASSERT(current);
    // FIXME: What should we do for privately mapped InodeVMObjects?
    if (m_shared || vmobject().is_inode()) {
        ASSERT(!m_stack);
#ifdef MM_DEBUG
        dbgprintf("%s<%u> Region::clone(): sharing %s (V%p)\n",
            current->process().name().characters(),
            current->pid(),
            m_name.characters(),
            vaddr().get());
#endif
        // Create a new region backed by the same VMObject.
        return Region::create_user_accessible(m_range, m_vmobject, m_offset_in_vmobject, m_name, m_access);
    }
#ifdef MM_DEBUG
    dbgprintf("%s<%u> Region::clone(): cowing %s (V%p)\n",
        current->process().name().characters(),
        current->pid(),
        m_name.characters(),
        vaddr().get());
#endif
    // Set up a COW region. The parent (this) region becomes COW as well!
    ensure_cow_map().fill(true);
    remap();
    auto clone_region = Region::create_user_accessible(m_range, m_vmobject->clone(), m_offset_in_vmobject, m_name, m_access);
    clone_region->ensure_cow_map();
    if (m_stack) {
        ASSERT(is_readable());
        ASSERT(is_writable());
        ASSERT(!is_shared());
        ASSERT(vmobject().is_anonymous());
        clone_region->set_stack(true);
    }
    return clone_region;
}

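// Commit eagerly allocates a zero-filled physical page for every page slot
// in this region that is still unbacked, instead of waiting for first-touch
// zero faults to fill them in.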
bool Region::commit()
{
    InterruptDisabler disabler;
#ifdef MM_DEBUG
    dbgprintf("MM: commit %u pages in Region %p (VMO=%p) at V%p\n", vmobject().page_count(), this, &vmobject(), vaddr().get());
#endif
    for (size_t i = 0; i < page_count(); ++i) {
        if (!commit(i))
            return false;
    }
    return true;
}

bool Region::commit(size_t page_index)
{
    ASSERT(vmobject().is_anonymous() || vmobject().is_purgeable());
    InterruptDisabler disabler;
#ifdef MM_DEBUG
    dbgprintf("MM: commit single page (%zu) in Region %p (VMO=%p) at V%p\n", page_index, this, &vmobject(), vaddr().get());
#endif
    auto& vmobject_physical_page_entry = vmobject().physical_pages()[first_page_index() + page_index];
    if (!vmobject_physical_page_entry.is_null())
        return true;
    auto physical_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
    if (!physical_page) {
        kprintf("MM: commit was unable to allocate a physical page\n");
        return false;
    }
    vmobject_physical_page_entry = move(physical_page);
    remap_page(page_index);
    return true;
}

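// Accounting helpers: these walk the backing VMObject's physical page slots
// to report how many of this region's pages are CoW, dirty, resident, or shared.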
u32 Region::cow_pages() const
{
    if (!m_cow_map)
        return 0;
    u32 count = 0;
    for (int i = 0; i < m_cow_map->size(); ++i)
        count += m_cow_map->get(i);
    return count;
}

size_t Region::amount_dirty() const
{
    if (!vmobject().is_inode())
        return amount_resident();
    return static_cast<const InodeVMObject&>(vmobject()).amount_dirty();
}

size_t Region::amount_resident() const
{
    size_t bytes = 0;
    for (size_t i = 0; i < page_count(); ++i) {
        if (m_vmobject->physical_pages()[first_page_index() + i])
            bytes += PAGE_SIZE;
    }
    return bytes;
}

size_t Region::amount_shared() const
{
    size_t bytes = 0;
    for (size_t i = 0; i < page_count(); ++i) {
        auto& physical_page = m_vmobject->physical_pages()[first_page_index() + i];
        if (physical_page && physical_page->ref_count() > 1)
            bytes += PAGE_SIZE;
    }
    return bytes;
}

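// Factory helpers. The only difference between the user-accessible and
// kernel-only variants is the m_user_accessible flag, which ends up in the
// "user allowed" bit of each PTE (see map_individual_page_impl().)
//
// A minimal usage sketch (hypothetical; assumes the Access bits from Region.h):
//
//     auto range = page_directory.range_allocator().allocate_anywhere(PAGE_SIZE);
//     auto region = Region::create_user_accessible(range, "scratch", Region::Access::Read | Region::Access::Write);
//     region->map(page_directory);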
NonnullOwnPtr<Region> Region::create_user_accessible(const Range& range, const StringView& name, u8 access)
{
    auto region = make<Region>(range, name, access);
    region->m_user_accessible = true;
    return region;
}

NonnullOwnPtr<Region> Region::create_user_accessible(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access)
{
    auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access);
    region->m_user_accessible = true;
    return region;
}

NonnullOwnPtr<Region> Region::create_user_accessible(const Range& range, NonnullRefPtr<Inode> inode, const StringView& name, u8 access)
{
    auto region = make<Region>(range, move(inode), name, access);
    region->m_user_accessible = true;
    return region;
}

NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, const StringView& name, u8 access)
{
    auto region = make<Region>(range, name, access);
    region->m_user_accessible = false;
    return region;
}

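// The CoW bitmap records, per page, whether a write must first copy the
// page. Shared regions never CoW; their writes land in the shared pages.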
bool Region::should_cow(size_t page_index) const
{
    if (m_shared)
        return false;
    return m_cow_map && m_cow_map->get(page_index);
}

void Region::set_should_cow(size_t page_index, bool cow)
{
    ASSERT(!m_shared);
    ensure_cow_map().set(page_index, cow);
}

Bitmap& Region::ensure_cow_map() const
{
    if (!m_cow_map)
        m_cow_map = make<Bitmap>(page_count(), true);
    return *m_cow_map;
}

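// Write the PTE for a single page: unbacked pages are marked not-present so
// the first access faults, and CoW pages are mapped read-only so that a
// write traps into handle_cow_fault().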
void Region::map_individual_page_impl(size_t page_index)
{
    auto page_vaddr = vaddr().offset(page_index * PAGE_SIZE);
    auto& pte = MM.ensure_pte(*m_page_directory, page_vaddr);
    auto& physical_page = vmobject().physical_pages()[first_page_index() + page_index];
    if (!physical_page) {
        pte.set_physical_page_base(0);
        pte.set_present(false);
    } else {
        pte.set_physical_page_base(physical_page->paddr().get());
        pte.set_present(is_readable());
        if (should_cow(page_index))
            pte.set_writable(false);
        else
            pte.set_writable(is_writable());
        if (g_cpu_supports_nx)
            pte.set_execute_disabled(!is_executable());
        pte.set_user_allowed(is_user_accessible());
    }
    m_page_directory->flush(page_vaddr);
#ifdef MM_DEBUG
    // Guard the dereference: physical_page may be null for a not-yet-committed page.
    dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << ", PTE=" << (void*)pte.raw() << "{" << &pte << "}) " << name() << " " << page_vaddr << " => " << (physical_page ? physical_page->paddr().get() : 0) << " (@" << physical_page.ptr() << ")";
#endif
}

void Region::remap_page(size_t page_index)
{
    ASSERT(m_page_directory);
    InterruptDisabler disabler;
    ASSERT(vmobject().physical_pages()[first_page_index() + page_index]);
    map_individual_page_impl(page_index);
}

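// Tear down every PTE covering this region, optionally handing the virtual
// range back to the RangeAllocator (see the comment in ~Region().)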
void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
{
    InterruptDisabler disabler;
    ASSERT(m_page_directory);
    for (size_t i = 0; i < page_count(); ++i) {
        auto vaddr = this->vaddr().offset(i * PAGE_SIZE);
        auto& pte = MM.ensure_pte(*m_page_directory, vaddr);
        pte.set_physical_page_base(0);
        pte.set_present(false);
        pte.set_writable(false);
        pte.set_user_allowed(false);
        m_page_directory->flush(vaddr);
#ifdef MM_DEBUG
        auto& physical_page = vmobject().physical_pages()[first_page_index() + i];
        dbgprintf("MM: >> Unmapped V%p => P%p <<\n", vaddr.get(), physical_page ? physical_page->paddr().get() : 0);
#endif
    }
    if (deallocate_range == ShouldDeallocateVirtualMemoryRange::Yes)
        m_page_directory->range_allocator().deallocate(range());
    m_page_directory = nullptr;
}

void Region::map(PageDirectory& page_directory)
{
    ASSERT(!m_page_directory || m_page_directory == &page_directory);
    InterruptDisabler disabler;
    m_page_directory = page_directory;
#ifdef MM_DEBUG
    dbgprintf("MM: Region::map() will map VMO pages %u - %u (VMO page count: %u)\n", first_page_index(), last_page_index(), vmobject().page_count());
#endif
    for (size_t page_index = 0; page_index < page_count(); ++page_index)
        map_individual_page_impl(page_index);
}

void Region::remap()
{
    ASSERT(m_page_directory);
    map(*m_page_directory);
}

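// Dispatch a page fault against this region:
//   - not-present fault on an inode-backed region -> page in from the inode
//   - not-present fault on anonymous memory       -> hand out a zeroed page
//   - write to a CoW page                         -> copy, then remap r/w
// Anything else is a genuine access violation and the process should crash.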
PageFaultResponse Region::handle_fault(const PageFault& fault)
{
    auto page_index_in_region = page_index_from_address(fault.vaddr());
    if (fault.type() == PageFault::Type::PageNotPresent) {
        if (!is_readable()) {
            dbgprintf("NP(non-readable) fault in Region{%p}[%u]\n", this, page_index_in_region);
            return PageFaultResponse::ShouldCrash;
        }
        if (vmobject().is_inode()) {
#ifdef PAGE_FAULT_DEBUG
            dbgprintf("NP(inode) fault in Region{%p}[%u]\n", this, page_index_in_region);
#endif
            return handle_inode_fault(page_index_in_region);
        }
#ifdef PAGE_FAULT_DEBUG
        dbgprintf("NP(zero) fault in Region{%p}[%u]\n", this, page_index_in_region);
#endif
        return handle_zero_fault(page_index_in_region);
    }
    ASSERT(fault.type() == PageFault::Type::ProtectionViolation);
    if (fault.access() == PageFault::Access::Write && is_writable() && should_cow(page_index_in_region)) {
#ifdef PAGE_FAULT_DEBUG
        dbgprintf("PV(cow) fault in Region{%p}[%u]\n", this, page_index_in_region);
#endif
        return handle_cow_fault(page_index_in_region);
    }
    kprintf("PV(error) fault in Region{%p}[%u] at V%p\n", this, page_index_in_region, fault.vaddr().get());
    return PageFaultResponse::ShouldCrash;
}

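// Lazily allocate a zero-filled page on first touch. Interrupts are briefly
// re-enabled (sti/cli) around taking m_paging_lock, since acquiring it may block.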
PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(vmobject().is_anonymous());
    sti();
    LOCKER(vmobject().m_paging_lock);
    cli();
    auto& vmobject_physical_page_entry = vmobject().physical_pages()[first_page_index() + page_index_in_region];
    if (!vmobject_physical_page_entry.is_null()) {
#ifdef PAGE_FAULT_DEBUG
        dbgprintf("MM: zero_page() but page already present. Fine with me!\n");
#endif
        remap_page(page_index_in_region);
        return PageFaultResponse::Continue;
    }
    if (current)
        current->did_zero_fault();
    auto physical_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
    if (physical_page.is_null()) {
        kprintf("MM: handle_zero_fault was unable to allocate a physical page\n");
        return PageFaultResponse::ShouldCrash;
    }
#ifdef PAGE_FAULT_DEBUG
    dbgprintf(" >> ZERO P%p\n", physical_page->paddr().get());
#endif
    vmobject_physical_page_entry = move(physical_page);
    remap_page(page_index_in_region);
    return PageFaultResponse::Continue;
}

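// Break CoW for one page: if we hold the last reference, simply remap it
// read/write; otherwise copy the contents into a fresh page via a quickmap.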
PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
{
    ASSERT_INTERRUPTS_DISABLED();
    auto& vmobject_physical_page_entry = vmobject().physical_pages()[first_page_index() + page_index_in_region];
    if (vmobject_physical_page_entry->ref_count() == 1) {
#ifdef PAGE_FAULT_DEBUG
        dbgprintf(" >> It's a COW page but nobody is sharing it anymore. Remap r/w\n");
#endif
        set_should_cow(page_index_in_region, false);
        remap_page(page_index_in_region);
        return PageFaultResponse::Continue;
    }
    if (current)
        current->did_cow_fault();
#ifdef PAGE_FAULT_DEBUG
    dbgprintf(" >> It's a COW page and it's time to COW!\n");
#endif
    auto physical_page_to_copy = move(vmobject_physical_page_entry);
    auto physical_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
    if (physical_page.is_null()) {
        kprintf("MM: handle_cow_fault was unable to allocate a physical page\n");
        return PageFaultResponse::ShouldCrash;
    }
    u8* dest_ptr = MM.quickmap_page(*physical_page);
    const u8* src_ptr = vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
#ifdef PAGE_FAULT_DEBUG
    dbgprintf(" >> COW P%p <- P%p\n", physical_page->paddr().get(), physical_page_to_copy->paddr().get());
#endif
    memcpy(dest_ptr, src_ptr, PAGE_SIZE);
    vmobject_physical_page_entry = move(physical_page);
    MM.unquickmap_page();
    set_should_cow(page_index_in_region, false);
    remap_page(page_index_in_region);
    return PageFaultResponse::Continue;
}

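// Demand-page from the backing inode. The read goes into a stack buffer with
// interrupts enabled, since inode.read_bytes() may block on disk I/O; a short
// read is zero-padded so no uninitialized data leaks into the mapping.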
PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(vmobject().is_inode());
    auto& inode_vmobject = static_cast<InodeVMObject&>(vmobject());
    auto& vmobject_physical_page_entry = inode_vmobject.physical_pages()[first_page_index() + page_index_in_region];
    sti();
    LOCKER(vmobject().m_paging_lock);
    cli();
#ifdef PAGE_FAULT_DEBUG
    dbg() << *current << " inode fault in " << name() << " page index: " << page_index_in_region;
#endif
    if (!vmobject_physical_page_entry.is_null()) {
#ifdef PAGE_FAULT_DEBUG
        dbgprintf("MM: page_in_from_inode() but page already present. Fine with me!\n");
#endif
        remap_page(page_index_in_region);
        return PageFaultResponse::Continue;
    }
    if (current)
        current->did_inode_fault();
#ifdef MM_DEBUG
    dbgprintf("MM: page_in_from_inode ready to read from inode\n");
#endif
    sti();
    u8 page_buffer[PAGE_SIZE];
    auto& inode = inode_vmobject.inode();
    auto nread = inode.read_bytes((first_page_index() + page_index_in_region) * PAGE_SIZE, PAGE_SIZE, page_buffer, nullptr);
    if (nread < 0) {
        kprintf("MM: handle_inode_fault had error (%d) while reading!\n", nread);
        return PageFaultResponse::ShouldCrash;
    }
    if (nread < PAGE_SIZE) {
        // If we read less than a page, zero out the rest to avoid leaking uninitialized data.
        memset(page_buffer + nread, 0, PAGE_SIZE - nread);
    }
    cli();
    vmobject_physical_page_entry = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
    if (vmobject_physical_page_entry.is_null()) {
        kprintf("MM: handle_inode_fault was unable to allocate a physical page\n");
        return PageFaultResponse::ShouldCrash;
    }
    u8* dest_ptr = MM.quickmap_page(*vmobject_physical_page_entry);
    memcpy(dest_ptr, page_buffer, PAGE_SIZE);
    MM.unquickmap_page();
    remap_page(page_index_in_region);
    return PageFaultResponse::Continue;
}