MemoryManager.cpp

#include "MemoryManager.h"
#include <AK/Assertions.h>
#include <AK/kstdio.h>
#include <AK/kmalloc.h>
#include "i386.h"
#include "StdLib.h"
#include "Process.h"

//#define MM_DEBUG
//#define PAGE_FAULT_DEBUG

#define SCRUB_DEALLOCATED_PAGE_TABLES

static MemoryManager* s_the;
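
// NOTE: The definition below looks odd because `MM` is presumably a macro in
// MemoryManager.h expanding to `MemoryManager::the()`; call sites such as
// `MM.remap_region(...)` rely on that expansion, so this block defines the
// singleton accessor rather than a global variable.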
MemoryManager& MM
{
    return *s_the;
}
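
// The kernel page directory and the first two page tables live at fixed,
// identity-mapped physical addresses (0x4000, 0x6000, 0x7000). Kernel-only
// aliases of regions are handed out starting at linear address 0xd0000000.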
MemoryManager::MemoryManager()
{
    m_kernel_page_directory = (PageDirectory*)0x4000;
    m_pageTableZero = (dword*)0x6000;
    m_pageTableOne = (dword*)0x7000;
    m_next_laddr.set(0xd0000000);
    initializePaging();
}

MemoryManager::~MemoryManager()
{
}
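
// Every new page directory inherits the kernel's first two PDEs, so the
// bottom 8 MB (the identity-mapped kernel area) is shared by all processes.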
void MemoryManager::populate_page_directory(PageDirectory& page_directory)
{
    memset(&page_directory, 0, sizeof(PageDirectory));
    page_directory.entries[0] = m_kernel_page_directory->entries[0];
    page_directory.entries[1] = m_kernel_page_directory->entries[1];
}

void MemoryManager::release_page_directory(PageDirectory& page_directory)
{
    ASSERT_INTERRUPTS_DISABLED();
#ifdef MM_DEBUG
    dbgprintf("MM: release_page_directory for PD K%x\n", &page_directory);
#endif
    for (size_t i = 0; i < 1024; ++i) {
        auto& page_table = page_directory.physical_pages[i];
        if (!page_table.is_null()) {
#ifdef MM_DEBUG
            dbgprintf("MM: deallocating user page table P%x\n", page_table->paddr().get());
#endif
            deallocate_page_table(page_directory, i);
        }
    }
#ifdef SCRUB_DEALLOCATED_PAGE_TABLES
    memset(&page_directory, 0xc9, sizeof(PageDirectory));
#endif
}
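
// Sets up the initial kernel address space: a guard page at address zero, an
// identity mapping for the rest of the bottom 4 MB, a freelist of physical
// pages between 4 MB and 8 MB, and finally enables paging via CR3 and CR0.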
void MemoryManager::initializePaging()
{
    static_assert(sizeof(MemoryManager::PageDirectoryEntry) == 4);
    static_assert(sizeof(MemoryManager::PageTableEntry) == 4);
    memset(m_pageTableZero, 0, PAGE_SIZE);
    memset(m_pageTableOne, 0, PAGE_SIZE);
    memset(m_kernel_page_directory, 0, sizeof(PageDirectory));
#ifdef MM_DEBUG
    dbgprintf("MM: Kernel page directory @ %p\n", m_kernel_page_directory);
#endif
    // Make null dereferences crash.
    protectMap(LinearAddress(0), PAGE_SIZE);
    // The bottom 4 MB are identity mapped & supervisor only. Every process shares these mappings.
    create_identity_mapping(LinearAddress(PAGE_SIZE), 4 * MB);
    // The physical pages from 4 MB through 8 MB are available for allocation.
    // The page at exactly 4 MB is skipped; its identity-mapped linear address
    // is reserved for the quickmap slot (see quickmap_page()).
    for (size_t i = (4 * MB) + PAGE_SIZE; i < (8 * MB); i += PAGE_SIZE)
        m_free_physical_pages.append(adopt(*new PhysicalPage(PhysicalAddress(i))));
    asm volatile("movl %%eax, %%cr3" ::"a"(m_kernel_page_directory));
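    // Enable paging by setting PG (bit 31) and PE (bit 0) in CR0.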
    asm volatile(
        "movl %cr0, %eax\n"
        "orl $0x80000001, %eax\n"
        "movl %eax, %cr0\n");
}
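
// Grabs one physical page for use as a page table, identity maps it (page
// tables are accessed through their physical address), zeroes it, and stashes
// it in the page directory's bookkeeping.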
RetainPtr<PhysicalPage> MemoryManager::allocate_page_table(PageDirectory& page_directory, unsigned index)
{
    auto& page_directory_physical_ptr = page_directory.physical_pages[index];
    ASSERT(!page_directory_physical_ptr);
    auto ppages = allocate_physical_pages(1);
    ASSERT(ppages.size() == 1);
    dword address = ppages[0]->paddr().get();
    create_identity_mapping(LinearAddress(address), PAGE_SIZE);
    memset((void*)address, 0, PAGE_SIZE);
    page_directory.physical_pages[index] = move(ppages[0]);
    return page_directory.physical_pages[index];
}

void MemoryManager::deallocate_page_table(PageDirectory& page_directory, unsigned index)
{
    auto& physical_page = page_directory.physical_pages[index];
    ASSERT(physical_page);
    //FIXME: This line is buggy and effectful somehow :(
    //ASSERT(!m_free_physical_pages.contains_slow(physical_page));
    for (size_t i = 0; i < MM.m_free_physical_pages.size(); ++i) {
        ASSERT(MM.m_free_physical_pages[i].ptr() != physical_page.ptr());
    }
    remove_identity_mapping(LinearAddress(physical_page->paddr().get()), PAGE_SIZE);
    page_directory.physical_pages[index] = nullptr;
}

void MemoryManager::remove_identity_mapping(LinearAddress laddr, size_t size)
{
    InterruptDisabler disabler;
    // FIXME: ASSERT(laddr is 4KB aligned);
    for (dword offset = 0; offset < size; offset += PAGE_SIZE) {
        auto pte_address = laddr.offset(offset);
        auto pte = ensurePTE(m_kernel_page_directory, pte_address);
        pte.setPhysicalPageBase(0);
        pte.setUserAllowed(false);
        // Clear the mapping entirely; leaving the PTE present and writable
        // would keep a stale mapping of physical page 0 around.
        pte.setPresent(false);
        pte.setWritable(false);
        flushTLB(pte_address);
    }
}
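
// Walks (and lazily builds) the two-level page table structure for `laddr`:
// bits 31:22 of the linear address index the page directory, bits 21:12
// index the page table, and bits 11:0 are the offset within the page.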
auto MemoryManager::ensurePTE(PageDirectory* page_directory, LinearAddress laddr) -> PageTableEntry
{
    ASSERT_INTERRUPTS_DISABLED();
    dword page_directory_index = (laddr.get() >> 22) & 0x3ff;
    dword page_table_index = (laddr.get() >> 12) & 0x3ff;
    PageDirectoryEntry pde = PageDirectoryEntry(&page_directory->entries[page_directory_index]);
    if (!pde.isPresent()) {
#ifdef MM_DEBUG
        dbgprintf("MM: PDE %u not present, allocating\n", page_directory_index);
#endif
        if (page_directory_index == 0) {
            ASSERT(page_directory == m_kernel_page_directory);
            pde.setPageTableBase((dword)m_pageTableZero);
            pde.setUserAllowed(false);
            pde.setPresent(true);
            pde.setWritable(true);
        } else if (page_directory_index == 1) {
            ASSERT(page_directory == m_kernel_page_directory);
            pde.setPageTableBase((dword)m_pageTableOne);
            pde.setUserAllowed(false);
            pde.setPresent(true);
            pde.setWritable(true);
        } else {
            auto page_table = allocate_page_table(*page_directory, page_directory_index);
#ifdef MM_DEBUG
            dbgprintf("MM: PD K%x (%s) allocated page table #%u (for L%x) at P%x\n",
                page_directory,
                page_directory == m_kernel_page_directory ? "Kernel" : "User",
                page_directory_index,
                laddr.get(),
                page_table->paddr().get());
#endif
            pde.setPageTableBase(page_table->paddr().get());
            pde.setUserAllowed(true);
            pde.setPresent(true);
            pde.setWritable(true);
            page_directory->physical_pages[page_directory_index] = move(page_table);
        }
    }
    return PageTableEntry(&pde.pageTableBase()[page_table_index]);
}
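
// Marks a range not-present so that any access faults. Used to make the
// null page trap dereferences of (near-)null pointers.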
void MemoryManager::protectMap(LinearAddress linearAddress, size_t length)
{
    InterruptDisabler disabler;
    // FIXME: ASSERT(linearAddress is 4KB aligned);
    for (dword offset = 0; offset < length; offset += PAGE_SIZE) {
        auto pteAddress = linearAddress.offset(offset);
        auto pte = ensurePTE(m_kernel_page_directory, pteAddress);
        pte.setPhysicalPageBase(pteAddress.get());
        pte.setUserAllowed(false);
        pte.setPresent(false);
        pte.setWritable(false);
        flushTLB(pteAddress);
    }
}

void MemoryManager::create_identity_mapping(LinearAddress laddr, size_t size)
{
    InterruptDisabler disabler;
    // FIXME: ASSERT(laddr is 4KB aligned);
    for (dword offset = 0; offset < size; offset += PAGE_SIZE) {
        auto pteAddress = laddr.offset(offset);
        auto pte = ensurePTE(m_kernel_page_directory, pteAddress);
        pte.setPhysicalPageBase(pteAddress.get());
        pte.setUserAllowed(false);
        pte.setPresent(true);
        pte.setWritable(true);
        flushTLB(pteAddress);
    }
}

void MemoryManager::initialize()
{
    s_the = new MemoryManager;
}

Region* MemoryManager::region_from_laddr(Process& process, LinearAddress laddr)
{
    ASSERT_INTERRUPTS_DISABLED();
    // FIXME: Use a binary search tree (maybe red/black?) or some other more appropriate data structure!
    for (auto& region : process.m_regions) {
        if (region->contains(laddr))
            return region.ptr();
    }
    kprintf("%s(%u) Couldn't find region for L%x\n", process.name().characters(), process.pid(), laddr.get());
    process.dumpRegions();
    ASSERT_NOT_REACHED();
}
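
// Resolves a write fault on a COW page: if we hold the only reference to the
// physical page, we simply remap it read/write; otherwise we copy it into a
// freshly allocated page via the quickmap slot and remap.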
bool MemoryManager::copy_on_write(Process& process, Region& region, unsigned page_index_in_region)
{
    ASSERT_INTERRUPTS_DISABLED();
    if (region.physical_pages[page_index_in_region]->retain_count() == 1) {
#ifdef PAGE_FAULT_DEBUG
        dbgprintf(" >> It's a COW page but nobody is sharing it anymore. Remap r/w\n");
#endif
        region.cow_map.set(page_index_in_region, false);
        remap_region_page(process.m_page_directory, region, page_index_in_region, true);
        return true;
    }
#ifdef PAGE_FAULT_DEBUG
    dbgprintf(" >> It's a COW page and it's time to COW!\n");
#endif
    auto physical_page_to_copy = move(region.physical_pages[page_index_in_region]);
    auto ppages = allocate_physical_pages(1);
    ASSERT(ppages.size() == 1);
    byte* dest_ptr = quickmap_page(*ppages[0]);
    const byte* src_ptr = region.linearAddress.offset(page_index_in_region * PAGE_SIZE).asPtr();
#ifdef PAGE_FAULT_DEBUG
    dbgprintf(" >> COW P%x <- P%x\n", ppages[0]->paddr().get(), physical_page_to_copy->paddr().get());
#endif
    memcpy(dest_ptr, src_ptr, PAGE_SIZE);
    region.physical_pages[page_index_in_region] = move(ppages[0]);
    unquickmap_page();
    region.cow_map.set(page_index_in_region, false);
    remap_region_page(process.m_page_directory, region, page_index_in_region, true);
    return true;
}
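
// Called from the page fault handler. Not-present faults are currently just
// logged and crash the process; protection violations on COW pages are
// resolved via copy_on_write().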
PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
{
    ASSERT_INTERRUPTS_DISABLED();
#ifdef PAGE_FAULT_DEBUG
    dbgprintf("MM: handle_page_fault(%w) at L%x\n", fault.code(), fault.laddr().get());
#endif
    auto* region = region_from_laddr(*current, fault.laddr());
    ASSERT(region);
    auto page_index_in_region = region->page_index_from_address(fault.laddr());
    if (fault.is_not_present()) {
        kprintf(" >> NP fault in Region{%p}[%u]\n", region, page_index_in_region);
    } else if (fault.is_protection_violation()) {
        if (region->cow_map.get(page_index_in_region)) {
#ifdef PAGE_FAULT_DEBUG
            dbgprintf(" >> PV (COW) fault in Region{%p}[%u]\n", region, page_index_in_region);
#endif
            bool success = copy_on_write(*current, *region, page_index_in_region);
            ASSERT(success);
            return PageFaultResponse::Continue;
        }
        kprintf(" >> PV fault in Region{%p}[%u]\n", region, page_index_in_region);
    } else {
        ASSERT_NOT_REACHED();
    }
    return PageFaultResponse::ShouldCrash;
}

Vector<RetainPtr<PhysicalPage>> MemoryManager::allocate_physical_pages(size_t count)
{
    InterruptDisabler disabler;
    if (count > m_free_physical_pages.size())
        return { };
    Vector<RetainPtr<PhysicalPage>> pages;
    pages.ensureCapacity(count);
    for (size_t i = 0; i < count; ++i) {
        pages.append(m_free_physical_pages.takeLast());
#ifdef MM_DEBUG
        dbgprintf("MM: allocate_physical_pages vending P%x\n", pages.last()->paddr().get());
#endif
    }
    return pages;
}

void MemoryManager::enter_kernel_paging_scope()
{
    InterruptDisabler disabler;
    current->m_tss.cr3 = (dword)m_kernel_page_directory;
    asm volatile("movl %%eax, %%cr3" ::"a"(m_kernel_page_directory) : "memory");
}

void MemoryManager::enter_process_paging_scope(Process& process)
{
    InterruptDisabler disabler;
    current->m_tss.cr3 = (dword)process.m_page_directory;
    asm volatile("movl %%eax, %%cr3" ::"a"(process.m_page_directory) : "memory");
}
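
// Reloading CR3 flushes all non-global TLB entries; `invlpg` invalidates the
// entry for a single page.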
void MemoryManager::flushEntireTLB()
{
    asm volatile(
        "mov %cr3, %eax\n"
        "mov %eax, %cr3\n");
}

void MemoryManager::flushTLB(LinearAddress laddr)
{
    asm volatile("invlpg %0" ::"m"(*(char*)laddr.get()) : "memory");
}
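
// Temporarily maps an arbitrary physical page at the fixed linear address
// 4 MB so the kernel can access its contents. There is only one such slot,
// hence interrupts must stay disabled between quickmap and unquickmap.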
byte* MemoryManager::quickmap_page(PhysicalPage& physical_page)
{
    ASSERT_INTERRUPTS_DISABLED();
    auto page_laddr = LinearAddress(4 * MB);
    auto pte = ensurePTE(m_kernel_page_directory, page_laddr);
    pte.setPhysicalPageBase(physical_page.paddr().get());
    pte.setPresent(true); // FIXME: Maybe we should use the is_readable flag here?
    pte.setWritable(true);
    pte.setUserAllowed(false);
    flushTLB(page_laddr);
#ifdef MM_DEBUG
    dbgprintf("MM: >> quickmap_page L%x => P%x\n", page_laddr.get(), physical_page.paddr().get());
#endif
    return page_laddr.asPtr();
}

void MemoryManager::unquickmap_page()
{
    ASSERT_INTERRUPTS_DISABLED();
    auto page_laddr = LinearAddress(4 * MB);
    auto pte = ensurePTE(m_kernel_page_directory, page_laddr);
#ifdef MM_DEBUG
    auto old_physical_address = pte.physicalPageBase();
#endif
    pte.setPhysicalPageBase(0);
    pte.setPresent(false);
    pte.setWritable(false);
    pte.setUserAllowed(false);
    flushTLB(page_laddr);
#ifdef MM_DEBUG
    dbgprintf("MM: >> unquickmap_page L%x =/> P%x\n", page_laddr.get(), old_physical_address);
#endif
}
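
// Updates the PTE for a single page of a region, honoring the COW map:
// pages marked COW are mapped read-only so that writes fault.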
void MemoryManager::remap_region_page(PageDirectory* page_directory, Region& region, unsigned page_index_in_region, bool user_allowed)
{
    InterruptDisabler disabler;
    auto page_laddr = region.linearAddress.offset(page_index_in_region * PAGE_SIZE);
    auto pte = ensurePTE(page_directory, page_laddr);
    auto& physical_page = region.physical_pages[page_index_in_region];
    ASSERT(physical_page);
    pte.setPhysicalPageBase(physical_page->paddr().get());
    pte.setPresent(true); // FIXME: Maybe we should use the is_readable flag here?
    if (region.cow_map.get(page_index_in_region))
        pte.setWritable(false);
    else
        pte.setWritable(region.is_writable);
    pte.setUserAllowed(user_allowed);
    flushTLB(page_laddr);
#ifdef MM_DEBUG
    dbgprintf("MM: >> remap_region_page (PD=%x) '%s' L%x => P%x (@%p)\n", page_directory, region.name.characters(), page_laddr.get(), physical_page->paddr().get(), physical_page.ptr());
#endif
}

void MemoryManager::remap_region(Process& process, Region& region)
{
    InterruptDisabler disabler;
    map_region_at_address(process.m_page_directory, region, region.linearAddress, true);
}

void MemoryManager::map_region_at_address(PageDirectory* page_directory, Region& region, LinearAddress laddr, bool user_allowed)
{
    InterruptDisabler disabler;
    for (size_t i = 0; i < region.physical_pages.size(); ++i) {
        auto page_laddr = laddr.offset(i * PAGE_SIZE);
        auto pte = ensurePTE(page_directory, page_laddr);
        auto& physical_page = region.physical_pages[i];
        if (physical_page) {
            pte.setPhysicalPageBase(physical_page->paddr().get());
            pte.setPresent(true); // FIXME: Maybe we should use the is_readable flag here?
            if (region.cow_map.get(i))
                pte.setWritable(false);
            else
                pte.setWritable(region.is_writable);
        } else {
            pte.setPhysicalPageBase(0);
            pte.setPresent(false);
            pte.setWritable(region.is_writable);
        }
        pte.setUserAllowed(user_allowed);
        flushTLB(page_laddr);
#ifdef MM_DEBUG
        dbgprintf("MM: >> map_region_at_address (PD=%x) '%s' L%x => P%x (@%p)\n", page_directory, region.name.characters(), page_laddr.get(), physical_page ? physical_page->paddr().get() : 0, physical_page.ptr());
#endif
    }
}

void MemoryManager::unmap_range(PageDirectory* page_directory, LinearAddress laddr, size_t size)
{
    ASSERT((size % PAGE_SIZE) == 0);
    InterruptDisabler disabler;
    size_t numPages = size / PAGE_SIZE;
    for (size_t i = 0; i < numPages; ++i) {
        auto page_laddr = laddr.offset(i * PAGE_SIZE);
        auto pte = ensurePTE(page_directory, page_laddr);
        pte.setPhysicalPageBase(0);
        pte.setPresent(false);
        pte.setWritable(false);
        pte.setUserAllowed(false);
        flushTLB(page_laddr);
#ifdef MM_DEBUG
        dbgprintf("MM: << unmap_range L%x =/> 0\n", page_laddr.get());
#endif
    }
}

LinearAddress MemoryManager::allocate_linear_address_range(size_t size)
{
    ASSERT((size % PAGE_SIZE) == 0);
    // FIXME: Recycle ranges!
    auto laddr = m_next_laddr;
    m_next_laddr.set(m_next_laddr.get() + size);
    return laddr;
}
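
// Maps an existing region a second time, at a kernel-only linear address in
// the kernel page directory, so kernel code can access the region's pages
// through its own page directory.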
byte* MemoryManager::create_kernel_alias_for_region(Region& region)
{
    InterruptDisabler disabler;
#ifdef MM_DEBUG
    dbgprintf("MM: create_kernel_alias_for_region region=%p (L%x size=%u)\n", &region, region.linearAddress.get(), region.size);
#endif
    auto laddr = allocate_linear_address_range(region.size);
    map_region_at_address(m_kernel_page_directory, region, laddr, false);
#ifdef MM_DEBUG
    dbgprintf("MM: Created alias L%x for L%x\n", laddr.get(), region.linearAddress.get());
#endif
    return laddr.asPtr();
}

void MemoryManager::remove_kernel_alias_for_region(Region& region, byte* addr)
{
#ifdef MM_DEBUG
    dbgprintf("remove_kernel_alias_for_region region=%p, addr=L%x\n", &region, addr);
#endif
    unmap_range(m_kernel_page_directory, LinearAddress((dword)addr), region.size);
}

bool MemoryManager::unmapRegion(Process& process, Region& region)
{
    InterruptDisabler disabler;
    for (size_t i = 0; i < region.physical_pages.size(); ++i) {
        auto laddr = region.linearAddress.offset(i * PAGE_SIZE);
        auto pte = ensurePTE(process.m_page_directory, laddr);
        pte.setPhysicalPageBase(0);
        pte.setPresent(false);
        pte.setWritable(false);
        pte.setUserAllowed(false);
        flushTLB(laddr);
#ifdef MM_DEBUG
        auto& physical_page = region.physical_pages[i];
        dbgprintf("MM: >> Unmapped L%x => P%x <<\n", laddr.get(), physical_page ? physical_page->paddr().get() : 0);
#endif
    }
    return true;
}

bool MemoryManager::mapRegion(Process& process, Region& region)
{
    map_region_at_address(process.m_page_directory, region, region.linearAddress, true);
    return true;
}
bool MemoryManager::validate_user_read(const Process& process, LinearAddress laddr) const
{
    dword pageDirectoryIndex = (laddr.get() >> 22) & 0x3ff;
    dword pageTableIndex = (laddr.get() >> 12) & 0x3ff;
    auto pde = PageDirectoryEntry(&process.m_page_directory->entries[pageDirectoryIndex]);
    if (!pde.isPresent())
        return false;
    auto pte = PageTableEntry(&pde.pageTableBase()[pageTableIndex]);
    if (!pte.isPresent())
        return false;
    if (!pte.isUserAllowed())
        return false;
    return true;
}

bool MemoryManager::validate_user_write(const Process& process, LinearAddress laddr) const
{
    dword pageDirectoryIndex = (laddr.get() >> 22) & 0x3ff;
    dword pageTableIndex = (laddr.get() >> 12) & 0x3ff;
    auto pde = PageDirectoryEntry(&process.m_page_directory->entries[pageDirectoryIndex]);
    if (!pde.isPresent())
        return false;
    auto pte = PageTableEntry(&pde.pageTableBase()[pageTableIndex]);
    if (!pte.isPresent())
        return false;
    if (!pte.isUserAllowed())
        return false;
    if (!pte.isWritable())
        return false;
    return true;
}
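
// Read-only regions can share physical pages outright. Writable regions are
// cloned lazily: both parent and child are marked copy-on-write, and the
// actual page copy happens in copy_on_write() on the first write fault.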
RetainPtr<Region> Region::clone()
{
    InterruptDisabler disabler;
    if (is_readable && !is_writable) {
        // Create a new region backed by the same physical pages.
        return adopt(*new Region(linearAddress, size, physical_pages, String(name), is_readable, is_writable));
    }
    // Set up a COW region. The parent (this) region becomes COW as well!
    for (size_t i = 0; i < physical_pages.size(); ++i)
        cow_map.set(i, true);
    MM.remap_region(*current, *this);
    return adopt(*new Region(linearAddress, size, physical_pages, String(name), is_readable, is_writable, true));
}

Region::Region(LinearAddress a, size_t s, Vector<RetainPtr<PhysicalPage>> pp, String&& n, bool r, bool w, bool cow)
    : linearAddress(a)
    , size(s)
    , physical_pages(move(pp))
    , name(move(n))
    , is_readable(r)
    , is_writable(w)
    , cow_map(Bitmap::create(physical_pages.size(), cow))
{
}

Region::~Region()
{
}
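
// Called when the last RetainPtr to this page is dropped. The retain count is
// reset to 1 so the freelist's RetainPtr (created via adopt) owns the page.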
void PhysicalPage::return_to_freelist()
{
    InterruptDisabler disabler;
    m_retain_count = 1;
    MM.m_free_physical_pages.append(adopt(*this));
#ifdef MM_DEBUG
    dbgprintf("MM: P%x released to freelist\n", m_paddr.get());
#endif
}