/*
 * Copyright (c) 2018-2022, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Assertions.h>
#include <AK/Memory.h>
#include <AK/StringView.h>
#include <Kernel/Arch/x86/PageFault.h>
#include <Kernel/BootInfo.h>
#include <Kernel/CMOS.h>
#include <Kernel/FileSystem/Inode.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/KSyms.h>
#include <Kernel/Memory/AnonymousVMObject.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/PageDirectory.h>
#include <Kernel/Memory/PhysicalRegion.h>
#include <Kernel/Memory/SharedInodeVMObject.h>
#include <Kernel/Multiboot.h>
#include <Kernel/Panic.h>
#include <Kernel/Process.h>
#include <Kernel/Sections.h>
#include <Kernel/StdLib.h>

extern u8 start_of_kernel_image[];
extern u8 end_of_kernel_image[];
extern u8 start_of_kernel_text[];
extern u8 start_of_kernel_data[];
extern u8 end_of_kernel_bss[];
extern u8 start_of_ro_after_init[];
extern u8 end_of_ro_after_init[];
extern u8 start_of_unmap_after_init[];
extern u8 end_of_unmap_after_init[];
extern u8 start_of_kernel_ksyms[];
extern u8 end_of_kernel_ksyms[];

extern multiboot_module_entry_t multiboot_copy_boot_modules_array[16];
extern size_t multiboot_copy_boot_modules_count;

// Treat the super pages as logically separate from .bss
// FIXME: Find a solution so we don't need to expand this range whenever
// too many drivers end up trying to allocate super pages.
__attribute__((section(".super_pages"))) static u8 super_pages[4 * MiB];

namespace Kernel::Memory {

ErrorOr<FlatPtr> page_round_up(FlatPtr x)
{
    if (x > (explode_byte(0xFF) & ~0xFFF)) {
        return Error::from_errno(EINVAL);
    }
    return (((FlatPtr)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1));
}
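// A quick worked example of the rounding above, assuming the usual 4 KiB
// PAGE_SIZE (values are illustrative, not taken from a real boot):
//
//     page_round_up(0x1001) -> 0x2000
//     page_round_up(0x3000) -> 0x3000
//     page_round_up(<anything in the topmost page of the address space>) -> EINVAL
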
// NOTE: We can NOT use Singleton for this class, because
// MemoryManager::initialize is called *before* global constructors are
// run. If we do, then Singleton would get re-initialized, causing
// the memory manager to be initialized twice!
static MemoryManager* s_the;
RecursiveSpinlock s_mm_lock { LockRank::MemoryManager };

MemoryManager& MemoryManager::the()
{
    return *s_the;
}

bool MemoryManager::is_initialized()
{
    return s_the != nullptr;
}

UNMAP_AFTER_INIT MemoryManager::MemoryManager()
{
    s_the = this;

    SpinlockLocker lock(s_mm_lock);
    parse_memory_map();
    write_cr3(kernel_page_directory().cr3());
    protect_kernel_image();

    // We're temporarily "committing" to two pages that we need to allocate below
    auto committed_pages = commit_user_physical_pages(2).release_value();

    m_shared_zero_page = committed_pages.take_one();
    // We're wasting a page here: we just need a special tag (physical
    // address) so that we know, when we need to lazily allocate a page,
    // that we should draw it from the committed pool rather than
    // potentially failing if no pages are available anymore.
    // By using a tag we don't have to query the VMObject for every page
    // whether it was committed or not.
    m_lazy_committed_page = committed_pages.take_one();
}

UNMAP_AFTER_INIT MemoryManager::~MemoryManager()
{
}

UNMAP_AFTER_INIT void MemoryManager::protect_kernel_image()
{
    SpinlockLocker page_lock(kernel_page_directory().get_lock());
    // Disable writing to the kernel text and rodata segments.
    for (auto const* i = start_of_kernel_text; i < start_of_kernel_data; i += PAGE_SIZE) {
        auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
        pte.set_writable(false);
    }
    if (Processor::current().has_feature(CPUFeature::NX)) {
        // Disable execution of the kernel data, bss and heap segments.
        for (auto const* i = start_of_kernel_data; i < end_of_kernel_image; i += PAGE_SIZE) {
            auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
            pte.set_execute_disabled(true);
        }
    }
}

UNMAP_AFTER_INIT void MemoryManager::unmap_prekernel()
{
    SpinlockLocker page_lock(kernel_page_directory().get_lock());
    SpinlockLocker mm_lock(s_mm_lock);

    auto start = start_of_prekernel_image.page_base().get();
    auto end = end_of_prekernel_image.page_base().get();

    for (auto i = start; i <= end; i += PAGE_SIZE)
        release_pte(kernel_page_directory(), VirtualAddress(i), i == end ? IsLastPTERelease::Yes : IsLastPTERelease::No);
    flush_tlb(&kernel_page_directory(), VirtualAddress(start), (end - start) / PAGE_SIZE);
}

UNMAP_AFTER_INIT void MemoryManager::protect_readonly_after_init_memory()
{
    SpinlockLocker page_lock(kernel_page_directory().get_lock());
    SpinlockLocker mm_lock(s_mm_lock);
    // Disable writing to the .ro_after_init section
    for (auto i = (FlatPtr)&start_of_ro_after_init; i < (FlatPtr)&end_of_ro_after_init; i += PAGE_SIZE) {
        auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
        pte.set_writable(false);
        flush_tlb(&kernel_page_directory(), VirtualAddress(i));
    }
}

void MemoryManager::unmap_text_after_init()
{
    SpinlockLocker page_lock(kernel_page_directory().get_lock());
    SpinlockLocker mm_lock(s_mm_lock);

    auto start = page_round_down((FlatPtr)&start_of_unmap_after_init);
    auto end = page_round_up((FlatPtr)&end_of_unmap_after_init).release_value_but_fixme_should_propagate_errors();

    // Unmap the entire .unmap_after_init section
    for (auto i = start; i < end; i += PAGE_SIZE) {
        auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
        pte.clear();
        flush_tlb(&kernel_page_directory(), VirtualAddress(i));
    }

    dmesgln("Unmapped {} KiB of kernel text after init! :^)", (end - start) / KiB);
}

UNMAP_AFTER_INIT void MemoryManager::protect_ksyms_after_init()
{
    SpinlockLocker mm_lock(s_mm_lock);
    SpinlockLocker page_lock(kernel_page_directory().get_lock());

    auto start = page_round_down((FlatPtr)start_of_kernel_ksyms);
    auto end = page_round_up((FlatPtr)end_of_kernel_ksyms).release_value_but_fixme_should_propagate_errors();

    for (auto i = start; i < end; i += PAGE_SIZE) {
        auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
        pte.set_writable(false);
        flush_tlb(&kernel_page_directory(), VirtualAddress(i));
    }

    dmesgln("Write-protected kernel symbols after init.");
}

IterationDecision MemoryManager::for_each_physical_memory_range(Function<IterationDecision(PhysicalMemoryRange const&)> callback)
{
    VERIFY(!m_physical_memory_ranges.is_empty());
    for (auto& current_range : m_physical_memory_ranges) {
        IterationDecision decision = callback(current_range);
        if (decision != IterationDecision::Continue)
            return decision;
    }
    return IterationDecision::Continue;
}
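// A minimal usage sketch for the iteration helper above (hypothetical caller,
// not part of this file): counting the usable ranges reported by the bootloader.
//
//     size_t usable_range_count = 0;
//     MM.for_each_physical_memory_range([&](auto const& range) {
//         if (range.type == PhysicalMemoryRangeType::Usable)
//             ++usable_range_count;
//         return IterationDecision::Continue;
//     });
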
UNMAP_AFTER_INIT void MemoryManager::register_reserved_ranges()
{
    VERIFY(!m_physical_memory_ranges.is_empty());
    ContiguousReservedMemoryRange range;
    for (auto& current_range : m_physical_memory_ranges) {
        if (current_range.type != PhysicalMemoryRangeType::Reserved) {
            if (range.start.is_null())
                continue;
            m_reserved_memory_ranges.append(ContiguousReservedMemoryRange { range.start, current_range.start.get() - range.start.get() });
            range.start.set((FlatPtr) nullptr);
            continue;
        }
        if (!range.start.is_null()) {
            continue;
        }
        range.start = current_range.start;
    }
    if (m_physical_memory_ranges.last().type != PhysicalMemoryRangeType::Reserved)
        return;
    if (range.start.is_null())
        return;
    m_reserved_memory_ranges.append(ContiguousReservedMemoryRange { range.start, m_physical_memory_ranges.last().start.get() + m_physical_memory_ranges.last().length - range.start.get() });
}

bool MemoryManager::is_allowed_to_read_physical_memory_for_userspace(PhysicalAddress start_address, size_t read_length) const
{
    // Note: Guard against overflow in case someone tries to mmap on the edge of
    // the RAM
    if (start_address.offset_addition_would_overflow(read_length))
        return false;
    auto end_address = start_address.offset(read_length);
    for (auto const& current_range : m_reserved_memory_ranges) {
        if (current_range.start > start_address)
            continue;
        if (current_range.start.offset(current_range.length) < end_address)
            continue;
        return true;
    }
    return false;
}
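// For example (illustrative numbers): given a reserved range starting at
// 0xE0000000 with length 0x100000, a 0x1000-byte read at 0xE00FF000 ends
// exactly at the range boundary and is allowed, while the same read at
// 0xE00FF800 would spill past the range and is rejected.
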
UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
{
    // Register used memory regions that we know of.
    m_used_memory_ranges.ensure_capacity(4);
    m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::LowMemory, PhysicalAddress(0x00000000), PhysicalAddress(1 * MiB) });
    m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical((FlatPtr)start_of_kernel_image)), PhysicalAddress(page_round_up(virtual_to_low_physical((FlatPtr)end_of_kernel_image)).release_value_but_fixme_should_propagate_errors()) });

    if (multiboot_flags & 0x4) {
        auto* bootmods_start = multiboot_copy_boot_modules_array;
        auto* bootmods_end = bootmods_start + multiboot_copy_boot_modules_count;

        for (auto* bootmod = bootmods_start; bootmod < bootmods_end; bootmod++) {
            m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::BootModule, PhysicalAddress(bootmod->start), PhysicalAddress(bootmod->end) });
        }
    }

    auto* mmap_begin = multiboot_memory_map;
    auto* mmap_end = multiboot_memory_map + multiboot_memory_map_count;

    struct ContiguousPhysicalVirtualRange {
        PhysicalAddress lower;
        PhysicalAddress upper;
    };

    Vector<ContiguousPhysicalVirtualRange> contiguous_physical_ranges;

    for (auto* mmap = mmap_begin; mmap < mmap_end; mmap++) {
        // We have to copy these onto the stack, because we take a reference to these when printing them out,
        // and doing so on a packed struct field is UB.
        auto address = mmap->addr;
        auto length = mmap->len;
        ArmedScopeGuard write_back_guard = [&]() {
            mmap->addr = address;
            mmap->len = length;
        };

        dmesgln("MM: Multiboot mmap: address={:p}, length={}, type={}", address, length, mmap->type);

        auto start_address = PhysicalAddress(address);
        switch (mmap->type) {
        case (MULTIBOOT_MEMORY_AVAILABLE):
            m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Usable, start_address, length });
            break;
        case (MULTIBOOT_MEMORY_RESERVED):
            m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Reserved, start_address, length });
            break;
        case (MULTIBOOT_MEMORY_ACPI_RECLAIMABLE):
            m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::ACPI_Reclaimable, start_address, length });
            break;
        case (MULTIBOOT_MEMORY_NVS):
            m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::ACPI_NVS, start_address, length });
            break;
        case (MULTIBOOT_MEMORY_BADRAM):
            dmesgln("MM: Warning, detected bad memory range!");
            m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::BadMemory, start_address, length });
            break;
        default:
            dbgln("MM: Unknown range!");
            m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Unknown, start_address, length });
            break;
        }

        if (mmap->type != MULTIBOOT_MEMORY_AVAILABLE)
            continue;

        // Fix up unaligned memory regions.
        auto diff = (FlatPtr)address % PAGE_SIZE;
        if (diff != 0) {
            dmesgln("MM: Got an unaligned physical_region from the bootloader; correcting {:p} by {} bytes", address, diff);
            diff = PAGE_SIZE - diff;
            address += diff;
            length -= diff;
        }
        if ((length % PAGE_SIZE) != 0) {
            dmesgln("MM: Got an unaligned physical_region from the bootloader; correcting length {} by {} bytes", length, length % PAGE_SIZE);
            length -= length % PAGE_SIZE;
        }
        if (length < PAGE_SIZE) {
            dmesgln("MM: Memory physical_region from bootloader is too small; we want >= {} bytes, but got {} bytes", PAGE_SIZE, length);
            continue;
        }

        for (PhysicalSize page_base = address; page_base <= (address + length); page_base += PAGE_SIZE) {
            auto addr = PhysicalAddress(page_base);

            // Skip used memory ranges.
            bool should_skip = false;
            for (auto& used_range : m_used_memory_ranges) {
                if (addr.get() >= used_range.start.get() && addr.get() <= used_range.end.get()) {
                    should_skip = true;
                    break;
                }
            }
            if (should_skip)
                continue;
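            // Coalesce pages into contiguous ranges: either extend the previous
            // range (when this page immediately follows it) or start a new one.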
            if (contiguous_physical_ranges.is_empty() || contiguous_physical_ranges.last().upper.offset(PAGE_SIZE) != addr) {
                contiguous_physical_ranges.append(ContiguousPhysicalVirtualRange {
                    .lower = addr,
                    .upper = addr,
                });
            } else {
                contiguous_physical_ranges.last().upper = addr;
            }
        }
    }

    for (auto& range : contiguous_physical_ranges) {
        m_user_physical_regions.append(PhysicalRegion::try_create(range.lower, range.upper).release_nonnull());
    }
    // Super pages are guaranteed to be in the first 16 MiB of physical memory.
    VERIFY(virtual_to_low_physical((FlatPtr)super_pages) + sizeof(super_pages) < 0x1000000);

    // Append the statically-allocated super physical region.
    m_super_physical_region = PhysicalRegion::try_create(
        PhysicalAddress(virtual_to_low_physical(FlatPtr(super_pages))),
        PhysicalAddress(virtual_to_low_physical(FlatPtr(super_pages + sizeof(super_pages)))));
    VERIFY(m_super_physical_region);
    m_system_memory_info.super_physical_pages += m_super_physical_region->size();

    for (auto& region : m_user_physical_regions)
        m_system_memory_info.user_physical_pages += region.size();

    register_reserved_ranges();
    for (auto& range : m_reserved_memory_ranges) {
        dmesgln("MM: Contiguous reserved range from {}, length is {}", range.start, range.length);
    }

    initialize_physical_pages();

    VERIFY(m_system_memory_info.super_physical_pages > 0);
    VERIFY(m_system_memory_info.user_physical_pages > 0);

    // We start out with no committed pages
    m_system_memory_info.user_physical_pages_uncommitted = m_system_memory_info.user_physical_pages;

    for (auto& used_range : m_used_memory_ranges) {
        dmesgln("MM: {} range @ {} - {} (size {:#x})", UserMemoryRangeTypeNames[to_underlying(used_range.type)], used_range.start, used_range.end.offset(-1), used_range.end.as_ptr() - used_range.start.as_ptr());
    }

    dmesgln("MM: Super physical region: {} - {} (size {:#x})", m_super_physical_region->lower(), m_super_physical_region->upper().offset(-1), PAGE_SIZE * m_super_physical_region->size());
    m_super_physical_region->initialize_zones();

    for (auto& region : m_user_physical_regions) {
        dmesgln("MM: User physical region: {} - {} (size {:#x})", region.lower(), region.upper().offset(-1), PAGE_SIZE * region.size());
        region.initialize_zones();
    }
}

UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
{
    // We assume that the physical page range is contiguous and doesn't contain huge gaps!
    PhysicalAddress highest_physical_address;
    for (auto& range : m_used_memory_ranges) {
        if (range.end.get() > highest_physical_address.get())
            highest_physical_address = range.end;
    }
    for (auto& region : m_physical_memory_ranges) {
        auto range_end = PhysicalAddress(region.start).offset(region.length);
        if (range_end.get() > highest_physical_address.get())
            highest_physical_address = range_end;
    }

    // Calculate how many total physical pages the array will have
    m_physical_page_entries_count = PhysicalAddress::physical_page_index(highest_physical_address.get()) + 1;
    VERIFY(m_physical_page_entries_count != 0);
    VERIFY(!Checked<decltype(m_physical_page_entries_count)>::multiplication_would_overflow(m_physical_page_entries_count, sizeof(PhysicalPageEntry)));

    // Calculate how many bytes the array will consume
    auto physical_page_array_size = m_physical_page_entries_count * sizeof(PhysicalPageEntry);
    auto physical_page_array_pages = page_round_up(physical_page_array_size).release_value_but_fixme_should_propagate_errors() / PAGE_SIZE;
    VERIFY(physical_page_array_pages * PAGE_SIZE >= physical_page_array_size);

    // Calculate how many page tables we will need to be able to map them all
    auto needed_page_table_count = (physical_page_array_pages + 512 - 1) / 512;

    auto physical_page_array_pages_and_page_tables_count = physical_page_array_pages + needed_page_table_count;
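    // A worked example of the arithmetic above (illustrative numbers): if the
    // PhysicalPageEntry array needs 4096 pages, then (4096 + 511) / 512 = 8
    // page tables are required to map it, so 4104 pages in total are carved
    // out of the chosen region below.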
    // Now that we know how much memory we need for a contiguous array of PhysicalPage instances, find a memory region that can fit it
    PhysicalRegion* found_region { nullptr };
    Optional<size_t> found_region_index;
    for (size_t i = 0; i < m_user_physical_regions.size(); ++i) {
        auto& region = m_user_physical_regions[i];
        if (region.size() >= physical_page_array_pages_and_page_tables_count) {
            found_region = &region;
            found_region_index = i;
            break;
        }
    }

    if (!found_region) {
  359. dmesgln("MM: Need {} bytes for physical page management, but no memory region is large enough!", physical_page_array_pages_and_page_tables_count);
        VERIFY_NOT_REACHED();
    }

    VERIFY(m_system_memory_info.user_physical_pages >= physical_page_array_pages_and_page_tables_count);
    m_system_memory_info.user_physical_pages -= physical_page_array_pages_and_page_tables_count;

    if (found_region->size() == physical_page_array_pages_and_page_tables_count) {
        // We're stealing the entire region
        m_physical_pages_region = m_user_physical_regions.take(*found_region_index);
    } else {
        m_physical_pages_region = found_region->try_take_pages_from_beginning(physical_page_array_pages_and_page_tables_count);
    }
    m_used_memory_ranges.append({ UsedMemoryRangeType::PhysicalPages, m_physical_pages_region->lower(), m_physical_pages_region->upper() });

    // Create the bare page directory. This is not a fully constructed page directory and merely contains the allocators!
    m_kernel_page_directory = PageDirectory::must_create_kernel_page_directory();

    // Allocate a virtual address range for our array
    auto range_or_error = m_kernel_page_directory->range_allocator().try_allocate_anywhere(physical_page_array_pages * PAGE_SIZE);
    if (range_or_error.is_error()) {
        dmesgln("MM: Could not allocate {} bytes to map physical page array!", physical_page_array_pages * PAGE_SIZE);
        VERIFY_NOT_REACHED();
    }
    auto range = range_or_error.release_value();

    // Now that we have our special m_physical_pages_region region with enough pages to hold the entire array
    // try to map the entire region into kernel space so we always have it
    // We can't use ensure_pte here because it would try to allocate a PhysicalPage and we don't have the array
    // mapped yet so we can't create them
    SpinlockLocker lock(s_mm_lock);

    // Create page tables at the beginning of m_physical_pages_region, followed by the PhysicalPageEntry array
    auto page_tables_base = m_physical_pages_region->lower();
    auto physical_page_array_base = page_tables_base.offset(needed_page_table_count * PAGE_SIZE);
    auto physical_page_array_current_page = physical_page_array_base.get();
    auto virtual_page_array_base = range.base().get();
    auto virtual_page_array_current_page = virtual_page_array_base;
    for (size_t pt_index = 0; pt_index < needed_page_table_count; pt_index++) {
        auto virtual_page_base_for_this_pt = virtual_page_array_current_page;
        auto pt_paddr = page_tables_base.offset(pt_index * PAGE_SIZE);
        auto* pt = reinterpret_cast<PageTableEntry*>(quickmap_page(pt_paddr));
        __builtin_memset(pt, 0, PAGE_SIZE);
        for (size_t pte_index = 0; pte_index < PAGE_SIZE / sizeof(PageTableEntry); pte_index++) {
            auto& pte = pt[pte_index];
            pte.set_physical_page_base(physical_page_array_current_page);
            pte.set_user_allowed(false);
            pte.set_writable(true);
            if (Processor::current().has_feature(CPUFeature::NX))
                pte.set_execute_disabled(true);
            pte.set_global(true);
            pte.set_present(true);

            physical_page_array_current_page += PAGE_SIZE;
            virtual_page_array_current_page += PAGE_SIZE;
        }
        unquickmap_page();

        // Hook the page table into the kernel page directory
        u32 page_directory_index = (virtual_page_base_for_this_pt >> 21) & 0x1ff;
        auto* pd = reinterpret_cast<PageDirectoryEntry*>(quickmap_page(boot_pd_kernel));
        PageDirectoryEntry& pde = pd[page_directory_index];

        VERIFY(!pde.is_present()); // Nothing should be using this PD yet

        // We can't use ensure_pte quite yet!
        pde.set_page_table_base(pt_paddr.get());
        pde.set_user_allowed(false);
        pde.set_present(true);
        pde.set_writable(true);
        pde.set_global(true);

        unquickmap_page();
        flush_tlb_local(VirtualAddress(virtual_page_base_for_this_pt));
    }

    // We now have the entire PhysicalPageEntry array mapped!
    m_physical_page_entries = (PhysicalPageEntry*)range.base().get();
    for (size_t i = 0; i < m_physical_page_entries_count; i++)
        new (&m_physical_page_entries[i]) PhysicalPageEntry();
    // Now we should be able to allocate PhysicalPage instances,
    // so finish setting up the kernel page directory
    m_kernel_page_directory->allocate_kernel_directory();

    // Now create legit PhysicalPage objects for the page tables we created.
    virtual_page_array_current_page = virtual_page_array_base;
    for (size_t pt_index = 0; pt_index < needed_page_table_count; pt_index++) {
        VERIFY(virtual_page_array_current_page <= range.end().get());
        auto pt_paddr = page_tables_base.offset(pt_index * PAGE_SIZE);
        auto physical_page_index = PhysicalAddress::physical_page_index(pt_paddr.get());
        auto& physical_page_entry = m_physical_page_entries[physical_page_index];
        auto physical_page = adopt_ref(*new (&physical_page_entry.allocated.physical_page) PhysicalPage(MayReturnToFreeList::No));

        // NOTE: This leaked ref is matched by the unref in MemoryManager::release_pte()
        (void)physical_page.leak_ref();

        virtual_page_array_current_page += (PAGE_SIZE / sizeof(PageTableEntry)) * PAGE_SIZE;
    }

    dmesgln("MM: Physical page entries: {}", range);
}

PhysicalPageEntry& MemoryManager::get_physical_page_entry(PhysicalAddress physical_address)
{
    VERIFY(m_physical_page_entries);
    auto physical_page_entry_index = PhysicalAddress::physical_page_index(physical_address.get());
    VERIFY(physical_page_entry_index < m_physical_page_entries_count);
    return m_physical_page_entries[physical_page_entry_index];
}

PhysicalAddress MemoryManager::get_physical_address(PhysicalPage const& physical_page)
{
    PhysicalPageEntry const& physical_page_entry = *reinterpret_cast<PhysicalPageEntry const*>((u8 const*)&physical_page - __builtin_offsetof(PhysicalPageEntry, allocated.physical_page));
    VERIFY(m_physical_page_entries);
    size_t physical_page_entry_index = &physical_page_entry - m_physical_page_entries;
    VERIFY(physical_page_entry_index < m_physical_page_entries_count);
    return PhysicalAddress((PhysicalPtr)physical_page_entry_index * PAGE_SIZE);
}

PageTableEntry* MemoryManager::pte(PageDirectory& page_directory, VirtualAddress vaddr)
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(s_mm_lock.is_locked_by_current_processor());
    VERIFY(page_directory.get_lock().is_locked_by_current_processor());
    u32 page_directory_table_index = (vaddr.get() >> 30) & 0x1ff;
    u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
    u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
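    // These three 9-bit slices select the entry at each level of the paging
    // hierarchy (page directory pointer table -> page directory -> page table).
    // For an illustrative address 0xC0203045: bits 30-38 give PDPT index 3,
    // bits 21-29 give page directory index 1, bits 12-20 give page table
    // index 3, and the low 12 bits (0x045) are the offset within the page.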
    auto* pd = quickmap_pd(const_cast<PageDirectory&>(page_directory), page_directory_table_index);
    PageDirectoryEntry const& pde = pd[page_directory_index];
    if (!pde.is_present())
        return nullptr;

    return &quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base()))[page_table_index];
}

PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(s_mm_lock.is_locked_by_current_processor());
    VERIFY(page_directory.get_lock().is_locked_by_current_processor());
    u32 page_directory_table_index = (vaddr.get() >> 30) & 0x1ff;
    u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
    u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;

    auto* pd = quickmap_pd(page_directory, page_directory_table_index);
    auto& pde = pd[page_directory_index];
    if (pde.is_present())
        return &quickmap_pt(PhysicalAddress(pde.page_table_base()))[page_table_index];

    bool did_purge = false;
    auto page_table_or_error = allocate_user_physical_page(ShouldZeroFill::Yes, &did_purge);
    if (page_table_or_error.is_error()) {
        dbgln("MM: Unable to allocate page table to map {}", vaddr);
        return nullptr;
    }
    auto page_table = page_table_or_error.release_value();
    if (did_purge) {
        // If any memory had to be purged, ensure_pte may have been called as part
        // of the purging process. So we need to re-map the pd in this case to ensure
        // we're writing to the correct underlying physical page
        pd = quickmap_pd(page_directory, page_directory_table_index);
        VERIFY(&pde == &pd[page_directory_index]); // Sanity check

        VERIFY(!pde.is_present()); // Should have not changed
    }
    pde.set_page_table_base(page_table->paddr().get());
    pde.set_user_allowed(true);
    pde.set_present(true);
    pde.set_writable(true);
    pde.set_global(&page_directory == m_kernel_page_directory.ptr());

    // NOTE: This leaked ref is matched by the unref in MemoryManager::release_pte()
    (void)page_table.leak_ref();

    return &quickmap_pt(PhysicalAddress(pde.page_table_base()))[page_table_index];
}

void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress vaddr, IsLastPTERelease is_last_pte_release)
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(s_mm_lock.is_locked_by_current_processor());
    VERIFY(page_directory.get_lock().is_locked_by_current_processor());
    u32 page_directory_table_index = (vaddr.get() >> 30) & 0x1ff;
    u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
    u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;

    auto* pd = quickmap_pd(page_directory, page_directory_table_index);
    PageDirectoryEntry& pde = pd[page_directory_index];
    if (pde.is_present()) {
        auto* page_table = quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base()));
        auto& pte = page_table[page_table_index];
        pte.clear();

        if (is_last_pte_release == IsLastPTERelease::Yes || page_table_index == 0x1ff) {
            // If this is the last PTE in a region or the last PTE in a page table then
            // check if we can also release the page table
            bool all_clear = true;
            for (u32 i = 0; i <= 0x1ff; i++) {
                if (!page_table[i].is_null()) {
                    all_clear = false;
                    break;
                }
            }
            if (all_clear) {
                get_physical_page_entry(PhysicalAddress { pde.page_table_base() }).allocated.physical_page.unref();
                pde.clear();
            }
        }
    }
}

UNMAP_AFTER_INIT void MemoryManager::initialize(u32 cpu)
{
    ProcessorSpecific<MemoryManagerData>::initialize();

    if (cpu == 0) {
        new MemoryManager;
        kmalloc_enable_expand();
    }
}

Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
{
    if (is_user_address(vaddr))
        return nullptr;

    SpinlockLocker lock(s_mm_lock);
    auto* region = MM.m_kernel_regions.find_largest_not_above(vaddr.get());
    if (!region || !region->contains(vaddr))
        return nullptr;
    return region;
}

Region* MemoryManager::find_user_region_from_vaddr_no_lock(AddressSpace& space, VirtualAddress vaddr)
{
    VERIFY(space.get_lock().is_locked_by_current_processor());
    return space.find_region_containing({ vaddr, 1 });
}

Region* MemoryManager::find_user_region_from_vaddr(AddressSpace& space, VirtualAddress vaddr)
{
    SpinlockLocker lock(space.get_lock());
    return find_user_region_from_vaddr_no_lock(space, vaddr);
}

void MemoryManager::validate_syscall_preconditions(AddressSpace& space, RegisterState const& regs)
{
    // We take the space lock once here and then use the no_lock variants
    // to avoid excessive spinlock recursion in this extremely common path.
    SpinlockLocker lock(space.get_lock());

    auto unlock_and_handle_crash = [&lock, &regs](const char* description, int signal) {
        lock.unlock();
        handle_crash(regs, description, signal);
    };

    {
        VirtualAddress userspace_sp = VirtualAddress { regs.userspace_sp() };
        if (!MM.validate_user_stack_no_lock(space, userspace_sp)) {
            dbgln("Invalid stack pointer: {}", userspace_sp);
            return unlock_and_handle_crash("Bad stack on syscall entry", SIGSEGV);
        }
    }

    {
        VirtualAddress ip = VirtualAddress { regs.ip() };
        auto* calling_region = MM.find_user_region_from_vaddr_no_lock(space, ip);
        if (!calling_region) {
            dbgln("Syscall from {:p} which has no associated region", ip);
            return unlock_and_handle_crash("Syscall from unknown region", SIGSEGV);
        }

        if (calling_region->is_writable()) {
            dbgln("Syscall from writable memory at {:p}", ip);
            return unlock_and_handle_crash("Syscall from writable memory", SIGSEGV);
        }

        if (space.enforces_syscall_regions() && !calling_region->is_syscall_region()) {
            dbgln("Syscall from non-syscall region");
            return unlock_and_handle_crash("Syscall from non-syscall region", SIGSEGV);
        }
    }
}

Region* MemoryManager::find_region_from_vaddr(VirtualAddress vaddr)
{
    if (auto* region = kernel_region_from_vaddr(vaddr))
        return region;
    auto page_directory = PageDirectory::find_by_cr3(read_cr3());
    if (!page_directory)
        return nullptr;
    VERIFY(page_directory->address_space());
    return find_user_region_from_vaddr(*page_directory->address_space(), vaddr);
}

PageFaultResponse MemoryManager::handle_page_fault(PageFault const& fault)
{
    VERIFY_INTERRUPTS_DISABLED();

    auto faulted_in_range = [&fault](auto const* start, auto const* end) {
        return fault.vaddr() >= VirtualAddress { start } && fault.vaddr() < VirtualAddress { end };
    };

    if (faulted_in_range(&start_of_ro_after_init, &end_of_ro_after_init))
        PANIC("Attempt to write into READONLY_AFTER_INIT section");

    if (faulted_in_range(&start_of_unmap_after_init, &end_of_unmap_after_init)) {
        auto const* kernel_symbol = symbolicate_kernel_address(fault.vaddr().get());
        PANIC("Attempt to access UNMAP_AFTER_INIT section ({:p}: {})", fault.vaddr(), kernel_symbol ? kernel_symbol->name : "(Unknown)");
    }

    if (faulted_in_range(&start_of_kernel_ksyms, &end_of_kernel_ksyms))
        PANIC("Attempt to access KSYMS section");

    if (Processor::current_in_irq()) {
        dbgln("CPU[{}] BUG! Page fault while handling IRQ! code={}, vaddr={}, irq level: {}",
            Processor::current_id(), fault.code(), fault.vaddr(), Processor::current_in_irq());
        dump_kernel_regions();
        return PageFaultResponse::ShouldCrash;
    }

    dbgln_if(PAGE_FAULT_DEBUG, "MM: CPU[{}] handle_page_fault({:#04x}) at {}", Processor::current_id(), fault.code(), fault.vaddr());

    auto* region = find_region_from_vaddr(fault.vaddr());
    if (!region) {
        return PageFaultResponse::ShouldCrash;
    }

    return region->handle_fault(fault);
}

ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_contiguous_kernel_region(size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
{
    VERIFY(!(size % PAGE_SIZE));
    SpinlockLocker lock(kernel_page_directory().get_lock());
    auto vmobject = TRY(AnonymousVMObject::try_create_physically_contiguous_with_size(size));
    auto range = TRY(kernel_page_directory().range_allocator().try_allocate_anywhere(size));
    return allocate_kernel_region_with_vmobject(range, move(vmobject), name, access, cacheable);
}

ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access, RefPtr<Memory::PhysicalPage>& dma_buffer_page)
{
    dma_buffer_page = TRY(allocate_supervisor_physical_page());
    // Do not enable caching for this region, as physical memory transfers are performed (most architectures have this behaviour by default).
    return allocate_kernel_region(dma_buffer_page->paddr(), PAGE_SIZE, name, access, Region::Cacheable::No);
}

ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access)
{
    RefPtr<Memory::PhysicalPage> dma_buffer_page;

    return allocate_dma_buffer_page(name, access, dma_buffer_page);
}

ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages)
{
    VERIFY(!(size % PAGE_SIZE));
    dma_buffer_pages = TRY(allocate_contiguous_supervisor_physical_pages(size));
    // Do not enable caching for this region, as physical memory transfers are performed (most architectures have this behaviour by default).
    return allocate_kernel_region(dma_buffer_pages.first().paddr(), size, name, access, Region::Cacheable::No);
}

ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access)
{
    VERIFY(!(size % PAGE_SIZE));
    NonnullRefPtrVector<Memory::PhysicalPage> dma_buffer_pages;

    return allocate_dma_buffer_pages(size, name, access, dma_buffer_pages);
}

ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(size_t size, StringView name, Region::Access access, AllocationStrategy strategy, Region::Cacheable cacheable)
{
    VERIFY(!(size % PAGE_SIZE));
    auto vmobject = TRY(AnonymousVMObject::try_create_with_size(size, strategy));
    SpinlockLocker lock(kernel_page_directory().get_lock());
    auto range = TRY(kernel_page_directory().range_allocator().try_allocate_anywhere(size));
    return allocate_kernel_region_with_vmobject(range, move(vmobject), name, access, cacheable);
}
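// A minimal usage sketch for the allocator above (hypothetical caller; the
// name and access flags are illustrative): allocate one page of kernel-only
// scratch space and zero it.
//
//     auto region = TRY(MM.allocate_kernel_region(
//         PAGE_SIZE, "Scratch"sv, Region::Access::Read | Region::Access::Write));
//     memset(region->vaddr().as_ptr(), 0, PAGE_SIZE);
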
ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
{
    VERIFY(!(size % PAGE_SIZE));
    auto vmobject = TRY(AnonymousVMObject::try_create_for_physical_range(paddr, size));
    SpinlockLocker lock(kernel_page_directory().get_lock());
    auto range = TRY(kernel_page_directory().range_allocator().try_allocate_anywhere(size));
    return allocate_kernel_region_with_vmobject(range, move(vmobject), name, access, cacheable);
}

ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange const& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)
{
    OwnPtr<KString> name_kstring;
    if (!name.is_null())
        name_kstring = TRY(KString::try_create(name));
    auto region = TRY(Region::try_create_kernel_only(range, vmobject, 0, move(name_kstring), access, cacheable));
    TRY(region->map(kernel_page_directory()));
    return region;
}

ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
{
    VERIFY(!(size % PAGE_SIZE));
    SpinlockLocker lock(kernel_page_directory().get_lock());
    auto range = TRY(kernel_page_directory().range_allocator().try_allocate_anywhere(size));
    return allocate_kernel_region_with_vmobject(range, vmobject, name, access, cacheable);
}

ErrorOr<CommittedPhysicalPageSet> MemoryManager::commit_user_physical_pages(size_t page_count)
{
    VERIFY(page_count > 0);
    SpinlockLocker lock(s_mm_lock);
    if (m_system_memory_info.user_physical_pages_uncommitted < page_count)
        return ENOMEM;

    m_system_memory_info.user_physical_pages_uncommitted -= page_count;
    m_system_memory_info.user_physical_pages_committed += page_count;
    return CommittedPhysicalPageSet { {}, page_count };
}

void MemoryManager::uncommit_user_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count)
{
    VERIFY(page_count > 0);

    SpinlockLocker lock(s_mm_lock);
    VERIFY(m_system_memory_info.user_physical_pages_committed >= page_count);

    m_system_memory_info.user_physical_pages_uncommitted += page_count;
    m_system_memory_info.user_physical_pages_committed -= page_count;
}
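// How the commit/uncommit pair above is typically used (a sketch; real callers
// include the MemoryManager constructor above): commit pages up front so later
// allocation cannot fail, take them one by one, and let the destructor of
// CommittedPhysicalPageSet return whatever is left.
//
//     auto committed_pages = TRY(MM.commit_user_physical_pages(16));
//     auto page = committed_pages.take_one(); // drawn from the committed pool
//     // ... remaining pages are uncommitted when `committed_pages` goes out of scope.
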
void MemoryManager::deallocate_physical_page(PhysicalAddress paddr)
{
    SpinlockLocker lock(s_mm_lock);

    // Are we returning a user page?
    for (auto& region : m_user_physical_regions) {
        if (!region.contains(paddr))
            continue;

        region.return_page(paddr);
        --m_system_memory_info.user_physical_pages_used;
        // Always return pages to the uncommitted pool. Pages that were
        // committed and allocated are only freed upon request. Once
        // returned, there is no guarantee of being able to get them back.
        ++m_system_memory_info.user_physical_pages_uncommitted;
        return;
    }

    // If it's not a user page, it should be a supervisor page.
    if (!m_super_physical_region->contains(paddr))
        PANIC("MM: deallocate_user_physical_page couldn't figure out region for page @ {}", paddr);

    m_super_physical_region->return_page(paddr);
    --m_system_memory_info.super_physical_pages_used;
}

RefPtr<PhysicalPage> MemoryManager::find_free_user_physical_page(bool committed)
{
    VERIFY(s_mm_lock.is_locked());
    RefPtr<PhysicalPage> page;
    if (committed) {
        // Draw from the committed pages pool. We should always have these pages available
        VERIFY(m_system_memory_info.user_physical_pages_committed > 0);
        m_system_memory_info.user_physical_pages_committed--;
    } else {
        // We need to make sure we don't touch pages that we have committed to
        if (m_system_memory_info.user_physical_pages_uncommitted == 0)
            return {};
        m_system_memory_info.user_physical_pages_uncommitted--;
    }
    for (auto& region : m_user_physical_regions) {
        page = region.take_free_page();
        if (!page.is_null()) {
            ++m_system_memory_info.user_physical_pages_used;
            break;
        }
    }
    VERIFY(!committed || !page.is_null());
    return page;
}

NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_user_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill should_zero_fill)
{
    SpinlockLocker lock(s_mm_lock);
    auto page = find_free_user_physical_page(true);
    if (should_zero_fill == ShouldZeroFill::Yes) {
        auto* ptr = quickmap_page(*page);
        memset(ptr, 0, PAGE_SIZE);
        unquickmap_page();
    }
    return page.release_nonnull();
}

ErrorOr<NonnullRefPtr<PhysicalPage>> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
{
    SpinlockLocker lock(s_mm_lock);
    auto page = find_free_user_physical_page(false);
    bool purged_pages = false;

    if (!page) {
        // We didn't have a single free physical page. Let's try to free something up!
        // First, we look for a purgeable VMObject in the volatile state.
        for_each_vmobject([&](auto& vmobject) {
            if (!vmobject.is_anonymous())
                return IterationDecision::Continue;
            auto& anonymous_vmobject = static_cast<AnonymousVMObject&>(vmobject);
            if (!anonymous_vmobject.is_purgeable() || !anonymous_vmobject.is_volatile())
                return IterationDecision::Continue;
            if (auto purged_page_count = anonymous_vmobject.purge()) {
                dbgln("MM: Purge saved the day! Purged {} pages from AnonymousVMObject", purged_page_count);
                page = find_free_user_physical_page(false);
                purged_pages = true;
                VERIFY(page);
                return IterationDecision::Break;
            }
            return IterationDecision::Continue;
        });
        if (!page) {
            dmesgln("MM: no user physical pages available");
            return ENOMEM;
        }
    }

    if (should_zero_fill == ShouldZeroFill::Yes) {
        auto* ptr = quickmap_page(*page);
        memset(ptr, 0, PAGE_SIZE);
        unquickmap_page();
    }

    if (did_purge)
        *did_purge = purged_pages;
    return page.release_nonnull();
}

ErrorOr<NonnullRefPtrVector<PhysicalPage>> MemoryManager::allocate_contiguous_user_physical_pages(size_t size)
{
    VERIFY(!(size % PAGE_SIZE));
    SpinlockLocker lock(s_mm_lock);
    size_t page_count = ceil_div(size, static_cast<size_t>(PAGE_SIZE));

    for (auto& physical_region : m_user_physical_regions) {
        auto physical_pages = physical_region.take_contiguous_free_pages(page_count);
        if (!physical_pages.is_empty()) {
            {
                auto cleanup_region = TRY(MM.allocate_kernel_region(physical_pages[0].paddr(), PAGE_SIZE * page_count, "MemoryManager Allocation Sanitization", Region::Access::Read | Region::Access::Write));
                memset(cleanup_region->vaddr().as_ptr(), 0, PAGE_SIZE * page_count);
            }
            m_system_memory_info.user_physical_pages_used += page_count;
            return physical_pages;
        }
    }

    dmesgln("MM: no contiguous user physical pages available");
    return ENOMEM;
}

ErrorOr<NonnullRefPtrVector<PhysicalPage>> MemoryManager::allocate_contiguous_supervisor_physical_pages(size_t size)
{
    VERIFY(!(size % PAGE_SIZE));
    SpinlockLocker lock(s_mm_lock);
    size_t count = ceil_div(size, static_cast<size_t>(PAGE_SIZE));
    auto physical_pages = m_super_physical_region->take_contiguous_free_pages(count);
    if (physical_pages.is_empty()) {
        dmesgln("MM: no super physical pages available");
        return ENOMEM;
    }

    {
        auto cleanup_region = TRY(MM.allocate_kernel_region(physical_pages[0].paddr(), PAGE_SIZE * count, "MemoryManager Allocation Sanitization", Region::Access::Read | Region::Access::Write));
        memset(cleanup_region->vaddr().as_ptr(), 0, PAGE_SIZE * count);
    }
    m_system_memory_info.super_physical_pages_used += count;
    return physical_pages;
}

ErrorOr<NonnullRefPtr<PhysicalPage>> MemoryManager::allocate_supervisor_physical_page()
{
    SpinlockLocker lock(s_mm_lock);
    auto page = m_super_physical_region->take_free_page();

    if (!page) {
        dmesgln("MM: no super physical pages available");
        return ENOMEM;
    }

    auto* ptr = quickmap_page(*page);
    memset(ptr, 0, PAGE_SIZE);
    unquickmap_page();

    ++m_system_memory_info.super_physical_pages_used;
    return page.release_nonnull();
}

void MemoryManager::enter_process_address_space(Process& process)
{
    enter_address_space(process.address_space());
}

void MemoryManager::enter_address_space(AddressSpace& space)
{
    auto* current_thread = Thread::current();
    VERIFY(current_thread != nullptr);
    SpinlockLocker lock(s_mm_lock);

    current_thread->regs().cr3 = space.page_directory().cr3();
    write_cr3(space.page_directory().cr3());
}

void MemoryManager::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
{
    Processor::flush_tlb_local(vaddr, page_count);
}

void MemoryManager::flush_tlb(PageDirectory const* page_directory, VirtualAddress vaddr, size_t page_count)
{
    Processor::flush_tlb(page_directory, vaddr, page_count);
}

PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t pdpt_index)
{
    VERIFY(s_mm_lock.is_locked_by_current_processor());

    auto& mm_data = get_data();
    auto& pte = boot_pd_kernel_pt1023[(KERNEL_QUICKMAP_PD - KERNEL_PT1024_BASE) / PAGE_SIZE];
    auto pd_paddr = directory.m_directory_pages[pdpt_index]->paddr();
    if (pte.physical_page_base() != pd_paddr.get()) {
        pte.set_physical_page_base(pd_paddr.get());
        pte.set_present(true);
        pte.set_writable(true);
        pte.set_user_allowed(false);
        // Because we must continue to hold the MM lock while we use this
        // mapping, it is sufficient to only flush on the current CPU. Other
        // CPUs trying to use this API must wait on the MM lock anyway
        flush_tlb_local(VirtualAddress(KERNEL_QUICKMAP_PD));
    } else {
        // Even though we don't allow this to be called concurrently, it's
        // possible that this PD was mapped on a different CPU and we don't
        // broadcast the flush. If so, we still need to flush the TLB.
        if (mm_data.m_last_quickmap_pd != pd_paddr)
            flush_tlb_local(VirtualAddress(KERNEL_QUICKMAP_PD));
    }
    mm_data.m_last_quickmap_pd = pd_paddr;
    return (PageDirectoryEntry*)KERNEL_QUICKMAP_PD;
}

PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
{
    VERIFY(s_mm_lock.is_locked_by_current_processor());

    auto& mm_data = get_data();
    auto& pte = ((PageTableEntry*)boot_pd_kernel_pt1023)[(KERNEL_QUICKMAP_PT - KERNEL_PT1024_BASE) / PAGE_SIZE];
    if (pte.physical_page_base() != pt_paddr.get()) {
        pte.set_physical_page_base(pt_paddr.get());
        pte.set_present(true);
        pte.set_writable(true);
        pte.set_user_allowed(false);
        // Because we must continue to hold the MM lock while we use this
        // mapping, it is sufficient to only flush on the current CPU. Other
        // CPUs trying to use this API must wait on the MM lock anyway
        flush_tlb_local(VirtualAddress(KERNEL_QUICKMAP_PT));
    } else {
        // Even though we don't allow this to be called concurrently, it's
        // possible that this PT was mapped on a different CPU and we don't
        // broadcast the flush. If so, we still need to flush the TLB.
        if (mm_data.m_last_quickmap_pt != pt_paddr)
            flush_tlb_local(VirtualAddress(KERNEL_QUICKMAP_PT));
    }
    mm_data.m_last_quickmap_pt = pt_paddr;
    return (PageTableEntry*)KERNEL_QUICKMAP_PT;
}

u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(s_mm_lock.is_locked_by_current_processor());
    auto& mm_data = get_data();
    mm_data.m_quickmap_prev_flags = mm_data.m_quickmap_in_use.lock();

    VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE);
    u32 pte_idx = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;

    auto& pte = ((PageTableEntry*)boot_pd_kernel_pt1023)[pte_idx];
    if (pte.physical_page_base() != physical_address.get()) {
        pte.set_physical_page_base(physical_address.get());
        pte.set_present(true);
        pte.set_writable(true);
        pte.set_user_allowed(false);
        flush_tlb_local(vaddr);
    }
    return vaddr.as_ptr();
}

void MemoryManager::unquickmap_page()
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(s_mm_lock.is_locked_by_current_processor());
    auto& mm_data = get_data();
    VERIFY(mm_data.m_quickmap_in_use.is_locked());
    VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE);
    u32 pte_idx = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;
    auto& pte = ((PageTableEntry*)boot_pd_kernel_pt1023)[pte_idx];
    pte.clear();
    flush_tlb_local(vaddr);
    mm_data.m_quickmap_in_use.unlock(mm_data.m_quickmap_prev_flags);
}
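// A sketch of the quickmap protocol above (the pattern used throughout this
// file): quickmap_page() maps one physical page at this CPU's quickmap slot
// and takes the per-CPU quickmap lock, so every call must be paired with
// unquickmap_page() before anything else can quickmap on this CPU.
//
//     auto* ptr = quickmap_page(physical_page);
//     memcpy(buffer, ptr, PAGE_SIZE);
//     unquickmap_page();
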
bool MemoryManager::validate_user_stack_no_lock(AddressSpace& space, VirtualAddress vaddr) const
{
    VERIFY(space.get_lock().is_locked_by_current_processor());
    if (!is_user_address(vaddr))
        return false;
    auto* region = find_user_region_from_vaddr_no_lock(space, vaddr);
    return region && region->is_user() && region->is_stack();
}

bool MemoryManager::validate_user_stack(AddressSpace& space, VirtualAddress vaddr) const
{
    SpinlockLocker lock(space.get_lock());
    return validate_user_stack_no_lock(space, vaddr);
}

void MemoryManager::register_kernel_region(Region& region)
{
    VERIFY(region.is_kernel());
    SpinlockLocker lock(s_mm_lock);
    m_kernel_regions.insert(region.vaddr().get(), region);
}

void MemoryManager::unregister_kernel_region(Region& region)
{
    VERIFY(region.is_kernel());
    SpinlockLocker lock(s_mm_lock);
    m_kernel_regions.remove(region.vaddr().get());
}

void MemoryManager::dump_kernel_regions()
{
    dbgln("Kernel regions:");
#if ARCH(I386)
    char const* addr_padding = "";
#else
    char const* addr_padding = " ";
#endif
    dbgln("BEGIN{} END{} SIZE{} ACCESS NAME",
        addr_padding, addr_padding, addr_padding);
    SpinlockLocker lock(s_mm_lock);
    for (auto const& region : m_kernel_regions) {
        dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}",
            region.vaddr().get(),
            region.vaddr().offset(region.size() - 1).get(),
            region.size(),
            region.is_readable() ? 'R' : ' ',
            region.is_writable() ? 'W' : ' ',
            region.is_executable() ? 'X' : ' ',
            region.is_shared() ? 'S' : ' ',
            region.is_stack() ? 'T' : ' ',
            region.is_syscall_region() ? 'C' : ' ',
            region.name());
    }
}

void MemoryManager::set_page_writable_direct(VirtualAddress vaddr, bool writable)
{
    SpinlockLocker page_lock(kernel_page_directory().get_lock());
    SpinlockLocker lock(s_mm_lock);
    auto* pte = ensure_pte(kernel_page_directory(), vaddr);
    VERIFY(pte);
    if (pte->is_writable() == writable)
        return;
    pte->set_writable(writable);
    flush_tlb(&kernel_page_directory(), vaddr);
}

CommittedPhysicalPageSet::~CommittedPhysicalPageSet()
{
    if (m_page_count)
        MM.uncommit_user_physical_pages({}, m_page_count);
}

NonnullRefPtr<PhysicalPage> CommittedPhysicalPageSet::take_one()
{
    VERIFY(m_page_count > 0);
    --m_page_count;
    return MM.allocate_committed_user_physical_page({}, MemoryManager::ShouldZeroFill::Yes);
}

void CommittedPhysicalPageSet::uncommit_one()
{
    VERIFY(m_page_count > 0);
    --m_page_count;
    MM.uncommit_user_physical_pages({}, 1);
}

void MemoryManager::copy_physical_page(PhysicalPage& physical_page, u8 page_buffer[PAGE_SIZE])
{
    SpinlockLocker locker(s_mm_lock);
    auto* quickmapped_page = quickmap_page(physical_page);
    memcpy(page_buffer, quickmapped_page, PAGE_SIZE);
    unquickmap_page();
}

}