MemoryManager.cpp

#include "MemoryManager.h"
#include <AK/Assertions.h>
#include <AK/kstdio.h>
#include <AK/kmalloc.h>
#include "i386.h"
#include "StdLib.h"
#include "Task.h"

static MemoryManager* s_the;

MemoryManager& MM
{
    return *s_the;
}

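// Note: the definition above only compiles because MemoryManager.h (not shown
// here) presumably defines something like `#define MM MemoryManager::the()`,
// so the preprocessor turns it into `MemoryManager& MemoryManager::the()` and
// call sites such as `MM.registerZone(*this)` below expand to
// `MemoryManager::the().registerZone(*this)`.
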
MemoryManager::MemoryManager()
{
    // The page directory and the first two page tables live at fixed physical
    // addresses in low memory (presumably reserved for this by the boot code).
    m_pageDirectory = (dword*)0x5000;
    m_pageTableZero = (dword*)0x6000;
    m_pageTableOne = (dword*)0x7000;
    initializePaging();
}

MemoryManager::~MemoryManager()
{
}

void MemoryManager::initializePaging()
{
    static_assert(sizeof(MemoryManager::PageDirectoryEntry) == 4);
    static_assert(sizeof(MemoryManager::PageTableEntry) == 4);
    memset(m_pageTableZero, 0, 4096);
    memset(m_pageTableOne, 0, 4096);
    memset(m_pageDirectory, 0, 4096);

    kprintf("[MM] Page directory @ %p\n", m_pageDirectory);

    // Make null dereferences crash.
    protectMap(LinearAddress(0), 4 * KB);

    // Identity-map everything from the zero page up through 4 MB so the
    // kernel keeps running once paging is turned on.
    identityMap(LinearAddress(4096), 4 * MB);

    // Put pages between 4MB and 8MB in the page freelist.
    for (size_t i = (4 * MB) + PAGE_SIZE; i < (8 * MB); i += PAGE_SIZE) {
        m_freePages.append(PhysicalAddress(i));
    }

    // Load the page directory into CR3, then set PG (bit 31) and PE (bit 0)
    // in CR0 to enable paging. The clobber tells the compiler we use eax.
    asm volatile("movl %%eax, %%cr3" :: "a"(m_pageDirectory));
    asm volatile(
        "movl %%cr0, %%eax\n"
        "orl $0x80000001, %%eax\n"
        "movl %%eax, %%cr0\n"
        ::: "%eax");
}

void* MemoryManager::allocatePageTable()
{
    auto ppages = allocatePhysicalPages(1);
    // Physical memory may be exhausted; better to assert than index an empty vector.
    ASSERT(!ppages.isEmpty());
    dword address = ppages[0].get();
    identityMap(LinearAddress(address), 4096);
    return (void*)address;
}

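// ensurePTE() below walks the two-level i386 paging structure. A 32-bit
// linear address splits into three fields:
//
//   bits 31-22: page directory index (1024 entries)
//   bits 21-12: page table index     (1024 entries)
//   bits 11-0:  offset within the 4 KB page
//
// For example, laddr 0x00403000 has directory index 1, table index 3 and
// offset 0, so it is served by m_pageTableOne[3].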
auto MemoryManager::ensurePTE(LinearAddress linearAddress) -> PageTableEntry
{
    ASSERT_INTERRUPTS_DISABLED();
    dword pageDirectoryIndex = (linearAddress.get() >> 22) & 0x3ff;
    dword pageTableIndex = (linearAddress.get() >> 12) & 0x3ff;
    PageDirectoryEntry pde = PageDirectoryEntry(&m_pageDirectory[pageDirectoryIndex]);
    if (!pde.isPresent()) {
        kprintf("[MM] PDE %u not present, allocating\n", pageDirectoryIndex);
        if (pageDirectoryIndex == 0) {
            pde.setPageTableBase((dword)m_pageTableZero);
            pde.setUserAllowed(true);
            pde.setPresent(true);
            pde.setWritable(true);
        } else if (pageDirectoryIndex == 1) {
            pde.setPageTableBase((dword)m_pageTableOne);
            pde.setUserAllowed(true);
            pde.setPresent(true);
            pde.setWritable(true);
        } else {
            auto* pageTable = allocatePageTable();
            kprintf("[MM] Allocated page table #%u (for laddr=%p) at %p\n", pageDirectoryIndex, linearAddress.get(), pageTable);
            memset(pageTable, 0, 4096);
            pde.setPageTableBase((dword)pageTable);
            pde.setUserAllowed(true);
            pde.setPresent(true);
            pde.setWritable(true);
        }
    }
    return PageTableEntry(&pde.pageTableBase()[pageTableIndex]);
}

void MemoryManager::protectMap(LinearAddress linearAddress, size_t length)
{
    InterruptDisabler disabler;
    // FIXME: ASSERT(linearAddress is 4KB aligned);
    for (dword offset = 0; offset < length; offset += 4096) {
        auto pteAddress = linearAddress.offset(offset);
        auto pte = ensurePTE(pteAddress);
        pte.setPhysicalPageBase(pteAddress.get());
        pte.setUserAllowed(false);
        pte.setPresent(false);
        pte.setWritable(false);
        flushTLB(pteAddress);
    }
}

void MemoryManager::identityMap(LinearAddress linearAddress, size_t length)
{
    InterruptDisabler disabler;
    // FIXME: ASSERT(linearAddress is 4KB aligned);
    for (dword offset = 0; offset < length; offset += 4096) {
        auto pteAddress = linearAddress.offset(offset);
        auto pte = ensurePTE(pteAddress);
        pte.setPhysicalPageBase(pteAddress.get());
        pte.setUserAllowed(true);
        pte.setPresent(true);
        pte.setWritable(true);
        flushTLB(pteAddress);
    }
}

void MemoryManager::initialize()
{
    s_the = new MemoryManager;
}

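// On i386, the page fault error code (printed below with %w) encodes the
// cause: bit 0 set means a protection violation (clear means not-present),
// bit 1 set means a write access, and bit 2 set means the fault happened in
// user mode. fault.isNotPresent()/isProtectionViolation() presumably decode
// bit 0.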
PageFaultResponse MemoryManager::handlePageFault(const PageFault& fault)
{
    ASSERT_INTERRUPTS_DISABLED();
    kprintf("[MM] handlePageFault(%w) at laddr=%p\n", fault.code(), fault.address().get());
    if (fault.isNotPresent()) {
        kprintf(" >> NP fault!\n");
    } else if (fault.isProtectionViolation()) {
        kprintf(" >> PV fault!\n");
    }
    return PageFaultResponse::ShouldCrash;
}

void MemoryManager::registerZone(Zone& zone)
{
    ASSERT_INTERRUPTS_DISABLED();
    m_zones.set(&zone);
}

void MemoryManager::unregisterZone(Zone& zone)
{
    ASSERT_INTERRUPTS_DISABLED();
    m_zones.remove(&zone);
    m_freePages.append(move(zone.m_pages));
}

Zone::Zone(Vector<PhysicalAddress>&& pages)
    : m_pages(move(pages))
{
    MM.registerZone(*this);
}

Zone::~Zone()
{
    MM.unregisterZone(*this);
}

RetainPtr<Zone> MemoryManager::createZone(size_t size)
{
    InterruptDisabler disabler;
    auto pages = allocatePhysicalPages(ceilDiv(size, PAGE_SIZE));
    if (pages.isEmpty()) {
        kprintf("[MM] createZone: no physical pages for size %u\n", size);
        return nullptr;
    }
    return adopt(*new Zone(move(pages)));
}

Vector<PhysicalAddress> MemoryManager::allocatePhysicalPages(size_t count)
{
    InterruptDisabler disabler;
    if (count > m_freePages.size())
        return { };
    Vector<PhysicalAddress> pages;
    pages.ensureCapacity(count);
    for (size_t i = 0; i < count; ++i)
        pages.append(m_freePages.takeLast());
    return pages;
}

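// quickMapOnePage() reuses the single linear page at 4 MB as a scratch
// mapping for one physical page at a time. The freelist built in
// initializePaging() deliberately starts at 4 MB + PAGE_SIZE, presumably so
// the physical page at 4 MB is never handed out while its linear slot is
// being repointed here. Callers run with interrupts disabled; a second
// quickmap simply replaces the first.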
byte* MemoryManager::quickMapOnePage(PhysicalAddress physicalAddress)
{
    ASSERT_INTERRUPTS_DISABLED();
    auto pte = ensurePTE(LinearAddress(4 * MB));
    kprintf("[MM] quickmap %x @ %x {pte @ %p}\n", physicalAddress.get(), 4 * MB, pte.ptr());
    pte.setPhysicalPageBase(physicalAddress.pageBase());
    pte.setPresent(true);
    pte.setWritable(true);
    flushTLB(LinearAddress(4 * MB));
    return (byte*)(4 * MB);
}

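// Two TLB invalidation strategies: reloading CR3 (below) flushes every
// non-global TLB entry, while `invlpg` invalidates only the entry for a
// single linear address, which is why the per-page map/unmap paths above
// call flushTLB() instead.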
void MemoryManager::flushEntireTLB()
{
    // Tell the compiler we clobber eax while round-tripping CR3.
    asm volatile(
        "mov %%cr3, %%eax\n"
        "mov %%eax, %%cr3\n"
        ::: "%eax");
}

void MemoryManager::flushTLB(LinearAddress laddr)
{
    asm volatile("invlpg %0" :: "m"(*(char*)laddr.get()) : "memory");
}

bool MemoryManager::unmapRegion(Task& task, Task::Region& region)
{
    InterruptDisabler disabler;
    auto& zone = *region.zone;
    for (size_t i = 0; i < zone.m_pages.size(); ++i) {
        auto laddr = region.linearAddress.offset(i * PAGE_SIZE);
        auto pte = ensurePTE(laddr);
        pte.setPhysicalPageBase(0);
        pte.setPresent(false);
        pte.setWritable(false);
        pte.setUserAllowed(false);
        flushTLB(laddr);
        //kprintf("MM: >> Unmapped L%x => P%x <<\n", laddr, zone.m_pages[i].get());
    }
    return true;
}

bool MemoryManager::unmapSubregion(Task& task, Task::Subregion& subregion)
{
    InterruptDisabler disabler;
    auto& region = *subregion.region;
    auto& zone = *region.zone;
    size_t numPages = subregion.size / 4096;
    ASSERT(numPages);
    for (size_t i = 0; i < numPages; ++i) {
        auto laddr = subregion.linearAddress.offset(i * PAGE_SIZE);
        auto pte = ensurePTE(laddr);
        pte.setPhysicalPageBase(0);
        pte.setPresent(false);
        pte.setWritable(false);
        pte.setUserAllowed(false);
        flushTLB(laddr);
        //kprintf("MM: >> Unmapped subregion %s L%x => P%x <<\n", subregion.name.characters(), laddr, zone.m_pages[i].get());
    }
    return true;
}

bool MemoryManager::unmapRegionsForTask(Task& task)
{
    ASSERT_INTERRUPTS_DISABLED();
    for (auto& region : task.m_regions) {
        if (!unmapRegion(task, *region))
            return false;
    }
    for (auto& subregion : task.m_subregions) {
        if (!unmapSubregion(task, *subregion))
            return false;
    }
    return true;
}

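// A subregion maps a window of its parent region's zone: `offset` selects the
// first backing page and `size` the page count, so both are presumably
// expected to be page-aligned (the divisions by 4096 truncate, and
// ASSERT(numPages) only catches sizes smaller than one page).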
bool MemoryManager::mapSubregion(Task& task, Task::Subregion& subregion)
{
    InterruptDisabler disabler;
    auto& region = *subregion.region;
    auto& zone = *region.zone;
    size_t firstPage = subregion.offset / 4096;
    size_t numPages = subregion.size / 4096;
    ASSERT(numPages);
    for (size_t i = 0; i < numPages; ++i) {
        auto laddr = subregion.linearAddress.offset(i * PAGE_SIZE);
        auto pte = ensurePTE(laddr);
        pte.setPhysicalPageBase(zone.m_pages[firstPage + i].get());
        pte.setPresent(true);
        pte.setWritable(true);
        pte.setUserAllowed(!task.isRing0());
        flushTLB(laddr);
        //kprintf("MM: >> Mapped subregion %s L%x => P%x (%u into region)<<\n", subregion.name.characters(), laddr, zone.m_pages[firstPage + i].get(), subregion.offset);
    }
    return true;
}

bool MemoryManager::mapRegion(Task& task, Task::Region& region)
{
    InterruptDisabler disabler;
    auto& zone = *region.zone;
    for (size_t i = 0; i < zone.m_pages.size(); ++i) {
        auto laddr = region.linearAddress.offset(i * PAGE_SIZE);
        auto pte = ensurePTE(laddr);
        pte.setPhysicalPageBase(zone.m_pages[i].get());
        pte.setPresent(true);
        pte.setWritable(true);
        pte.setUserAllowed(!task.isRing0());
        flushTLB(laddr);
        //kprintf("MM: >> Mapped L%x => P%x <<\n", laddr, zone.m_pages[i].get());
    }
    return true;
}

bool MemoryManager::mapRegionsForTask(Task& task)
{
    ASSERT_INTERRUPTS_DISABLED();
    for (auto& region : task.m_regions) {
        if (!mapRegion(task, *region))
            return false;
    }
    for (auto& subregion : task.m_subregions) {
        if (!mapSubregion(task, *subregion))
            return false;
    }
    return true;
}

bool copyToZone(Zone& zone, const void* data, size_t size)
{
    if (zone.size() < size) {
        kprintf("[MM] copyToZone: can't fit %u bytes into zone with size %u\n", size, zone.size());
        return false;
    }

    InterruptDisabler disabler;
    auto* dataptr = (const byte*)data;
    size_t remaining = size;
    for (size_t i = 0; i < zone.m_pages.size(); ++i) {
        byte* dest = MM.quickMapOnePage(zone.m_pages[i]);
        // Clamp the chunk to the bytes left so the unsigned `remaining`
        // can't underflow (and over-copy) when size isn't page-aligned.
        size_t nbytes = min(PAGE_SIZE, remaining);
        kprintf("memcpy(%p, %p, %u)\n", dest, dataptr, nbytes);
        memcpy(dest, dataptr, nbytes);
        dataptr += nbytes;
        remaining -= nbytes;
    }
    return true;
}