MemoryManager.cpp

#include "MemoryManager.h"
#include <AK/Assertions.h>
#include <AK/kstdio.h>
#include <AK/kmalloc.h>
#include "i386.h"
#include "StdLib.h"
#include "Task.h"

static MemoryManager* s_the;

MemoryManager& MemoryManager::the()
{
    return *s_the;
}

MemoryManager::MemoryManager()
{
    // The page directory and the first two page tables live at fixed,
    // hard-coded low physical addresses.
    m_pageDirectory = (dword*)0x5000;
    m_pageTableZero = (dword*)0x6000;
    m_pageTableOne = (dword*)0x7000;
    initializePaging();
}

MemoryManager::~MemoryManager()
{
}

void MemoryManager::initializePaging()
{
    static_assert(sizeof(MemoryManager::PageDirectoryEntry) == 4);
    static_assert(sizeof(MemoryManager::PageTableEntry) == 4);
    memset(m_pageTableZero, 0, 4096);
    memset(m_pageTableOne, 0, 4096);
    memset(m_pageDirectory, 0, 4096);

    kprintf("MM: Page directory @ %p\n", m_pageDirectory);
    kprintf("MM: Page table zero @ %p\n", m_pageTableZero);
    kprintf("MM: Page table one @ %p\n", m_pageTableOne);

    // Make null dereferences crash.
    protectMap(LinearAddress(0), 4 * KB);

    // Identity-map the rest of low memory so the kernel keeps running
    // once paging is switched on.
    identityMap(LinearAddress(4096), 4 * MB);

    // Put pages between 4MB and 16MB in the page freelist.
    // Start one full page above 4MB so every freelist entry stays 4KB-aligned
    // and the quickMapOnePage() window at 4MB is skipped.
    for (size_t i = (4 * MB) + PAGE_SIZE; i < (16 * MB); i += PAGE_SIZE) {
        m_freePages.append(PhysicalAddress(i));
    }

    // Load CR3 with the page directory, then set PE (bit 0) and PG (bit 31)
    // in CR0 to enable paging.
    asm volatile("movl %%eax, %%cr3" :: "a"(m_pageDirectory));
    asm volatile(
        "movl %cr0, %eax\n"
        "orl $0x80000001, %eax\n"
        "movl %eax, %cr0\n"
    );
}
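
// A sketch of the layout the code above establishes (inferred from the
// constants in this file):
//
//   linear 0x00000000-0x00000fff  not present, so null dereferences fault
//   linear 0x00001000 onward      ~4 MB of identity-mapped low memory
//   linear 4 MB                   quickMapOnePage() window (see below)
//   physical 4 MB-16 MB           pages handed out via m_freePages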

auto MemoryManager::ensurePTE(LinearAddress linearAddress) -> PageTableEntry
{
    ASSERT_INTERRUPTS_DISABLED();
    // A 32-bit linear address splits into a 10-bit page directory index,
    // a 10-bit page table index, and a 12-bit offset within the page.
    dword pageDirectoryIndex = (linearAddress.get() >> 22) & 0x3ff;
    dword pageTableIndex = (linearAddress.get() >> 12) & 0x3ff;
    PageDirectoryEntry pde = PageDirectoryEntry(&m_pageDirectory[pageDirectoryIndex]);
    if (!pde.isPresent()) {
        kprintf("PDE %u !present, allocating\n", pageDirectoryIndex);
        if (pageDirectoryIndex == 0) {
            pde.setPageTableBase((dword)m_pageTableZero);
            pde.setUserAllowed(true);
            pde.setPresent(true);
            pde.setWritable(true);
        } else if (pageDirectoryIndex == 1) {
            pde.setPageTableBase((dword)m_pageTableOne);
            pde.setUserAllowed(true);
            pde.setPresent(true);
            pde.setWritable(true);
        } else {
            // FIXME: We need an allocator!
            ASSERT_NOT_REACHED();
        }
    }
    return PageTableEntry(&pde.pageTableBase()[pageTableIndex]);
}
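
// Worked example of the decomposition above (illustrative only): for
// linearAddress == 0x00403000,
//
//     pageDirectoryIndex = (0x00403000 >> 22) & 0x3ff == 1   // m_pageTableOne
//     pageTableIndex     = (0x00403000 >> 12) & 0x3ff == 3
//
// so the returned PTE is entry 3 of page table one.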

void MemoryManager::protectMap(LinearAddress linearAddress, size_t length)
{
    InterruptDisabler disabler;
    // FIXME: ASSERT(linearAddress is 4KB aligned);
    for (dword offset = 0; offset < length; offset += 4096) {
        auto pteAddress = linearAddress.offset(offset);
        auto pte = ensurePTE(pteAddress);
        pte.setPhysicalPageBase(pteAddress.get());
        pte.setUserAllowed(false);
        pte.setPresent(false);
        pte.setWritable(false);
        flushTLB(pteAddress);
    }
}

void MemoryManager::identityMap(LinearAddress linearAddress, size_t length)
{
    InterruptDisabler disabler;
    // FIXME: ASSERT(linearAddress is 4KB aligned);
    for (dword offset = 0; offset < length; offset += 4096) {
        auto pteAddress = linearAddress.offset(offset);
        auto pte = ensurePTE(pteAddress);
        pte.setPhysicalPageBase(pteAddress.get());
        pte.setUserAllowed(true);
        pte.setPresent(true);
        pte.setWritable(true);
        flushTLB(pteAddress);
    }
}

void MemoryManager::initialize()
{
    s_the = new MemoryManager;
}

PageFaultResponse MemoryManager::handlePageFault(const PageFault& fault)
{
    ASSERT_INTERRUPTS_DISABLED();
    kprintf("MM: handlePageFault(%w) at laddr=%p\n", fault.code(), fault.address().get());
    if (fault.isNotPresent()) {
        kprintf(" >> NP fault!\n");
    } else if (fault.isProtectionViolation()) {
        kprintf(" >> PV fault!\n");
    }
    return PageFaultResponse::ShouldCrash;
}
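
// The fault code is the x86 page-fault error code pushed by the CPU;
// assuming PageFault::code() exposes it raw, the low bits mean:
//
//     bit 0: 0 = not-present fault, 1 = protection violation
//     bit 1: 0 = read access,       1 = write access
//     bit 2: 0 = fault in ring 0,   1 = fault in ring 3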

RetainPtr<Zone> MemoryManager::createZone(size_t size)
{
    auto pages = allocatePhysicalPages(ceilDiv(size, PAGE_SIZE));
    if (pages.isEmpty()) {
        kprintf("MM: createZone: no physical pages for size %u\n", size);
        return nullptr;
    }
    return adopt(*new Zone(move(pages)));
}
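
// Hypothetical usage sketch (names as declared in this file):
//
//     auto zone = MemoryManager::the().createZone(3 * PAGE_SIZE);
//     if (!zone)
//         return; // out of physical pages
//     // zone now owns ceilDiv(size, PAGE_SIZE) == 3 physical pages.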

Vector<PhysicalAddress> MemoryManager::allocatePhysicalPages(size_t count)
{
    InterruptDisabler disabler;
    if (count > m_freePages.size())
        return { };
    Vector<PhysicalAddress> pages;
    pages.ensureCapacity(count);
    for (size_t i = 0; i < count; ++i)
        pages.append(m_freePages.takeLast());
    return pages;
}

byte* MemoryManager::quickMapOnePage(PhysicalAddress physicalAddress)
{
    ASSERT_INTERRUPTS_DISABLED();
    auto pte = ensurePTE(LinearAddress(4 * MB));
    kprintf("quickmap %x @ %x {pte @ %p}\n", physicalAddress.get(), 4 * MB, pte.ptr());
    pte.setPhysicalPageBase(physicalAddress.pageBase());
    pte.setPresent(true);
    pte.setWritable(true);
    flushTLB(LinearAddress(4 * MB));
    return (byte*)(4 * MB);
}
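
// There is a single quickmap window at linear 4 MB, so only one page can be
// quick-mapped at a time, and only with interrupts disabled. Sketch, with
// somePhysicalPage as an illustrative placeholder:
//
//     InterruptDisabler disabler;
//     byte* window = MemoryManager::the().quickMapOnePage(somePhysicalPage);
//     memset(window, 0, PAGE_SIZE); // scribble on that physical page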

void MemoryManager::flushEntireTLB()
{
    // Reloading CR3 invalidates all (non-global) TLB entries.
    asm volatile(
        "mov %cr3, %eax\n"
        "mov %eax, %cr3\n"
    );
}

void MemoryManager::flushTLB(LinearAddress laddr)
{
    // Invalidate only the TLB entry covering this linear address. The
    // "memory" clobber keeps the compiler from reordering accesses around it.
    asm volatile("invlpg %0" : : "m"(*(char*)laddr.get()) : "memory");
}
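
// invlpg invalidates a single entry and is the right tool after editing one
// PTE; flushEntireTLB() (a full CR3 reload) is only worth it when many
// mappings change at once.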

bool MemoryManager::unmapRegion(Task& task, Task::Region& region)
{
    InterruptDisabler disabler;
    auto& zone = *region.zone;
    for (size_t i = 0; i < zone.m_pages.size(); ++i) {
        auto laddr = region.linearAddress.offset(i * PAGE_SIZE);
        auto pte = ensurePTE(laddr);
        pte.setPhysicalPageBase(0);
        pte.setPresent(false);
        pte.setWritable(false);
        pte.setUserAllowed(false);
        flushTLB(laddr);
        //kprintf("MM: >> Unmapped L%x => P%x <<\n", laddr, zone.m_pages[i].get());
    }
    return true;
}

bool MemoryManager::unmapRegionsForTask(Task& task)
{
    ASSERT_INTERRUPTS_DISABLED();
    for (auto& region : task.m_regions) {
        if (!unmapRegion(task, *region))
            return false;
    }
    return true;
}

bool MemoryManager::mapRegion(Task& task, Task::Region& region)
{
    InterruptDisabler disabler;
    auto& zone = *region.zone;
    for (size_t i = 0; i < zone.m_pages.size(); ++i) {
        auto laddr = region.linearAddress.offset(i * PAGE_SIZE);
        auto pte = ensurePTE(laddr);
        pte.setPhysicalPageBase(zone.m_pages[i].get());
        pte.setPresent(true);
        pte.setWritable(true);
        pte.setUserAllowed(!task.isRing0());
        flushTLB(laddr);
        //kprintf("MM: >> Mapped L%x => P%x <<\n", laddr, zone.m_pages[i].get());
    }
    return true;
}

bool MemoryManager::mapRegionsForTask(Task& task)
{
    ASSERT_INTERRUPTS_DISABLED();
    for (auto& region : task.m_regions) {
        if (!mapRegion(task, *region))
            return false;
    }
    return true;
}

bool copyToZone(Zone& zone, const void* data, size_t size)
{
    if (zone.size() < size) {
        kprintf("copyToZone: can't fit %u bytes into zone with size %u\n", size, zone.size());
        return false;
    }
    InterruptDisabler disabler;
    auto* dataptr = (const byte*)data;
    size_t remaining = size;
    for (size_t i = 0; i < zone.m_pages.size() && remaining; ++i) {
        byte* dest = MemoryManager::the().quickMapOnePage(zone.m_pages[i]);
        size_t nbytes = min(PAGE_SIZE, remaining);
        kprintf("memcpy(%p, %p, %u)\n", dest, dataptr, nbytes);
        memcpy(dest, dataptr, nbytes);
        // Advance by the bytes actually copied (not PAGE_SIZE) so 'remaining'
        // can't underflow on the final, partially-filled page.
        dataptr += nbytes;
        remaining -= nbytes;
    }
    return true;
}
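
// Hypothetical end-to-end sketch combining createZone() and copyToZone()
// (buffer is an illustrative placeholder):
//
//     const char buffer[] = "hello";
//     auto zone = MemoryManager::the().createZone(sizeof(buffer));
//     if (zone)
//         copyToZone(*zone, buffer, sizeof(buffer));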