#pragma once

#include <AK/InlineLinkedList.h>
#include <AK/NonnullRefPtrVector.h>
#include <AK/Optional.h>
#include <AK/OwnPtr.h>
#include <AK/RefPtr.h>
#include <AK/String.h>
#include <AK/Types.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/FileSystem/InodeIdentifier.h>
#include <Kernel/PhysicalAddress.h>
#include <Kernel/VM/PhysicalPage.h>
#include <Kernel/VM/PhysicalRegion.h>
#include <Kernel/VM/Region.h>
#include <Kernel/VM/VMObject.h>
#include <Kernel/VirtualAddress.h>

// Round an address or size up to the next page boundary.
#define PAGE_ROUND_UP(x) ((((u32)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1)))

class KBuffer;
class SynthFSInode;

// Convenience accessor for the MemoryManager singleton.
#define MM MemoryManager::the()

class MemoryManager {
    AK_MAKE_ETERNAL
    friend class PageDirectory;
    friend class PhysicalPage;
    friend class PhysicalRegion;
    friend class Region;
    friend class VMObject;
    friend Optional<KBuffer> procfs$mm(InodeIdentifier);
    friend Optional<KBuffer> procfs$memstat(InodeIdentifier);

public:
    static MemoryManager& the();

    static void initialize(u32 physical_address_for_kernel_page_tables);

    PageFaultResponse handle_page_fault(const PageFault&);

    void enter_process_paging_scope(Process&);

    bool validate_user_stack(const Process&, VirtualAddress) const;
    bool validate_user_read(const Process&, VirtualAddress, size_t) const;
    bool validate_user_write(const Process&, VirtualAddress, size_t) const;

    bool validate_kernel_read(const Process&, VirtualAddress, size_t) const;

    enum class ShouldZeroFill {
        No,
        Yes
    };

    RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill);
    RefPtr<PhysicalPage> allocate_supervisor_physical_page();
    void deallocate_user_physical_page(PhysicalPage&&);
    void deallocate_supervisor_physical_page(PhysicalPage&&);

    void map_for_kernel(VirtualAddress, PhysicalAddress, bool cache_disabled = false);

    OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool should_commit = true);
    OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, const StringView& name, u8 access);
    OwnPtr<Region> allocate_user_accessible_kernel_region(size_t, const StringView& name, u8 access);

    unsigned user_physical_pages() const { return m_user_physical_pages; }
    unsigned user_physical_pages_used() const { return m_user_physical_pages_used; }
    unsigned super_physical_pages() const { return m_super_physical_pages; }
    unsigned super_physical_pages_used() const { return m_super_physical_pages_used; }

    template<typename Callback>
    static void for_each_vmobject(Callback callback)
    {
        for (auto& vmobject : MM.m_vmobjects) {
            if (callback(vmobject) == IterationDecision::Break)
                break;
        }
    }

    static Region* region_from_vaddr(Process&, VirtualAddress);
    static const Region* region_from_vaddr(const Process&, VirtualAddress);

private:
    MemoryManager(u32 physical_address_for_kernel_page_tables);
    ~MemoryManager();

    enum class AccessSpace { Kernel, User };
    enum class AccessType { Read, Write };
    template<AccessSpace, AccessType>
    bool validate_range(const Process&, VirtualAddress, size_t) const;

    void register_vmobject(VMObject&);
    void unregister_vmobject(VMObject&);
    void register_region(Region&);
    void unregister_region(Region&);

    void detect_cpu_features();
    void initialize_paging();
    void flush_entire_tlb();
    void flush_tlb(VirtualAddress);

    void map_protected(VirtualAddress, size_t length);

    void create_identity_mapping(PageDirectory&, VirtualAddress, size_t length);

    static Region* user_region_from_vaddr(Process&, VirtualAddress);
    static Region* kernel_region_from_vaddr(VirtualAddress);

    static Region* region_from_vaddr(VirtualAddress);

    RefPtr<PhysicalPage> find_free_user_physical_page();
    u8* quickmap_page(PhysicalPage&);
    void unquickmap_page();

    PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }

    PageTableEntry& ensure_pte(PageDirectory&, VirtualAddress);

    RefPtr<PageDirectory> m_kernel_page_directory;
    PageTableEntry* m_low_page_tables[4] { nullptr };

    VirtualAddress m_quickmap_addr;

    unsigned m_user_physical_pages { 0 };
    unsigned m_user_physical_pages_used { 0 };
    unsigned m_super_physical_pages { 0 };
    unsigned m_super_physical_pages_used { 0 };

    NonnullRefPtrVector<PhysicalRegion> m_user_physical_regions;
    NonnullRefPtrVector<PhysicalRegion> m_super_physical_regions;

    InlineLinkedList<Region> m_user_regions;
    InlineLinkedList<Region> m_kernel_regions;

    InlineLinkedList<VMObject> m_vmobjects;

    bool m_quickmap_in_use { false };
};

struct ProcessPagingScope {
    ProcessPagingScope(Process&);
    ~ProcessPagingScope();
};

template<typename Callback>
void VMObject::for_each_region(Callback callback)
{
    // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
    //        Perhaps VMObject could have a Vector with all of its mappers?
    for (auto& region : MM.m_user_regions) {
        if (&region.vmobject() == this)
            callback(region);
    }
    for (auto& region : MM.m_kernel_regions) {
        if (&region.vmobject() == this)
            callback(region);
    }
}

inline bool is_user_address(VirtualAddress vaddr)
{
    return vaddr.get() >= (8 * MB) && vaddr.get() < 0xc0000000;
}
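
// Usage sketch (illustrative, not part of the original header): how kernel code
// might use the MM singleton declared above to set up a kernel-side buffer and
// to check a user-supplied pointer before reading through it. The Region::Access
// flags and the errno-style return values are assumptions about the surrounding
// kernel API; only allocate_kernel_region(), validate_user_read() and
// is_user_address() are taken from this header.
//
//     // Allocate a page-aligned kernel region for a scratch buffer:
//     auto region = MM.allocate_kernel_region(PAGE_ROUND_UP(size), "scratch buffer", Region::Access::Read | Region::Access::Write);
//     if (!region)
//         return -ENOMEM;
//
//     // Before copying from a userspace pointer, validate it against the process's address space:
//     if (!is_user_address(user_vaddr) || !MM.validate_user_read(process, user_vaddr, size))
//         return -EFAULT;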