Kernel+Userland: Rename prefix of user_physical => physical

There's no supervisor pages concept, so there's no need to refer to
physical pages with the "user_physical" prefix anymore.
Liav A 2022-07-14 15:27:22 +03:00 committed by Andreas Kling
parent 1c499e75bd
commit e4e5fa74d0
12 changed files with 101 additions and 102 deletions
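Since this is a pure rename, call sites keep their shape and only the identifiers lose the "user_physical" prefix. As a quick orientation before the per-file diffs, here is a minimal sketch of a caller using the renamed MemoryManager API; the helper function itself is hypothetical and not part of this commit, and it assumes the Kernel::Memory namespace and the MM accessor used throughout the diffs below:

    #include <Kernel/Memory/MemoryManager.h>

    namespace Kernel::Memory {

    // Hypothetical helper, for illustration only.
    ErrorOr<void> example_physical_allocation()
    {
        // Previously commit_user_physical_pages(): reserve two pages up front.
        auto committed_pages = TRY(MM.commit_physical_pages(2));
        // Draw one zero-filled page from the committed pool.
        auto committed_page = committed_pages.take_one();

        // Previously allocate_user_physical_page(): allocate a single page directly;
        // this can fail with ENOMEM, which TRY propagates to the caller.
        auto page = TRY(MM.allocate_physical_page(MemoryManager::ShouldZeroFill::Yes));

        (void)committed_page;
        (void)page;
        return {};
    }

    }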

View file

@@ -446,10 +446,10 @@ private:
auto json = TRY(JsonObjectSerializer<>::try_create(builder));
TRY(json.add("kmalloc_allocated"sv, stats.bytes_allocated));
TRY(json.add("kmalloc_available"sv, stats.bytes_free));
TRY(json.add("user_physical_allocated"sv, system_memory.user_physical_pages_used));
TRY(json.add("user_physical_available"sv, system_memory.user_physical_pages - system_memory.user_physical_pages_used));
TRY(json.add("user_physical_committed"sv, system_memory.user_physical_pages_committed));
TRY(json.add("user_physical_uncommitted"sv, system_memory.user_physical_pages_uncommitted));
TRY(json.add("physical_allocated"sv, system_memory.physical_pages_used));
TRY(json.add("physical_available"sv, system_memory.physical_pages - system_memory.physical_pages_used));
TRY(json.add("physical_committed"sv, system_memory.physical_pages_committed));
TRY(json.add("physical_uncommitted"sv, system_memory.physical_pages_uncommitted));
TRY(json.add("kmalloc_call_count"sv, stats.kmalloc_call_count));
TRY(json.add("kfree_call_count"sv, stats.kfree_call_count));
TRY(json.finish());

View file

@@ -319,7 +319,7 @@ struct KmallocGlobalData {
PANIC("Out of address space when expanding kmalloc heap.");
}
auto physical_pages_or_error = MM.commit_user_physical_pages(new_subheap_size / PAGE_SIZE);
auto physical_pages_or_error = MM.commit_physical_pages(new_subheap_size / PAGE_SIZE);
if (physical_pages_or_error.is_error()) {
// FIXME: Dare to return false!
PANIC("Out of physical pages when expanding kmalloc heap.");

View file

@@ -42,7 +42,7 @@ ErrorOr<NonnullRefPtr<VMObject>> AnonymousVMObject::try_clone()
dbgln_if(COMMIT_DEBUG, "Cloning {:p}, need {} committed cow pages", this, new_cow_pages_needed);
auto committed_pages = TRY(MM.commit_user_physical_pages(new_cow_pages_needed));
auto committed_pages = TRY(MM.commit_physical_pages(new_cow_pages_needed));
// Create or replace the committed cow pages. When cloning a previously
// cloned vmobject, we want to essentially "fork", leaving us and the
@@ -79,7 +79,7 @@ ErrorOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_siz
{
Optional<CommittedPhysicalPageSet> committed_pages;
if (strategy == AllocationStrategy::Reserve || strategy == AllocationStrategy::AllocateNow) {
committed_pages = TRY(MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE))));
committed_pages = TRY(MM.commit_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE))));
}
auto new_physical_pages = TRY(VMObject::try_create_physical_pages(size));
@@ -89,7 +89,7 @@ ErrorOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_siz
ErrorOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_physically_contiguous_with_size(size_t size)
{
auto contiguous_physical_pages = TRY(MM.allocate_contiguous_user_physical_pages(size));
auto contiguous_physical_pages = TRY(MM.allocate_contiguous_physical_pages(size));
auto new_physical_pages = TRY(FixedArray<RefPtr<PhysicalPage>>::try_create(contiguous_physical_pages.span()));
@@ -100,7 +100,7 @@ ErrorOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_purgeabl
{
Optional<CommittedPhysicalPageSet> committed_pages;
if (strategy == AllocationStrategy::Reserve || strategy == AllocationStrategy::AllocateNow) {
committed_pages = TRY(MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE))));
committed_pages = TRY(MM.commit_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE))));
}
auto new_physical_pages = TRY(VMObject::try_create_physical_pages(size));
@@ -257,7 +257,7 @@ ErrorOr<void> AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged
return {};
}
m_unused_committed_pages = TRY(MM.commit_user_physical_pages(committed_pages_needed));
m_unused_committed_pages = TRY(MM.commit_physical_pages(committed_pages_needed));
for (auto& page : m_physical_pages) {
if (page->is_shared_zero_page())
@@ -351,7 +351,7 @@ PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, Virtual
page = m_shared_committed_cow_pages->take_one();
} else {
dbgln_if(PAGE_FAULT_DEBUG, " >> It's a COW page and it's time to COW!");
auto page_or_error = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
auto page_or_error = MM.allocate_physical_page(MemoryManager::ShouldZeroFill::No);
if (page_or_error.is_error()) {
dmesgln("MM: handle_cow_fault was unable to allocate a physical page");
return PageFaultResponse::OutOfMemory;

View file

@@ -87,7 +87,7 @@ UNMAP_AFTER_INIT MemoryManager::MemoryManager()
protect_kernel_image();
// We're temporarily "committing" to two pages that we need to allocate below
auto committed_pages = commit_user_physical_pages(2).release_value();
auto committed_pages = commit_physical_pages(2).release_value();
m_shared_zero_page = committed_pages.take_one();
@@ -339,11 +339,11 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
}
for (auto& range : contiguous_physical_ranges) {
m_user_physical_regions.append(PhysicalRegion::try_create(range.lower, range.upper).release_nonnull());
m_physical_regions.append(PhysicalRegion::try_create(range.lower, range.upper).release_nonnull());
}
for (auto& region : m_user_physical_regions)
m_system_memory_info.user_physical_pages += region.size();
for (auto& region : m_physical_regions)
m_system_memory_info.physical_pages += region.size();
register_reserved_ranges();
for (auto& range : m_reserved_memory_ranges) {
@@ -352,16 +352,16 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
initialize_physical_pages();
VERIFY(m_system_memory_info.user_physical_pages > 0);
VERIFY(m_system_memory_info.physical_pages > 0);
// We start out with no committed pages
m_system_memory_info.user_physical_pages_uncommitted = m_system_memory_info.user_physical_pages;
m_system_memory_info.physical_pages_uncommitted = m_system_memory_info.physical_pages;
for (auto& used_range : m_used_memory_ranges) {
dmesgln("MM: {} range @ {} - {} (size {:#x})", UserMemoryRangeTypeNames[to_underlying(used_range.type)], used_range.start, used_range.end.offset(-1), used_range.end.as_ptr() - used_range.start.as_ptr());
}
for (auto& region : m_user_physical_regions) {
for (auto& region : m_physical_regions) {
dmesgln("MM: User physical region: {} - {} (size {:#x})", region.lower(), region.upper().offset(-1), PAGE_SIZE * region.size());
region.initialize_zones();
}
@@ -399,8 +399,8 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
// Now that we know how much memory we need for a contiguous array of PhysicalPage instances, find a memory region that can fit it
PhysicalRegion* found_region { nullptr };
Optional<size_t> found_region_index;
for (size_t i = 0; i < m_user_physical_regions.size(); ++i) {
auto& region = m_user_physical_regions[i];
for (size_t i = 0; i < m_physical_regions.size(); ++i) {
auto& region = m_physical_regions[i];
if (region.size() >= physical_page_array_pages_and_page_tables_count) {
found_region = &region;
found_region_index = i;
@@ -413,12 +413,12 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
VERIFY_NOT_REACHED();
}
VERIFY(m_system_memory_info.user_physical_pages >= physical_page_array_pages_and_page_tables_count);
m_system_memory_info.user_physical_pages -= physical_page_array_pages_and_page_tables_count;
VERIFY(m_system_memory_info.physical_pages >= physical_page_array_pages_and_page_tables_count);
m_system_memory_info.physical_pages -= physical_page_array_pages_and_page_tables_count;
if (found_region->size() == physical_page_array_pages_and_page_tables_count) {
// We're stealing the entire region
m_physical_pages_region = m_user_physical_regions.take(*found_region_index);
m_physical_pages_region = m_physical_regions.take(*found_region_index);
} else {
m_physical_pages_region = found_region->try_take_pages_from_beginning(physical_page_array_pages_and_page_tables_count);
}
@@ -567,7 +567,7 @@ PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
return &quickmap_pt(PhysicalAddress(pde.page_table_base()))[page_table_index];
bool did_purge = false;
auto page_table_or_error = allocate_user_physical_page(ShouldZeroFill::Yes, &did_purge);
auto page_table_or_error = allocate_physical_page(ShouldZeroFill::Yes, &did_purge);
if (page_table_or_error.is_error()) {
dbgln("MM: Unable to allocate page table to map {}", vaddr);
return nullptr;
@@ -756,7 +756,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_contiguous_kernel_region(
ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access, RefPtr<Memory::PhysicalPage>& dma_buffer_page)
{
dma_buffer_page = TRY(allocate_user_physical_page());
dma_buffer_page = TRY(allocate_physical_page());
// Do not enable Cache for this region as physical memory transfers are performed (Most architectures have this behaviour by default)
return allocate_kernel_region(dma_buffer_page->paddr(), PAGE_SIZE, name, access, Region::Cacheable::No);
}
@@ -771,7 +771,7 @@ ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(S
ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages)
{
VERIFY(!(size % PAGE_SIZE));
dma_buffer_pages = TRY(allocate_contiguous_user_physical_pages(size));
dma_buffer_pages = TRY(allocate_contiguous_physical_pages(size));
// Do not enable Cache for this region as physical memory transfers are performed (Most architectures have this behaviour by default)
return allocate_kernel_region(dma_buffer_pages.first().paddr(), size, name, access, Region::Cacheable::No);
}
@@ -824,27 +824,27 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_vmobje
return region;
}
ErrorOr<CommittedPhysicalPageSet> MemoryManager::commit_user_physical_pages(size_t page_count)
ErrorOr<CommittedPhysicalPageSet> MemoryManager::commit_physical_pages(size_t page_count)
{
VERIFY(page_count > 0);
SpinlockLocker lock(s_mm_lock);
if (m_system_memory_info.user_physical_pages_uncommitted < page_count)
if (m_system_memory_info.physical_pages_uncommitted < page_count)
return ENOMEM;
m_system_memory_info.user_physical_pages_uncommitted -= page_count;
m_system_memory_info.user_physical_pages_committed += page_count;
m_system_memory_info.physical_pages_uncommitted -= page_count;
m_system_memory_info.physical_pages_committed += page_count;
return CommittedPhysicalPageSet { {}, page_count };
}
void MemoryManager::uncommit_user_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count)
void MemoryManager::uncommit_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count)
{
VERIFY(page_count > 0);
SpinlockLocker lock(s_mm_lock);
VERIFY(m_system_memory_info.user_physical_pages_committed >= page_count);
VERIFY(m_system_memory_info.physical_pages_committed >= page_count);
m_system_memory_info.user_physical_pages_uncommitted += page_count;
m_system_memory_info.user_physical_pages_committed -= page_count;
m_system_memory_info.physical_pages_uncommitted += page_count;
m_system_memory_info.physical_pages_committed -= page_count;
}
void MemoryManager::deallocate_physical_page(PhysicalAddress paddr)
@@ -852,40 +852,40 @@ void MemoryManager::deallocate_physical_page(PhysicalAddress paddr)
SpinlockLocker lock(s_mm_lock);
// Are we returning a user page?
for (auto& region : m_user_physical_regions) {
for (auto& region : m_physical_regions) {
if (!region.contains(paddr))
continue;
region.return_page(paddr);
--m_system_memory_info.user_physical_pages_used;
--m_system_memory_info.physical_pages_used;
// Always return pages to the uncommitted pool. Pages that were
// committed and allocated are only freed upon request. Once
// returned there is no guarantee being able to get them back.
++m_system_memory_info.user_physical_pages_uncommitted;
++m_system_memory_info.physical_pages_uncommitted;
return;
}
PANIC("MM: deallocate_user_physical_page couldn't figure out region for page @ {}", paddr);
PANIC("MM: deallocate_physical_page couldn't figure out region for page @ {}", paddr);
}
RefPtr<PhysicalPage> MemoryManager::find_free_user_physical_page(bool committed)
RefPtr<PhysicalPage> MemoryManager::find_free_physical_page(bool committed)
{
VERIFY(s_mm_lock.is_locked());
RefPtr<PhysicalPage> page;
if (committed) {
// Draw from the committed pages pool. We should always have these pages available
VERIFY(m_system_memory_info.user_physical_pages_committed > 0);
m_system_memory_info.user_physical_pages_committed--;
VERIFY(m_system_memory_info.physical_pages_committed > 0);
m_system_memory_info.physical_pages_committed--;
} else {
// We need to make sure we don't touch pages that we have committed to
if (m_system_memory_info.user_physical_pages_uncommitted == 0)
if (m_system_memory_info.physical_pages_uncommitted == 0)
return {};
m_system_memory_info.user_physical_pages_uncommitted--;
m_system_memory_info.physical_pages_uncommitted--;
}
for (auto& region : m_user_physical_regions) {
for (auto& region : m_physical_regions) {
page = region.take_free_page();
if (!page.is_null()) {
++m_system_memory_info.user_physical_pages_used;
++m_system_memory_info.physical_pages_used;
break;
}
}
@@ -893,10 +893,10 @@ RefPtr<PhysicalPage> MemoryManager::find_free_user_physical_page(bool committed)
return page;
}
NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_user_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill should_zero_fill)
NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill should_zero_fill)
{
SpinlockLocker lock(s_mm_lock);
auto page = find_free_user_physical_page(true);
auto page = find_free_physical_page(true);
if (should_zero_fill == ShouldZeroFill::Yes) {
auto* ptr = quickmap_page(*page);
memset(ptr, 0, PAGE_SIZE);
@@ -905,10 +905,10 @@ NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_user_physical_page
return page.release_nonnull();
}
ErrorOr<NonnullRefPtr<PhysicalPage>> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
ErrorOr<NonnullRefPtr<PhysicalPage>> MemoryManager::allocate_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
{
SpinlockLocker lock(s_mm_lock);
auto page = find_free_user_physical_page(false);
auto page = find_free_physical_page(false);
bool purged_pages = false;
if (!page) {
@@ -922,7 +922,7 @@ ErrorOr<NonnullRefPtr<PhysicalPage>> MemoryManager::allocate_user_physical_page(
return IterationDecision::Continue;
if (auto purged_page_count = anonymous_vmobject.purge()) {
dbgln("MM: Purge saved the day! Purged {} pages from AnonymousVMObject", purged_page_count);
page = find_free_user_physical_page(false);
page = find_free_physical_page(false);
purged_pages = true;
VERIFY(page);
return IterationDecision::Break;
@@ -946,25 +946,25 @@ ErrorOr<NonnullRefPtr<PhysicalPage>> MemoryManager::allocate_user_physical_page(
return page.release_nonnull();
}
ErrorOr<NonnullRefPtrVector<PhysicalPage>> MemoryManager::allocate_contiguous_user_physical_pages(size_t size)
ErrorOr<NonnullRefPtrVector<PhysicalPage>> MemoryManager::allocate_contiguous_physical_pages(size_t size)
{
VERIFY(!(size % PAGE_SIZE));
SpinlockLocker lock(s_mm_lock);
size_t page_count = ceil_div(size, static_cast<size_t>(PAGE_SIZE));
// We need to make sure we don't touch pages that we have committed to
if (m_system_memory_info.user_physical_pages_uncommitted < page_count)
if (m_system_memory_info.physical_pages_uncommitted < page_count)
return ENOMEM;
for (auto& physical_region : m_user_physical_regions) {
for (auto& physical_region : m_physical_regions) {
auto physical_pages = physical_region.take_contiguous_free_pages(page_count);
if (!physical_pages.is_empty()) {
{
auto cleanup_region = TRY(MM.allocate_kernel_region(physical_pages[0].paddr(), PAGE_SIZE * page_count, "MemoryManager Allocation Sanitization"sv, Region::Access::Read | Region::Access::Write));
memset(cleanup_region->vaddr().as_ptr(), 0, PAGE_SIZE * page_count);
}
m_system_memory_info.user_physical_pages_uncommitted -= page_count;
m_system_memory_info.user_physical_pages_used += page_count;
m_system_memory_info.physical_pages_uncommitted -= page_count;
m_system_memory_info.physical_pages_used += page_count;
return physical_pages;
}
}
@@ -1149,21 +1149,21 @@ void MemoryManager::set_page_writable_direct(VirtualAddress vaddr, bool writable
CommittedPhysicalPageSet::~CommittedPhysicalPageSet()
{
if (m_page_count)
MM.uncommit_user_physical_pages({}, m_page_count);
MM.uncommit_physical_pages({}, m_page_count);
}
NonnullRefPtr<PhysicalPage> CommittedPhysicalPageSet::take_one()
{
VERIFY(m_page_count > 0);
--m_page_count;
return MM.allocate_committed_user_physical_page({}, MemoryManager::ShouldZeroFill::Yes);
return MM.allocate_committed_physical_page({}, MemoryManager::ShouldZeroFill::Yes);
}
void CommittedPhysicalPageSet::uncommit_one()
{
VERIFY(m_page_count > 0);
--m_page_count;
MM.uncommit_user_physical_pages({}, 1);
MM.uncommit_physical_pages({}, 1);
}
void MemoryManager::copy_physical_page(PhysicalPage& physical_page, u8 page_buffer[PAGE_SIZE])

View file

@@ -170,12 +170,12 @@ public:
Yes
};
ErrorOr<CommittedPhysicalPageSet> commit_user_physical_pages(size_t page_count);
void uncommit_user_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count);
ErrorOr<CommittedPhysicalPageSet> commit_physical_pages(size_t page_count);
void uncommit_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count);
NonnullRefPtr<PhysicalPage> allocate_committed_user_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
ErrorOr<NonnullRefPtr<PhysicalPage>> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
ErrorOr<NonnullRefPtrVector<PhysicalPage>> allocate_contiguous_user_physical_pages(size_t size);
NonnullRefPtr<PhysicalPage> allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
ErrorOr<NonnullRefPtr<PhysicalPage>> allocate_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
ErrorOr<NonnullRefPtrVector<PhysicalPage>> allocate_contiguous_physical_pages(size_t size);
void deallocate_physical_page(PhysicalAddress);
ErrorOr<NonnullOwnPtr<Region>> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
@@ -190,10 +190,10 @@ public:
ErrorOr<NonnullOwnPtr<Region>> create_identity_mapped_region(PhysicalAddress, size_t);
struct SystemMemoryInfo {
PhysicalSize user_physical_pages { 0 };
PhysicalSize user_physical_pages_used { 0 };
PhysicalSize user_physical_pages_committed { 0 };
PhysicalSize user_physical_pages_uncommitted { 0 };
PhysicalSize physical_pages { 0 };
PhysicalSize physical_pages_used { 0 };
PhysicalSize physical_pages_committed { 0 };
PhysicalSize physical_pages_uncommitted { 0 };
};
SystemMemoryInfo get_system_memory_info()
@@ -263,7 +263,7 @@ private:
static Region* find_region_from_vaddr(VirtualAddress);
RefPtr<PhysicalPage> find_free_user_physical_page(bool);
RefPtr<PhysicalPage> find_free_physical_page(bool);
ALWAYS_INLINE u8* quickmap_page(PhysicalPage& page)
{
@@ -285,8 +285,8 @@ private:
ALWAYS_INLINE void verify_system_memory_info_consistency() const
{
auto user_physical_pages_unused = m_system_memory_info.user_physical_pages_committed + m_system_memory_info.user_physical_pages_uncommitted;
VERIFY(m_system_memory_info.user_physical_pages == (m_system_memory_info.user_physical_pages_used + user_physical_pages_unused));
auto physical_pages_unused = m_system_memory_info.physical_pages_committed + m_system_memory_info.physical_pages_uncommitted;
VERIFY(m_system_memory_info.physical_pages == (m_system_memory_info.physical_pages_used + physical_pages_unused));
}
RefPtr<PageDirectory> m_kernel_page_directory;
@@ -296,7 +296,7 @@ private:
SystemMemoryInfo m_system_memory_info;
NonnullOwnPtrVector<PhysicalRegion> m_user_physical_regions;
NonnullOwnPtrVector<PhysicalRegion> m_physical_regions;
OwnPtr<PhysicalRegion> m_physical_pages_region;
PhysicalPageEntry* m_physical_page_entries { nullptr };
size_t m_physical_page_entries_count { 0 };
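A note on the header changes above: verify_system_memory_info_consistency() expresses the same accounting invariant under the new names, namely that every physical page is counted exactly once as used, committed, or uncommitted. Below is a standalone restatement with arbitrary example values; the struct only mirrors the renamed SystemMemoryInfo fields and is not kernel code:

    #include <cassert>

    struct ExampleSystemMemoryInfo {
        unsigned long long physical_pages { 0 };
        unsigned long long physical_pages_used { 0 };
        unsigned long long physical_pages_committed { 0 };
        unsigned long long physical_pages_uncommitted { 0 };
    };

    int main()
    {
        // Arbitrary example values chosen to satisfy the invariant.
        ExampleSystemMemoryInfo info { 1000, 600, 150, 250 };
        auto physical_pages_unused = info.physical_pages_committed + info.physical_pages_uncommitted;
        assert(info.physical_pages == info.physical_pages_used + physical_pages_unused);
        return 0;
    }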

View file

@@ -33,13 +33,13 @@ ErrorOr<NonnullRefPtr<PageDirectory>> PageDirectory::try_create_for_userspace()
SpinlockLocker lock(s_mm_lock);
#if ARCH(X86_64)
directory->m_pml4t = TRY(MM.allocate_user_physical_page());
directory->m_pml4t = TRY(MM.allocate_physical_page());
#endif
directory->m_directory_table = TRY(MM.allocate_user_physical_page());
directory->m_directory_table = TRY(MM.allocate_physical_page());
auto kernel_pd_index = (kernel_mapping_base >> 30) & 0x1ffu;
for (size_t i = 0; i < kernel_pd_index; i++) {
directory->m_directory_pages[i] = TRY(MM.allocate_user_physical_page());
directory->m_directory_pages[i] = TRY(MM.allocate_physical_page());
}
// Share the top 1 GiB of kernel-only mappings (>=kernel_mapping_base)

View file

@@ -428,7 +428,7 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page({});
dbgln_if(PAGE_FAULT_DEBUG, " >> ALLOCATED COMMITTED {}", page_slot->paddr());
} else {
auto page_or_error = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
auto page_or_error = MM.allocate_physical_page(MemoryManager::ShouldZeroFill::Yes);
if (page_or_error.is_error()) {
dmesgln("MM: handle_zero_fault was unable to allocate a physical page");
return PageFaultResponse::OutOfMemory;
@@ -517,7 +517,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
return PageFaultResponse::Continue;
}
auto vmobject_physical_page_or_error = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
auto vmobject_physical_page_or_error = MM.allocate_physical_page(MemoryManager::ShouldZeroFill::No);
if (vmobject_physical_page_or_error.is_error()) {
dmesgln("MM: handle_inode_fault was unable to allocate a physical page");
return PageFaultResponse::OutOfMemory;

View file

@@ -14,7 +14,7 @@ ErrorOr<NonnullRefPtr<SharedFramebufferVMObject>> SharedFramebufferVMObject::try
{
auto real_framebuffer_vmobject = TRY(AnonymousVMObject::try_create_for_physical_range(paddr, size));
auto new_physical_pages = TRY(VMObject::try_create_physical_pages(size));
auto committed_pages = TRY(MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE))));
auto committed_pages = TRY(MM.commit_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE))));
auto vm_object = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) SharedFramebufferVMObject(move(new_physical_pages), move(committed_pages), real_framebuffer_vmobject)));
TRY(vm_object->create_fake_writes_framebuffer_vm_object());
TRY(vm_object->create_real_writes_framebuffer_vm_object());
@@ -25,7 +25,7 @@ ErrorOr<NonnullRefPtr<SharedFramebufferVMObject>> SharedFramebufferVMObject::try
{
auto real_framebuffer_vmobject = TRY(AnonymousVMObject::try_create_with_size(size, AllocationStrategy::AllocateNow));
auto new_physical_pages = TRY(VMObject::try_create_physical_pages(size));
auto committed_pages = TRY(MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE))));
auto committed_pages = TRY(MM.commit_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE))));
auto vm_object = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) SharedFramebufferVMObject(move(new_physical_pages), move(committed_pages), real_framebuffer_vmobject)));
TRY(vm_object->create_fake_writes_framebuffer_vm_object());
TRY(vm_object->create_real_writes_framebuffer_vm_object());

View file

@@ -22,7 +22,7 @@ namespace Kernel {
UNMAP_AFTER_INIT ErrorOr<NonnullRefPtr<AHCIPort>> AHCIPort::create(AHCIController const& controller, AHCI::HBADefinedCapabilities hba_capabilities, volatile AHCI::PortRegisters& registers, u32 port_index)
{
auto identify_buffer_page = MUST(MM.allocate_user_physical_page());
auto identify_buffer_page = MUST(MM.allocate_physical_page());
auto port = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) AHCIPort(controller, move(identify_buffer_page), hba_capabilities, registers, port_index)));
TRY(port->allocate_resources_and_initialize_ports());
return port;
@@ -35,14 +35,14 @@ ErrorOr<void> AHCIPort::allocate_resources_and_initialize_ports()
return {};
}
m_fis_receive_page = TRY(MM.allocate_user_physical_page());
m_fis_receive_page = TRY(MM.allocate_physical_page());
for (size_t index = 0; index < 1; index++) {
auto dma_page = TRY(MM.allocate_user_physical_page());
auto dma_page = TRY(MM.allocate_physical_page());
m_dma_buffers.append(move(dma_page));
}
for (size_t index = 0; index < 1; index++) {
auto command_table_page = TRY(MM.allocate_user_physical_page());
auto command_table_page = TRY(MM.allocate_physical_page());
m_command_table_pages.append(move(command_table_page));
}

View file

@@ -193,13 +193,13 @@ private:
auto const& obj = json.as_object();
unsigned kmalloc_allocated = obj.get("kmalloc_allocated"sv).to_u32();
unsigned kmalloc_available = obj.get("kmalloc_available"sv).to_u32();
auto user_physical_allocated = obj.get("user_physical_allocated"sv).to_u64();
auto user_physical_committed = obj.get("user_physical_committed"sv).to_u64();
auto user_physical_uncommitted = obj.get("user_physical_uncommitted"sv).to_u64();
auto physical_allocated = obj.get("physical_allocated"sv).to_u64();
auto physical_committed = obj.get("physical_committed"sv).to_u64();
auto physical_uncommitted = obj.get("physical_uncommitted"sv).to_u64();
unsigned kmalloc_bytes_total = kmalloc_allocated + kmalloc_available;
unsigned kmalloc_pages_total = (kmalloc_bytes_total + PAGE_SIZE - 1) / PAGE_SIZE;
u64 total_userphysical_and_swappable_pages = kmalloc_pages_total + user_physical_allocated + user_physical_committed + user_physical_uncommitted;
allocated = kmalloc_allocated + ((user_physical_allocated + user_physical_committed) * PAGE_SIZE);
u64 total_userphysical_and_swappable_pages = kmalloc_pages_total + physical_allocated + physical_committed + physical_uncommitted;
allocated = kmalloc_allocated + ((physical_allocated + physical_committed) * PAGE_SIZE);
available = (total_userphysical_and_swappable_pages * PAGE_SIZE) - allocated;
return true;
}
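
The calculation in this applet only swaps the JSON key names; the derivation of the allocated and available byte counts is unchanged. Here is a small standalone recomputation with arbitrary example values (a 4 KiB page size is assumed; the numbers are illustrative, not measurements):

    #include <cstdio>

    int main()
    {
        constexpr unsigned long long page_size = 4096;            // assumed 4 KiB pages
        unsigned long long kmalloc_allocated = 6 * 1024 * 1024;   // bytes ("kmalloc_allocated")
        unsigned long long kmalloc_available = 2 * 1024 * 1024;   // bytes ("kmalloc_available")
        unsigned long long physical_allocated = 50000;             // pages ("physical_allocated")
        unsigned long long physical_committed = 10000;             // pages ("physical_committed")
        unsigned long long physical_uncommitted = 40000;           // pages ("physical_uncommitted")

        auto kmalloc_bytes_total = kmalloc_allocated + kmalloc_available;
        auto kmalloc_pages_total = (kmalloc_bytes_total + page_size - 1) / page_size;
        auto total_pages = kmalloc_pages_total + physical_allocated + physical_committed + physical_uncommitted;

        auto allocated = kmalloc_allocated + (physical_allocated + physical_committed) * page_size;
        auto available = total_pages * page_size - allocated;
        std::printf("allocated = %llu bytes, available = %llu bytes\n", allocated, available);
        return 0;
    }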

View file

@@ -60,8 +60,8 @@ MemoryStatsWidget::MemoryStatsWidget(GraphWidget* graph)
return label;
};
m_user_physical_pages_label = build_widgets_for_label("Physical memory:");
m_user_physical_pages_committed_label = build_widgets_for_label("Committed memory:");
m_physical_pages_label = build_widgets_for_label("Physical memory:");
m_physical_pages_committed_label = build_widgets_for_label("Committed memory:");
m_kmalloc_space_label = build_widgets_for_label("Kernel heap:");
m_kmalloc_count_label = build_widgets_for_label("Calls kmalloc:");
m_kfree_count_label = build_widgets_for_label("Calls kfree:");
@@ -115,23 +115,22 @@ void MemoryStatsWidget::refresh()
u32 kmalloc_allocated = json.get("kmalloc_allocated"sv).to_u32();
u32 kmalloc_available = json.get("kmalloc_available"sv).to_u32();
u64 user_physical_allocated = json.get("user_physical_allocated"sv).to_u64();
u64 user_physical_available = json.get("user_physical_available"sv).to_u64();
u64 user_physical_committed = json.get("user_physical_committed"sv).to_u64();
u64 user_physical_uncommitted = json.get("user_physical_uncommitted"sv).to_u64();
u64 physical_allocated = json.get("physical_allocated"sv).to_u64();
u64 physical_available = json.get("physical_available"sv).to_u64();
u64 physical_committed = json.get("physical_committed"sv).to_u64();
u64 physical_uncommitted = json.get("physical_uncommitted"sv).to_u64();
u32 kmalloc_call_count = json.get("kmalloc_call_count"sv).to_u32();
u32 kfree_call_count = json.get("kfree_call_count"sv).to_u32();
u64 kmalloc_bytes_total = kmalloc_allocated + kmalloc_available;
u64 user_physical_pages_total = user_physical_allocated + user_physical_available;
u64 physical_pages_total = physical_allocated + physical_available;
u64 physical_pages_total = user_physical_pages_total;
u64 physical_pages_in_use = user_physical_allocated;
u64 total_userphysical_and_swappable_pages = user_physical_allocated + user_physical_committed + user_physical_uncommitted;
u64 physical_pages_in_use = physical_allocated;
u64 total_userphysical_and_swappable_pages = physical_allocated + physical_committed + physical_uncommitted;
m_kmalloc_space_label->set_text(String::formatted("{}/{}", human_readable_size(kmalloc_allocated), human_readable_size(kmalloc_bytes_total)));
m_user_physical_pages_label->set_text(String::formatted("{}/{}", human_readable_size(page_count_to_bytes(physical_pages_in_use)), human_readable_size(page_count_to_bytes(physical_pages_total))));
m_user_physical_pages_committed_label->set_text(String::formatted("{}", human_readable_size(page_count_to_bytes(user_physical_committed))));
m_physical_pages_label->set_text(String::formatted("{}/{}", human_readable_size(page_count_to_bytes(physical_pages_in_use)), human_readable_size(page_count_to_bytes(physical_pages_total))));
m_physical_pages_committed_label->set_text(String::formatted("{}", human_readable_size(page_count_to_bytes(physical_committed))));
m_kmalloc_count_label->set_text(String::formatted("{}", kmalloc_call_count));
m_kfree_count_label->set_text(String::formatted("{}", kfree_call_count));
m_kmalloc_difference_label->set_text(String::formatted("{:+}", kmalloc_call_count - kfree_call_count));
@@ -143,7 +142,7 @@ void MemoryStatsWidget::refresh()
if (m_graph) {
m_graph->set_max(page_count_to_bytes(total_userphysical_and_swappable_pages) + kmalloc_bytes_total);
m_graph->add_value({ page_count_to_bytes(user_physical_committed), page_count_to_bytes(user_physical_allocated), kmalloc_bytes_total });
m_graph->add_value({ page_count_to_bytes(physical_committed), page_count_to_bytes(physical_allocated), kmalloc_bytes_total });
}
}

View file

@@ -34,8 +34,8 @@ private:
GraphWidget* m_graph;
// Is null if we have a valid graph
String m_graph_widget_name {};
RefPtr<GUI::Label> m_user_physical_pages_label;
RefPtr<GUI::Label> m_user_physical_pages_committed_label;
RefPtr<GUI::Label> m_physical_pages_label;
RefPtr<GUI::Label> m_physical_pages_committed_label;
RefPtr<GUI::Label> m_kmalloc_space_label;
RefPtr<GUI::Label> m_kmalloc_count_label;
RefPtr<GUI::Label> m_kfree_count_label;