Kernel: Remove MM_DEBUG debug spam code
This was too spammy to ever actually be used anyway.
This commit is contained in:
parent
7c4ddecacb
commit
f7435dd95f
Notes:
sideshowbarker
2024-07-18 23:55:16 +09:00
Author: https://github.com/awesomekling Commit: https://github.com/SerenityOS/serenity/commit/f7435dd95f0
6 changed files with 1 addition and 77 deletions
|
@ -57,7 +57,6 @@
|
|||
|
||||
//#define DEBUG_IO
|
||||
//#define DEBUG_POLL_SELECT
|
||||
//#define MM_DEBUG
|
||||
//#define PROCESS_DEBUG
|
||||
//#define SIGNAL_DEBUG
|
||||
|
||||
|
@ -359,9 +358,6 @@ Process::Process(RefPtr<Thread>& first_thread, const String& name, uid_t uid, gi
|
|||
#endif
|
||||
|
||||
m_page_directory = PageDirectory::create_for_userspace(*this, fork_parent ? &fork_parent->page_directory().range_allocator() : nullptr);
|
||||
#ifdef MM_DEBUG
|
||||
dbg() << "Process " << pid().value() << " ctor: PD=" << m_page_directory.ptr() << " created";
|
||||
#endif
|
||||
|
||||
if (fork_parent) {
|
||||
// NOTE: fork() doesn't clone all threads; the thread that called fork() becomes the only thread in the new process.
|
||||
|
|
|
@ -41,7 +41,6 @@
|
|||
#include <Kernel/VM/PhysicalRegion.h>
|
||||
#include <Kernel/VM/SharedInodeVMObject.h>
|
||||
|
||||
//#define MM_DEBUG
|
||||
//#define PAGE_FAULT_DEBUG
|
||||
|
||||
extern u8* start_of_kernel_image;
|
||||
|
@ -159,10 +158,6 @@ void MemoryManager::parse_memory_map()
|
|||
continue;
|
||||
}
|
||||
|
||||
#ifdef MM_DEBUG
|
||||
klog() << "MM: considering memory at " << String::format("%p", (void*)mmap->addr) << " - " << String::format("%p", (void*)(mmap->addr + mmap->len));
|
||||
#endif
|
||||
|
||||
for (size_t page_base = mmap->addr; page_base <= (mmap->addr + mmap->len); page_base += PAGE_SIZE) {
|
||||
auto addr = PhysicalAddress(page_base);
|
||||
|
||||
|
@ -237,9 +232,6 @@ PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
|
|||
auto* pd = quickmap_pd(page_directory, page_directory_table_index);
|
||||
PageDirectoryEntry& pde = pd[page_directory_index];
|
||||
if (!pde.is_present()) {
|
||||
#ifdef MM_DEBUG
|
||||
dbg() << "MM: PDE " << page_directory_index << " not present (requested for " << vaddr << "), allocating";
|
||||
#endif
|
||||
bool did_purge = false;
|
||||
auto page_table = allocate_user_physical_page(ShouldZeroFill::Yes, &did_purge);
|
||||
if (!page_table) {
|
||||
|
@ -255,9 +247,6 @@ PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
|
|||
|
||||
ASSERT(!pde.is_present()); // Should have not changed
|
||||
}
|
||||
#ifdef MM_DEBUG
|
||||
dbg() << "MM: PD K" << &page_directory << " (" << (&page_directory == m_kernel_page_directory ? "Kernel" : "User") << ") at " << PhysicalAddress(page_directory.cr3()) << " allocated page table #" << page_directory_index << " (for " << vaddr << ") at " << page_table->paddr();
|
||||
#endif
|
||||
pde.set_page_table_base(page_table->paddr().get());
|
||||
pde.set_user_allowed(true);
|
||||
pde.set_present(true);
|
||||
|
@ -303,9 +292,6 @@ void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress va
|
|||
|
||||
auto result = page_directory.m_page_tables.remove(vaddr.get() & ~0x1fffff);
|
||||
ASSERT(result);
|
||||
#ifdef MM_DEBUG
|
||||
dbg() << "MM: Released page table for " << VirtualAddress(vaddr.get() & ~0x1fffff);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -314,9 +300,6 @@ void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress va
|
|||
void MemoryManager::initialize(u32 cpu)
|
||||
{
|
||||
auto mm_data = new MemoryManagerData;
|
||||
#ifdef MM_DEBUG
|
||||
dbg() << "MM: Processor #" << cpu << " specific data at " << VirtualAddress(mm_data);
|
||||
#endif
|
||||
Processor::current().set_mm_data(*mm_data);
|
||||
|
||||
if (cpu == 0) {
|
||||
|
@ -343,9 +326,6 @@ Region* MemoryManager::user_region_from_vaddr(Process& process, VirtualAddress v
|
|||
if (region.contains(vaddr))
|
||||
return &region;
|
||||
}
|
||||
#ifdef MM_DEBUG
|
||||
dbg() << process << " Couldn't find user region for " << vaddr;
|
||||
#endif
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
|
@ -588,10 +568,6 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s
|
|||
}
|
||||
}
|
||||
|
||||
#ifdef MM_DEBUG
|
||||
dbg() << "MM: allocate_user_physical_page vending " << page->paddr();
|
||||
#endif
|
||||
|
||||
if (should_zero_fill == ShouldZeroFill::Yes) {
|
||||
auto* ptr = quickmap_page(*page);
|
||||
memset(ptr, 0, PAGE_SIZE);
|
||||
|
@ -671,10 +647,6 @@ RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
|
|||
return {};
|
||||
}
|
||||
|
||||
#ifdef MM_DEBUG
|
||||
dbg() << "MM: allocate_supervisor_physical_page vending " << page->paddr();
|
||||
#endif
|
||||
|
||||
fast_u32_fill((u32*)page->paddr().offset(0xc0000000).as_ptr(), 0, PAGE_SIZE / sizeof(u32));
|
||||
++m_super_physical_pages_used;
|
||||
return page;
|
||||
|
@ -692,17 +664,11 @@ void MemoryManager::enter_process_paging_scope(Process& process)
|
|||
|
||||
void MemoryManager::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
|
||||
{
|
||||
#ifdef MM_DEBUG
|
||||
dbg() << "MM: Flush " << page_count << " pages at " << vaddr << " on CPU#" << Processor::current().id();
|
||||
#endif
|
||||
Processor::flush_tlb_local(vaddr, page_count);
|
||||
}
|
||||
|
||||
void MemoryManager::flush_tlb(const PageDirectory* page_directory, VirtualAddress vaddr, size_t page_count)
|
||||
{
|
||||
#ifdef MM_DEBUG
|
||||
dbg() << "MM: Flush " << page_count << " pages at " << vaddr;
|
||||
#endif
|
||||
Processor::flush_tlb(page_directory, vaddr, page_count);
|
||||
}
|
||||
|
||||
|
@ -715,9 +681,6 @@ PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t
|
|||
auto& pte = boot_pd3_pt1023[4];
|
||||
auto pd_paddr = directory.m_directory_pages[pdpt_index]->paddr();
|
||||
if (pte.physical_page_base() != pd_paddr.as_ptr()) {
|
||||
#ifdef MM_DEBUG
|
||||
dbg() << "quickmap_pd: Mapping P" << (void*)directory.m_directory_pages[pdpt_index]->paddr().as_ptr() << " at 0xffe04000 in pte @ " << &pte;
|
||||
#endif
|
||||
pte.set_physical_page_base(pd_paddr.get());
|
||||
pte.set_present(true);
|
||||
pte.set_writable(true);
|
||||
|
@ -743,9 +706,6 @@ PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
|
|||
auto& mm_data = get_data();
|
||||
auto& pte = boot_pd3_pt1023[0];
|
||||
if (pte.physical_page_base() != pt_paddr.as_ptr()) {
|
||||
#ifdef MM_DEBUG
|
||||
dbg() << "quickmap_pt: Mapping P" << (void*)pt_paddr.as_ptr() << " at 0xffe00000 in pte @ " << &pte;
|
||||
#endif
|
||||
pte.set_physical_page_base(pt_paddr.get());
|
||||
pte.set_present(true);
|
||||
pte.set_writable(true);
|
||||
|
@ -777,9 +737,6 @@ u8* MemoryManager::quickmap_page(PhysicalPage& physical_page)
|
|||
|
||||
auto& pte = boot_pd3_pt1023[pte_idx];
|
||||
if (pte.physical_page_base() != physical_page.paddr().as_ptr()) {
|
||||
#ifdef MM_DEBUG
|
||||
dbg() << "quickmap_page: Mapping P" << (void*)physical_page.paddr().as_ptr() << " at 0xffe08000 in pte @ " << &pte;
|
||||
#endif
|
||||
pte.set_physical_page_base(physical_page.paddr().get());
|
||||
pte.set_present(true);
|
||||
pte.set_writable(true);
|
||||
|
|
|
@ -150,9 +150,6 @@ PageDirectory::PageDirectory(Process& process, const RangeAllocator* parent_rang
|
|||
|
||||
PageDirectory::~PageDirectory()
|
||||
{
|
||||
#ifdef MM_DEBUG
|
||||
dbg() << "MM: ~PageDirectory K" << this;
|
||||
#endif
|
||||
ScopedSpinLock lock(s_mm_lock);
|
||||
if (m_process)
|
||||
cr3_map().remove(cr3());
|
||||
|
|
|
@ -50,10 +50,6 @@ void PhysicalPage::return_to_freelist() const
|
|||
MM.deallocate_supervisor_physical_page(*this);
|
||||
else
|
||||
MM.deallocate_user_physical_page(*this);
|
||||
|
||||
#ifdef MM_DEBUG
|
||||
dbgln("MM: {} released to freelist", m_paddr);
|
||||
#endif
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -35,7 +35,6 @@
|
|||
#include <Kernel/VM/Region.h>
|
||||
#include <Kernel/VM/SharedInodeVMObject.h>
|
||||
|
||||
//#define MM_DEBUG
|
||||
//#define PAGE_FAULT_DEBUG
|
||||
|
||||
namespace Kernel {
|
||||
|
@ -110,9 +109,6 @@ OwnPtr<Region> Region::clone(Process& new_owner)
|
|||
|
||||
if (m_shared) {
|
||||
ASSERT(!m_stack);
|
||||
#ifdef MM_DEBUG
|
||||
dbg() << "Region::clone(): Sharing " << name() << " (" << vaddr() << ")";
|
||||
#endif
|
||||
if (vmobject().is_inode())
|
||||
ASSERT(vmobject().is_shared_inode());
|
||||
|
||||
|
@ -132,9 +128,6 @@ OwnPtr<Region> Region::clone(Process& new_owner)
|
|||
if (!vmobject_clone)
|
||||
return {};
|
||||
|
||||
#ifdef MM_DEBUG
|
||||
dbg() << "Region::clone(): CoWing " << name() << " (" << vaddr() << ")";
|
||||
#endif
|
||||
// Set up a COW region. The parent (this) region becomes COW as well!
|
||||
remap();
|
||||
auto clone_region = Region::create_user_accessible(&new_owner, m_range, vmobject_clone.release_nonnull(), m_offset_in_vmobject, m_name, m_access);
|
||||
|
@ -276,12 +269,8 @@ bool Region::map_individual_page_impl(size_t page_index)
|
|||
ASSERT(m_page_directory->get_lock().own_lock());
|
||||
auto page_vaddr = vaddr_from_page_index(page_index);
|
||||
auto* pte = MM.ensure_pte(*m_page_directory, page_vaddr);
|
||||
if (!pte) {
|
||||
#ifdef MM_DEBUG
|
||||
dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << " " << name() << " cannot create PTE for " << page_vaddr;
|
||||
#endif
|
||||
if (!pte)
|
||||
return false;
|
||||
}
|
||||
auto* page = physical_page(page_index);
|
||||
if (!page || (!is_readable() && !is_writable())) {
|
||||
pte->clear();
|
||||
|
@ -296,9 +285,6 @@ bool Region::map_individual_page_impl(size_t page_index)
|
|||
if (Processor::current().has_feature(CPUFeature::NX))
|
||||
pte->set_execute_disabled(!is_executable());
|
||||
pte->set_user_allowed(is_user_accessible());
|
||||
#ifdef MM_DEBUG
|
||||
dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << ", PTE=" << (void*)pte->raw() << "{" << pte << "}) " << name() << " " << page_vaddr << " => " << page->paddr() << " (@" << page << ")";
|
||||
#endif
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
@ -405,9 +391,6 @@ bool Region::map(PageDirectory& page_directory)
|
|||
ScopedSpinLock lock(s_mm_lock);
|
||||
ScopedSpinLock page_lock(page_directory.get_lock());
|
||||
set_page_directory(page_directory);
|
||||
#ifdef MM_DEBUG
|
||||
dbg() << "MM: Region::map() will map VMO pages " << first_page_index() << " - " << last_page_index() << " (VMO page count: " << vmobject().page_count() << ")";
|
||||
#endif
|
||||
size_t page_index = 0;
|
||||
while (page_index < page_count()) {
|
||||
if (!map_individual_page_impl(page_index))
|
||||
|
@ -576,10 +559,6 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
|
|||
if (current_thread)
|
||||
current_thread->did_inode_fault();
|
||||
|
||||
#ifdef MM_DEBUG
|
||||
dbgln("MM: page_in_from_inode ready to read from inode");
|
||||
#endif
|
||||
|
||||
u8 page_buffer[PAGE_SIZE];
|
||||
auto& inode = inode_vmobject.inode();
|
||||
auto buffer = UserOrKernelBuffer::for_kernel_buffer(page_buffer);
|
||||
|
|
|
@ -103,7 +103,6 @@ add_compile_definitions("MBR_DEBUG")
|
|||
add_compile_definitions("MEMORY_DEBUG")
|
||||
add_compile_definitions("MENU_DEBUG")
|
||||
add_compile_definitions("MINIMIZE_ANIMATION_DEBUG")
|
||||
add_compile_definitions("MM_DEBUG")
|
||||
add_compile_definitions("MOVE_DEBUG")
|
||||
add_compile_definitions("MULTIPROCESSOR_DEBUG")
|
||||
add_compile_definitions("NETWORK_TASK_DEBUG")
|
||||
|
|
Loading…
Add table
Reference in a new issue