Kernel: Add implied auto qualifiers in Memory

Hendiadyoin1 2021-12-28 19:54:05 +01:00 committed by Brian Gianforcaro
parent ae8c7eebbd
commit 1cdace7898
Notes: sideshowbarker 2024-07-17 21:16:02 +09:00
5 changed files with 27 additions and 27 deletions
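
For context, the qualifiers being spelled out in this commit are the ones `auto` already deduces (or that the surrounding code treats as read-only): a pointer result becomes `auto*`, a read-only binding becomes `auto const&`/`auto const*`, and a string literal becomes `char const*`. The standalone sketch below illustrates the before/after pattern; the names (Region, find_region, total_size) are made up for illustration and are not taken from this commit.

// Illustrative sketch, not part of the diff: the "spell out what auto implies"
// pattern applied to a self-contained example.
#include <cstddef>
#include <vector>

struct Region {
    std::size_t size { 0 };
};

// Returns a raw pointer, so call sites deduce a pointer type.
static Region* find_region(std::vector<Region*>& regions)
{
    return regions.empty() ? nullptr : regions.front();
}

static std::size_t total_size(std::vector<Region*> const& regions)
{
    std::size_t amount = 0;
    // Before: for (auto& region : regions)  // deduces Region* const& here anyway
    for (auto const& region : regions)
        amount += region->size;
    return amount;
}

int main()
{
    Region a { 4096 };
    std::vector<Region*> regions { &a };

    // Before: auto found = find_region(regions);  // the * was implied
    auto* found = find_region(regions);

    // Before: auto name = "text";  // deduces char const* anyway
    char const* name = "text";

    return (found && name && total_size(regions) == 4096) ? 0 : 1;
}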

Kernel/Memory/AddressSpace.cpp

@@ -218,7 +218,7 @@ Region* AddressSpace::find_region_from_range(VirtualRange const& range)
     if (m_region_lookup_cache.range.has_value() && m_region_lookup_cache.range.value() == range && m_region_lookup_cache.region)
         return m_region_lookup_cache.region.unsafe_ptr();
-    auto found_region = m_regions.find(range.base().get());
+    auto* found_region = m_regions.find(range.base().get());
     if (!found_region)
         return nullptr;
     auto& region = *found_region;
@@ -233,7 +233,7 @@ Region* AddressSpace::find_region_from_range(VirtualRange const& range)
 Region* AddressSpace::find_region_containing(VirtualRange const& range)
 {
     SpinlockLocker lock(m_lock);
-    auto candidate = m_regions.find_largest_not_above(range.base().get());
+    auto* candidate = m_regions.find_largest_not_above(range.base().get());
     if (!candidate)
         return nullptr;
     return (*candidate)->range().contains(range) ? candidate->ptr() : nullptr;
@@ -246,7 +246,7 @@ Vector<Region*> AddressSpace::find_regions_intersecting(VirtualRange const& rang
     SpinlockLocker lock(m_lock);
-    auto found_region = m_regions.find_largest_not_above(range.base().get());
+    auto* found_region = m_regions.find_largest_not_above(range.base().get());
     if (!found_region)
         return regions;
     for (auto iter = m_regions.begin_from((*found_region)->vaddr().get()); !iter.is_end(); ++iter) {
@@ -285,7 +285,7 @@ ErrorOr<Vector<Region*, 2>> AddressSpace::try_split_region_around_range(const Re
     };
     Vector<Region*, 2> new_regions;
     for (auto& new_range : remaining_ranges_after_unmap) {
-        auto new_region = TRY(try_make_replacement_region(new_range));
+        auto* new_region = TRY(try_make_replacement_region(new_range));
         new_regions.unchecked_append(new_region);
     }
     return new_regions;
@@ -295,17 +295,17 @@ void AddressSpace::dump_regions()
 {
     dbgln("Process regions:");
 #if ARCH(I386)
-    auto addr_padding = "";
+    char const* addr_padding = "";
 #else
-    auto addr_padding = " ";
+    char const* addr_padding = " ";
 #endif
     dbgln("BEGIN{} END{} SIZE{} ACCESS NAME",
         addr_padding, addr_padding, addr_padding);
     SpinlockLocker lock(m_lock);
-    for (auto& sorted_region : m_regions) {
-        auto& region = *sorted_region;
+    for (auto const& sorted_region : m_regions) {
+        auto const& region = *sorted_region;
         dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}", region.vaddr().get(), region.vaddr().offset(region.size() - 1).get(), region.size(),
             region.is_readable() ? 'R' : ' ',
             region.is_writable() ? 'W' : ' ',
@@ -331,7 +331,7 @@ size_t AddressSpace::amount_dirty_private() const
     // The main issue I'm thinking of is when the VMObject has physical pages that none of the Regions are mapping.
     // That's probably a situation that needs to be looked at in general.
     size_t amount = 0;
-    for (auto& region : m_regions) {
+    for (auto const& region : m_regions) {
         if (!region->is_shared())
             amount += region->amount_dirty();
     }
@@ -342,7 +342,7 @@ size_t AddressSpace::amount_clean_inode() const
 {
     SpinlockLocker lock(m_lock);
     HashTable<const InodeVMObject*> vmobjects;
-    for (auto& region : m_regions) {
+    for (auto const& region : m_regions) {
         if (region->vmobject().is_inode())
             vmobjects.set(&static_cast<const InodeVMObject&>(region->vmobject()));
     }
@@ -356,7 +356,7 @@ size_t AddressSpace::amount_virtual() const
 {
     SpinlockLocker lock(m_lock);
     size_t amount = 0;
-    for (auto& region : m_regions) {
+    for (auto const& region : m_regions) {
         amount += region->size();
     }
     return amount;
@@ -367,7 +367,7 @@ size_t AddressSpace::amount_resident() const
     SpinlockLocker lock(m_lock);
     // FIXME: This will double count if multiple regions use the same physical page.
     size_t amount = 0;
-    for (auto& region : m_regions) {
+    for (auto const& region : m_regions) {
         amount += region->amount_resident();
     }
     return amount;
@@ -381,7 +381,7 @@ size_t AddressSpace::amount_shared() const
     // and each PhysicalPage is only reffed by its VMObject. This needs to be refactored
     // so that every Region contributes +1 ref to each of its PhysicalPages.
     size_t amount = 0;
-    for (auto& region : m_regions) {
+    for (auto const& region : m_regions) {
         amount += region->amount_shared();
     }
     return amount;
@@ -391,7 +391,7 @@ size_t AddressSpace::amount_purgeable_volatile() const
 {
     SpinlockLocker lock(m_lock);
     size_t amount = 0;
-    for (auto& region : m_regions) {
+    for (auto const& region : m_regions) {
         if (!region->vmobject().is_anonymous())
             continue;
         auto const& vmobject = static_cast<AnonymousVMObject const&>(region->vmobject());
@@ -405,7 +405,7 @@ size_t AddressSpace::amount_purgeable_nonvolatile() const
 {
     SpinlockLocker lock(m_lock);
     size_t amount = 0;
-    for (auto& region : m_regions) {
+    for (auto const& region : m_regions) {
         if (!region->vmobject().is_anonymous())
             continue;
         auto const& vmobject = static_cast<AnonymousVMObject const&>(region->vmobject());

Kernel/Memory/AnonymousVMObject.cpp

@@ -261,7 +261,7 @@ void AnonymousVMObject::ensure_or_reset_cow_map()
 bool AnonymousVMObject::should_cow(size_t page_index, bool is_shared) const
 {
-    auto& page = physical_pages()[page_index];
+    auto const& page = physical_pages()[page_index];
     if (page && (page->is_shared_zero_page() || page->is_lazy_committed_page()))
         return true;
     if (is_shared)

Kernel/Memory/MemoryManager.cpp

@@ -101,13 +101,13 @@ UNMAP_AFTER_INIT void MemoryManager::protect_kernel_image()
 {
     SpinlockLocker page_lock(kernel_page_directory().get_lock());
     // Disable writing to the kernel text and rodata segments.
-    for (auto i = start_of_kernel_text; i < start_of_kernel_data; i += PAGE_SIZE) {
+    for (auto const* i = start_of_kernel_text; i < start_of_kernel_data; i += PAGE_SIZE) {
         auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
         pte.set_writable(false);
     }
     if (Processor::current().has_feature(CPUFeature::NX)) {
         // Disable execution of the kernel data, bss and heap segments.
-        for (auto i = start_of_kernel_data; i < end_of_kernel_image; i += PAGE_SIZE) {
+        for (auto const* i = start_of_kernel_data; i < end_of_kernel_image; i += PAGE_SIZE) {
             auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
             pte.set_execute_disabled(true);
         }
@@ -216,7 +216,7 @@ bool MemoryManager::is_allowed_to_read_physical_memory_for_userspace(PhysicalAdd
     if (start_address.offset_addition_would_overflow(read_length))
         return false;
     auto end_address = start_address.offset(read_length);
-    for (auto& current_range : m_reserved_memory_ranges) {
+    for (auto const& current_range : m_reserved_memory_ranges) {
         if (current_range.start > start_address)
             continue;
         if (current_range.start.offset(current_range.length) < end_address)
@@ -990,7 +990,7 @@ void MemoryManager::enter_process_address_space(Process& process)
 void MemoryManager::enter_address_space(AddressSpace& space)
 {
-    auto current_thread = Thread::current();
+    auto* current_thread = Thread::current();
     VERIFY(current_thread != nullptr);
     SpinlockLocker lock(s_mm_lock);
@@ -1129,15 +1129,15 @@ void MemoryManager::dump_kernel_regions()
 {
     dbgln("Kernel regions:");
 #if ARCH(I386)
-    auto addr_padding = "";
+    char const* addr_padding = "";
 #else
-    auto addr_padding = " ";
+    char const* addr_padding = " ";
 #endif
     dbgln("BEGIN{} END{} SIZE{} ACCESS NAME",
         addr_padding, addr_padding, addr_padding);
     SpinlockLocker lock(s_mm_lock);
-    for (auto* region_ptr : m_kernel_regions) {
-        auto& region = *region_ptr;
+    for (auto const* region_ptr : m_kernel_regions) {
+        auto const& region = *region_ptr;
         dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}",
             region.vaddr().get(),
             region.vaddr().offset(region.size() - 1).get(),

Kernel/Memory/PhysicalZone.cpp

@@ -186,7 +186,7 @@ void PhysicalZone::dump() const
 {
     dbgln("(( {} used, {} available, page_count: {} ))", m_used_chunks, available(), m_page_count);
     for (size_t i = 0; i <= max_order; ++i) {
-        auto& bucket = m_buckets[i];
+        auto const& bucket = m_buckets[i];
         dbgln("[{:2} / {:4}] ", i, (size_t)(2u << i));
         auto entry = bucket.freelist;
         while (entry != -1) {

Kernel/Memory/Region.cpp

@@ -132,7 +132,7 @@ size_t Region::amount_resident() const
 {
     size_t bytes = 0;
     for (size_t i = 0; i < page_count(); ++i) {
-        auto* page = physical_page(i);
+        auto const* page = physical_page(i);
         if (page && !page->is_shared_zero_page() && !page->is_lazy_committed_page())
            bytes += PAGE_SIZE;
     }
@@ -143,7 +143,7 @@ size_t Region::amount_shared() const
 {
     size_t bytes = 0;
     for (size_t i = 0; i < page_count(); ++i) {
-        auto* page = physical_page(i);
+        auto const* page = physical_page(i);
         if (page && page->ref_count() > 1 && !page->is_shared_zero_page() && !page->is_lazy_committed_page())
            bytes += PAGE_SIZE;
     }