Kernel: Replace process' regions vector with a Red Black tree
This should provide some speedup, as searches for the region containing a given address were previously performed in O(n), while this container lets us do them in O(log n).
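To illustrate the lookup this enables (not part of the commit; hypothetical simplified types throughout): regions are keyed by their base address, so the candidate region containing an address is the one with the largest key not above that address, found in O(log n) and then checked for containment. A minimal self-contained sketch, with std::map (itself commonly a red-black tree) standing in for AK::RedBlackTree:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <map>

// Hypothetical, simplified stand-in for Kernel::Region.
struct Region {
    uintptr_t base;
    size_t size;
    bool contains(uintptr_t addr) const { return addr >= base && addr < base + size; }
};

// Regions keyed by base address, like RedBlackTree<FlatPtr, NonnullOwnPtr<Region>>.
using RegionTree = std::map<uintptr_t, Region>;

// O(log n) point lookup: take the region with the largest base <= addr
// (the role AK::RedBlackTree::find_largest_not_above() plays in this commit),
// then verify that it actually spans addr.
Region* find_region_containing(RegionTree& regions, uintptr_t addr)
{
    auto it = regions.upper_bound(addr); // first region with base > addr
    if (it == regions.begin())
        return nullptr; // no region starts at or below addr
    --it; // largest base not above addr
    return it->second.contains(addr) ? &it->second : nullptr;
}

int main()
{
    RegionTree regions;
    regions.emplace(0x1000, Region { 0x1000, 0x2000 });
    regions.emplace(0x8000, Region { 0x8000, 0x1000 });
    std::cout << (find_region_containing(regions, 0x1800) != nullptr) << '\n'; // 1: inside the first region
    std::cout << (find_region_containing(regions, 0x4000) != nullptr) << '\n'; // 0: in the gap
    return 0;
}

The previous Vector-backed loop ran the same containment check against every region; the tree version descends once and inspects a single candidate.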
This commit is contained in:
parent 497c759ab7
commit 2c93123daf

Notes:
sideshowbarker 2024-07-18 20:28:19 +09:00
Author: https://github.com/IdanHo
Commit: https://github.com/SerenityOS/serenity/commit/2c93123daf6
Pull-request: https://github.com/SerenityOS/serenity/pull/6167
Reviewed-by: https://github.com/awesomekling
Reviewed-by: https://github.com/bgianfo
7 changed files with 89 additions and 97 deletions
Kernel/CoreDump.cpp
@@ -137,17 +137,17 @@ KResult CoreDump::write_program_headers(size_t notes_size)
         phdr.p_type = PT_LOAD;
         phdr.p_offset = offset;
-        phdr.p_vaddr = region.vaddr().get();
+        phdr.p_vaddr = region->vaddr().get();
         phdr.p_paddr = 0;

-        phdr.p_filesz = region.page_count() * PAGE_SIZE;
-        phdr.p_memsz = region.page_count() * PAGE_SIZE;
+        phdr.p_filesz = region->page_count() * PAGE_SIZE;
+        phdr.p_memsz = region->page_count() * PAGE_SIZE;
         phdr.p_align = 0;

-        phdr.p_flags = region.is_readable() ? PF_R : 0;
-        if (region.is_writable())
+        phdr.p_flags = region->is_readable() ? PF_R : 0;
+        if (region->is_writable())
             phdr.p_flags |= PF_W;
-        if (region.is_executable())
+        if (region->is_executable())
             phdr.p_flags |= PF_X;

         offset += phdr.p_filesz;
@@ -174,20 +174,20 @@ KResult CoreDump::write_program_headers(size_t notes_size)
 KResult CoreDump::write_regions()
 {
     for (auto& region : m_process->space().regions()) {
-        if (region.is_kernel())
+        if (region->is_kernel())
             continue;

-        region.set_readable(true);
-        region.remap();
+        region->set_readable(true);
+        region->remap();

-        for (size_t i = 0; i < region.page_count(); i++) {
-            auto* page = region.physical_page(i);
+        for (size_t i = 0; i < region->page_count(); i++) {
+            auto* page = region->physical_page(i);

             uint8_t zero_buffer[PAGE_SIZE] = {};
             Optional<UserOrKernelBuffer> src_buffer;

             if (page) {
-                src_buffer = UserOrKernelBuffer::for_user_buffer(reinterpret_cast<uint8_t*>((region.vaddr().as_ptr() + (i * PAGE_SIZE))), PAGE_SIZE);
+                src_buffer = UserOrKernelBuffer::for_user_buffer(reinterpret_cast<uint8_t*>((region->vaddr().as_ptr() + (i * PAGE_SIZE))), PAGE_SIZE);
             } else {
                 // If the current page is not backed by a physical page, we zero it in the coredump file.
                 // TODO: Do we want to include the contents of pages that have not been faulted-in in the coredump?
@@ -253,20 +253,20 @@ ByteBuffer CoreDump::create_notes_threads_data() const
 ByteBuffer CoreDump::create_notes_regions_data() const
 {
     ByteBuffer regions_data;
-    for (size_t region_index = 0; region_index < m_process->space().region_count(); ++region_index) {
+    size_t region_index = 0;
+    for (auto& region : m_process->space().regions()) {

         ByteBuffer memory_region_info_buffer;
         ELF::Core::MemoryRegionInfo info {};
         info.header.type = ELF::Core::NotesEntryHeader::Type::MemoryRegionInfo;

-        auto& region = m_process->space().regions()[region_index];
-        info.region_start = region.vaddr().get();
-        info.region_end = region.vaddr().offset(region.size()).get();
-        info.program_header_index = region_index;
+        info.region_start = region->vaddr().get();
+        info.region_end = region->vaddr().offset(region->size()).get();
+        info.program_header_index = region_index++;

         memory_region_info_buffer.append((void*)&info, sizeof(info));

-        auto name = region.name();
+        auto name = region->name();
         if (name.is_null())
             name = String::empty();
         memory_region_info_buffer.append(name.characters(), name.length() + 1);
Kernel/FileSystem/ProcFS.cpp
@@ -321,31 +321,31 @@ static bool procfs$pid_vm(InodeIdentifier identifier, KBufferBuilder& builder)
 {
     ScopedSpinLock lock(process->space().get_lock());
     for (auto& region : process->space().regions()) {
-        if (!region.is_user() && !Process::current()->is_superuser())
+        if (!region->is_user() && !Process::current()->is_superuser())
             continue;
         auto region_object = array.add_object();
-        region_object.add("readable", region.is_readable());
-        region_object.add("writable", region.is_writable());
-        region_object.add("executable", region.is_executable());
-        region_object.add("stack", region.is_stack());
-        region_object.add("shared", region.is_shared());
-        region_object.add("syscall", region.is_syscall_region());
-        region_object.add("purgeable", region.vmobject().is_anonymous());
-        if (region.vmobject().is_anonymous()) {
-            region_object.add("volatile", static_cast<const AnonymousVMObject&>(region.vmobject()).is_any_volatile());
+        region_object.add("readable", region->is_readable());
+        region_object.add("writable", region->is_writable());
+        region_object.add("executable", region->is_executable());
+        region_object.add("stack", region->is_stack());
+        region_object.add("shared", region->is_shared());
+        region_object.add("syscall", region->is_syscall_region());
+        region_object.add("purgeable", region->vmobject().is_anonymous());
+        if (region->vmobject().is_anonymous()) {
+            region_object.add("volatile", static_cast<const AnonymousVMObject&>(region->vmobject()).is_any_volatile());
         }
-        region_object.add("cacheable", region.is_cacheable());
-        region_object.add("address", region.vaddr().get());
-        region_object.add("size", region.size());
-        region_object.add("amount_resident", region.amount_resident());
-        region_object.add("amount_dirty", region.amount_dirty());
-        region_object.add("cow_pages", region.cow_pages());
-        region_object.add("name", region.name());
-        region_object.add("vmobject", region.vmobject().class_name());
+        region_object.add("cacheable", region->is_cacheable());
+        region_object.add("address", region->vaddr().get());
+        region_object.add("size", region->size());
+        region_object.add("amount_resident", region->amount_resident());
+        region_object.add("amount_dirty", region->amount_dirty());
+        region_object.add("cow_pages", region->cow_pages());
+        region_object.add("name", region->name());
+        region_object.add("vmobject", region->vmobject().class_name());

         StringBuilder pagemap_builder;
-        for (size_t i = 0; i < region.page_count(); ++i) {
-            auto* page = region.physical_page(i);
+        for (size_t i = 0; i < region->page_count(); ++i) {
+            auto* page = region->physical_page(i);
             if (!page)
                 pagemap_builder.append('N');
             else if (page->is_shared_zero_page() || page->is_lazy_committed_page())
Kernel/PerformanceEventBuffer.cpp
@@ -203,8 +203,8 @@ void PerformanceEventBuffer::add_process(const Process& process)
     for (auto& region : process.space().regions()) {
         sampled_process->regions.append(SampledProcess::Region {
-            .name = region.name(),
-            .range = region.range(),
+            .name = region->name(),
+            .range = region->range(),
         });
     }
Kernel/Syscalls/fork.cpp
@@ -85,8 +85,8 @@ KResultOr<pid_t> Process::sys$fork(RegisterState& regs)
 {
     ScopedSpinLock lock(space().get_lock());
     for (auto& region : space().regions()) {
-        dbgln_if(FORK_DEBUG, "fork: cloning Region({}) '{}' @ {}", &region, region.name(), region.vaddr());
-        auto region_clone = region.clone(*child);
+        dbgln_if(FORK_DEBUG, "fork: cloning Region({}) '{}' @ {}", region, region->name(), region->vaddr());
+        auto region_clone = region->clone(*child);
         if (!region_clone) {
             dbgln("fork: Cannot clone region, insufficient memory");
             // TODO: tear down new process?
@@ -96,7 +96,7 @@ KResultOr<pid_t> Process::sys$fork(RegisterState& regs)
         auto& child_region = child->space().add_region(region_clone.release_nonnull());
         child_region.map(child->space().page_directory(), ShouldFlushTLB::No);

-        if (&region == m_master_tls_region.unsafe_ptr())
+        if (region == m_master_tls_region.unsafe_ptr())
             child->m_master_tls_region = child_region;
     }
Kernel/VM/MemoryManager.cpp
@@ -433,13 +433,8 @@ Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
 Region* MemoryManager::user_region_from_vaddr(Space& space, VirtualAddress vaddr)
 {
-    // FIXME: Use a binary search tree (maybe red/black?) or some other more appropriate data structure!
     ScopedSpinLock lock(space.get_lock());
-    for (auto& region : space.regions()) {
-        if (region.contains(vaddr))
-            return &region;
-    }
-    return nullptr;
+    return space.find_region_containing({ vaddr, 1 });
 }

 Region* MemoryManager::find_region_from_vaddr(Space& space, VirtualAddress vaddr)
Kernel/VM/Space.cpp
@@ -127,12 +127,14 @@ OwnPtr<Region> Space::take_region(Region& region)
     if (m_region_lookup_cache.region.unsafe_ptr() == &region)
         m_region_lookup_cache.region = nullptr;
-    for (size_t i = 0; i < m_regions.size(); ++i) {
-        if (&m_regions[i] == &region) {
-            return m_regions.unstable_take(i);
-        }
-    }
+    // FIXME: currently we traverse the RBTree twice, once to check if the region in the tree starting at region.vaddr()
+    //  is the same region and once to actually remove it, maybe we can add some kind of remove_if()?
+    auto found_region = m_regions.find(region.vaddr().get());
+    if (!found_region)
+        return {};
+    if (found_region->ptr() != &region)
+        return {};
+    return m_regions.unsafe_remove(region.vaddr().get());
 }

 Region* Space::find_region_from_range(const Range& range)
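A concrete illustration of the FIXME above (hypothetical simplified types, std::map again standing in for AK::RedBlackTree): with a key-based remove like unsafe_remove(), verifying identity and removing are two separate descents; an iterator-based erase, which std::map happens to offer, is roughly what the proposed remove_if() would buy.

#include <cstddef>
#include <cstdint>
#include <map>
#include <memory>

// Hypothetical, simplified stand-in for Kernel::Region.
struct Region {
    uintptr_t base { 0 };
    size_t size { 0 };
};

using RegionTree = std::map<uintptr_t, std::unique_ptr<Region>>;

// Remove `region` from the tree only if the entry keyed by its base address
// is that same object; mirrors the shape of the patched Space::take_region().
std::unique_ptr<Region> take_region(RegionTree& regions, Region& region)
{
    auto it = regions.find(region.base); // descent #1: locate and verify identity
    if (it == regions.end() || it->second.get() != &region)
        return nullptr;
    auto owned = std::move(it->second);
    regions.erase(it); // reusing the iterator avoids the second descent
                       // that a key-based remove would need
    return owned;
}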
@@ -141,25 +143,25 @@ Region* Space::find_region_from_range(const Range& range)
     if (m_region_lookup_cache.range.has_value() && m_region_lookup_cache.range.value() == range && m_region_lookup_cache.region)
         return m_region_lookup_cache.region.unsafe_ptr();

-    size_t size = page_round_up(range.size());
-    for (auto& region : m_regions) {
-        if (region.vaddr() == range.base() && region.size() == size) {
-            m_region_lookup_cache.range = range;
-            m_region_lookup_cache.region = region;
-            return &region;
-        }
-    }
+    auto found_region = m_regions.find(range.base().get());
+    if (!found_region)
+        return nullptr;
+    auto& region = *found_region;
+    size_t size = page_round_up(range.size());
+    if (region->size() != size)
+        return nullptr;
+    m_region_lookup_cache.range = range;
+    m_region_lookup_cache.region = *region;
+    return region;
 }

 Region* Space::find_region_containing(const Range& range)
 {
     ScopedSpinLock lock(m_lock);
-    for (auto& region : m_regions) {
-        if (region.contains(range))
-            return &region;
-    }
+    auto candidate = m_regions.find_largest_not_above(range.base().get());
+    if (!candidate)
+        return nullptr;
+    return (*candidate)->range().contains(range) ? candidate->ptr() : nullptr;
 }

 Vector<Region*> Space::find_regions_intersecting(const Range& range)
@@ -170,11 +172,14 @@ Vector<Region*> Space::find_regions_intersecting(const Range& range)
     ScopedSpinLock lock(m_lock);

     // FIXME: Maybe take the cache from the single lookup?
-    for (auto& region : m_regions) {
-        if (region.range().base() < range.end() && region.range().end() > range.base()) {
-            regions.append(&region);
+    auto found_region = m_regions.find_largest_not_above(range.base().get());
+    if (!found_region)
+        return regions;
+    for (auto iter = m_regions.begin_from((*found_region)->vaddr().get()); !iter.is_end(); ++iter) {
+        if ((*iter)->range().base() < range.end() && (*iter)->range().end() > range.base()) {
+            regions.append(*iter);

-            total_size_collected += region.size() - region.range().intersect(range).size();
+            total_size_collected += (*iter)->size() - (*iter)->range().intersect(range).size();
             if (total_size_collected == range.size())
                 break;
         }
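The same largest-key-not-above entry point drives the intersection walk above: the region with the largest base not above range.base() may straddle the start of the range, and from there regions are visited in ascending address order until they begin past the end of the range. A sketch of that traversal, again over a std::map stand-in with hypothetical simplified types (the kernel's early exit on total_size_collected is omitted):

#include <cstddef>
#include <cstdint>
#include <map>
#include <vector>

// Hypothetical, simplified stand-in for Kernel::Region.
struct Region {
    uintptr_t base { 0 };
    size_t size { 0 };
    uintptr_t end() const { return base + size; }
};

using RegionTree = std::map<uintptr_t, Region>;

// Collect every region overlapping [range_base, range_end), starting from the
// predecessor candidate, as the patched find_regions_intersecting() does.
std::vector<Region*> find_regions_intersecting(RegionTree& regions, uintptr_t range_base, uintptr_t range_end)
{
    std::vector<Region*> result;
    auto it = regions.upper_bound(range_base); // first region with base > range_base
    if (it != regions.begin())
        --it; // the predecessor may still overlap the start of the range
    for (; it != regions.end() && it->second.base < range_end; ++it) {
        if (it->second.end() > range_base) // overlap test, as in the kernel code
            result.push_back(&it->second);
    }
    return result;
}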
@@ -187,7 +192,7 @@ Region& Space::add_region(NonnullOwnPtr<Region> region)
 {
     auto* ptr = region.ptr();
     ScopedSpinLock lock(m_lock);
-    m_regions.append(move(region));
+    m_regions.insert(region->vaddr().get(), move(region));
     return *ptr;
 }
@@ -217,15 +222,7 @@ void Space::dump_regions()
     ScopedSpinLock lock(m_lock);

-    Vector<Region*> sorted_regions;
-    sorted_regions.ensure_capacity(m_regions.size());
-    for (auto& region : m_regions)
-        sorted_regions.append(&region);
-    quick_sort(sorted_regions, [](auto& a, auto& b) {
-        return a->vaddr() < b->vaddr();
-    });
-
-    for (auto& sorted_region : sorted_regions) {
+    for (auto& sorted_region : m_regions) {
         auto& region = *sorted_region;
         dbgln("{:08x} -- {:08x} {:08x} {:c}{:c}{:c}{:c}{:c}{:c} {}", region.vaddr().get(), region.vaddr().offset(region.size() - 1).get(), region.size(),
             region.is_readable() ? 'R' : ' ',
@@ -253,8 +250,8 @@ size_t Space::amount_dirty_private() const
     // That's probably a situation that needs to be looked at in general.
     size_t amount = 0;
     for (auto& region : m_regions) {
-        if (!region.is_shared())
-            amount += region.amount_dirty();
+        if (!region->is_shared())
+            amount += region->amount_dirty();
     }
     return amount;
 }
@@ -264,8 +261,8 @@ size_t Space::amount_clean_inode() const
     ScopedSpinLock lock(m_lock);
     HashTable<const InodeVMObject*> vmobjects;
     for (auto& region : m_regions) {
-        if (region.vmobject().is_inode())
-            vmobjects.set(&static_cast<const InodeVMObject&>(region.vmobject()));
+        if (region->vmobject().is_inode())
+            vmobjects.set(&static_cast<const InodeVMObject&>(region->vmobject()));
     }
     size_t amount = 0;
     for (auto& vmobject : vmobjects)
@@ -278,7 +275,7 @@ size_t Space::amount_virtual() const
     ScopedSpinLock lock(m_lock);
     size_t amount = 0;
     for (auto& region : m_regions) {
-        amount += region.size();
+        amount += region->size();
     }
     return amount;
 }
@@ -289,7 +286,7 @@ size_t Space::amount_resident() const
     // FIXME: This will double count if multiple regions use the same physical page.
     size_t amount = 0;
     for (auto& region : m_regions) {
-        amount += region.amount_resident();
+        amount += region->amount_resident();
     }
     return amount;
 }
@@ -303,7 +300,7 @@ size_t Space::amount_shared() const
     // so that every Region contributes +1 ref to each of its PhysicalPages.
     size_t amount = 0;
     for (auto& region : m_regions) {
-        amount += region.amount_shared();
+        amount += region->amount_shared();
     }
     return amount;
 }
@@ -313,8 +310,8 @@ size_t Space::amount_purgeable_volatile() const
     ScopedSpinLock lock(m_lock);
     size_t amount = 0;
     for (auto& region : m_regions) {
-        if (region.vmobject().is_anonymous() && static_cast<const AnonymousVMObject&>(region.vmobject()).is_any_volatile())
-            amount += region.amount_resident();
+        if (region->vmobject().is_anonymous() && static_cast<const AnonymousVMObject&>(region->vmobject()).is_any_volatile())
+            amount += region->amount_resident();
     }
     return amount;
 }
@@ -324,8 +321,8 @@ size_t Space::amount_purgeable_nonvolatile() const
     ScopedSpinLock lock(m_lock);
     size_t amount = 0;
     for (auto& region : m_regions) {
-        if (region.vmobject().is_anonymous() && !static_cast<const AnonymousVMObject&>(region.vmobject()).is_any_volatile())
-            amount += region.amount_resident();
+        if (region->vmobject().is_anonymous() && !static_cast<const AnonymousVMObject&>(region->vmobject()).is_any_volatile())
+            amount += region->amount_resident();
     }
     return amount;
 }
Kernel/VM/Space.h
@@ -27,7 +27,7 @@
 #pragma once

-#include <AK/NonnullOwnPtrVector.h>
+#include <AK/RedBlackTree.h>
 #include <AK/Vector.h>
 #include <AK/WeakPtr.h>
 #include <Kernel/UnixTypes.h>
@@ -48,8 +48,8 @@ public:
     size_t region_count() const { return m_regions.size(); }

-    NonnullOwnPtrVector<Region>& regions() { return m_regions; }
-    const NonnullOwnPtrVector<Region>& regions() const { return m_regions; }
+    RedBlackTree<FlatPtr, NonnullOwnPtr<Region>>& regions() { return m_regions; }
+    const RedBlackTree<FlatPtr, NonnullOwnPtr<Region>>& regions() const { return m_regions; }

     void dump_regions();
@@ -91,7 +91,7 @@ private:
     RefPtr<PageDirectory> m_page_directory;

-    NonnullOwnPtrVector<Region> m_regions;
+    RedBlackTree<FlatPtr, NonnullOwnPtr<Region>> m_regions;

     struct RegionLookupCache {
         Optional<Range> range;