Kernel: Wrap RegionTree objects in SpinlockProtected

This makes locking them much more straightforward, and lets us remove
a bunch of confusing uses of AddressSpace::m_lock. That lock will also
be converted to SpinlockProtected in a subsequent patch.
Author: Andreas Kling
Date:   2022-08-23 12:28:04 +02:00
Parent: 352d6545a9
Commit: dc9d2c1b10
9 changed files with 251 additions and 234 deletions
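
For readers new to the pattern: SpinlockProtected<T> couples a value with the spinlock that guards it, and the only way to reach the value is through with(), which holds the lock for exactly the duration of a callback. Below is a minimal sketch of the shape this patch assumes; the real class lives in Kernel/Locking/SpinlockProtected.h and also participates in LockRank tracking.

    template<typename T>
    class SpinlockProtected {
    public:
        // The first argument feeds the wrapper's own spinlock; everything
        // else is forwarded to T's constructor. This is why call sites in
        // the diff gain a leading LockRank::None argument.
        template<typename... Args>
        SpinlockProtected(LockRank rank, Args&&... args)
            : m_spinlock(rank)
            , m_value(forward<Args>(args)...)
        {
        }

        // The guarded value is only reachable inside the callback, so a
        // caller cannot forget to lock, and the callback's return value
        // is passed through to the caller.
        template<typename Callback>
        decltype(auto) with(Callback callback)
        {
            SpinlockLocker locker(m_spinlock);
            return callback(m_value);
        }

    private:
        Spinlock m_spinlock;
        T m_value;
    };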

Kernel/Coredump.cpp

@@ -46,16 +46,18 @@ Coredump::Coredump(NonnullLockRefPtr<Process> process, NonnullLockRefPtr<OpenFil
     , m_description(move(description))
 {
     m_num_program_headers = 0;
-    for ([[maybe_unused]] auto& region : m_process->address_space().regions()) {
+    m_process->address_space().region_tree().with([&](auto& region_tree) {
+        for (auto& region : region_tree.regions()) {
 #if !INCLUDE_USERSPACE_HEAP_MEMORY_IN_COREDUMPS
-        if (looks_like_userspace_heap_region(region))
-            continue;
+            if (looks_like_userspace_heap_region(region))
+                continue;
 #endif
-        if (region.access() == Memory::Region::Access::None)
-            continue;
-        ++m_num_program_headers;
-    }
+            if (region.access() == Memory::Region::Access::None)
+                continue;
+            ++m_num_program_headers;
+        }
+    });
     ++m_num_program_headers; // +1 for NOTE segment
 }
@@ -133,37 +135,39 @@ ErrorOr<void> Coredump::write_elf_header()
 ErrorOr<void> Coredump::write_program_headers(size_t notes_size)
 {
     size_t offset = sizeof(ElfW(Ehdr)) + m_num_program_headers * sizeof(ElfW(Phdr));
-    for (auto& region : m_process->address_space().regions()) {
+    m_process->address_space().region_tree().with([&](auto& region_tree) {
+        for (auto& region : region_tree.regions()) {
 #if !INCLUDE_USERSPACE_HEAP_MEMORY_IN_COREDUMPS
-        if (looks_like_userspace_heap_region(region))
-            continue;
+            if (looks_like_userspace_heap_region(region))
+                continue;
 #endif
-        if (region.access() == Memory::Region::Access::None)
-            continue;
+            if (region.access() == Memory::Region::Access::None)
+                continue;
 
-        ElfW(Phdr) phdr {};
+            ElfW(Phdr) phdr {};
 
-        phdr.p_type = PT_LOAD;
-        phdr.p_offset = offset;
-        phdr.p_vaddr = region.vaddr().get();
-        phdr.p_paddr = 0;
+            phdr.p_type = PT_LOAD;
+            phdr.p_offset = offset;
+            phdr.p_vaddr = region.vaddr().get();
+            phdr.p_paddr = 0;
 
-        phdr.p_filesz = region.page_count() * PAGE_SIZE;
-        phdr.p_memsz = region.page_count() * PAGE_SIZE;
-        phdr.p_align = 0;
+            phdr.p_filesz = region.page_count() * PAGE_SIZE;
+            phdr.p_memsz = region.page_count() * PAGE_SIZE;
+            phdr.p_align = 0;
 
-        phdr.p_flags = region.is_readable() ? PF_R : 0;
-        if (region.is_writable())
-            phdr.p_flags |= PF_W;
-        if (region.is_executable())
-            phdr.p_flags |= PF_X;
+            phdr.p_flags = region.is_readable() ? PF_R : 0;
+            if (region.is_writable())
+                phdr.p_flags |= PF_W;
+            if (region.is_executable())
+                phdr.p_flags |= PF_X;
 
-        offset += phdr.p_filesz;
+            offset += phdr.p_filesz;
 
-        [[maybe_unused]] auto rc = m_description->write(UserOrKernelBuffer::for_kernel_buffer(reinterpret_cast<uint8_t*>(&phdr)), sizeof(ElfW(Phdr)));
-    }
+            [[maybe_unused]] auto rc = m_description->write(UserOrKernelBuffer::for_kernel_buffer(reinterpret_cast<uint8_t*>(&phdr)), sizeof(ElfW(Phdr)));
+        }
+    });
 
     ElfW(Phdr) notes_pheader {};
     notes_pheader.p_type = PT_NOTE;
@@ -184,36 +188,38 @@ ErrorOr<void> Coredump::write_regions()
 {
     u8 zero_buffer[PAGE_SIZE] = {};
 
-    for (auto& region : m_process->address_space().regions()) {
-        VERIFY(!region.is_kernel());
+    return m_process->address_space().region_tree().with([&](auto& region_tree) -> ErrorOr<void> {
+        for (auto& region : region_tree.regions()) {
+            VERIFY(!region.is_kernel());
 #if !INCLUDE_USERSPACE_HEAP_MEMORY_IN_COREDUMPS
-        if (looks_like_userspace_heap_region(region))
-            continue;
+            if (looks_like_userspace_heap_region(region))
+                continue;
 #endif
-        if (region.access() == Memory::Region::Access::None)
-            continue;
+            if (region.access() == Memory::Region::Access::None)
+                continue;
 
-        // If we crashed in the middle of mapping in Regions, they do not have a page directory yet, and will crash on a remap() call
-        if (!region.is_mapped())
-            continue;
+            // If we crashed in the middle of mapping in Regions, they do not have a page directory yet, and will crash on a remap() call
+            if (!region.is_mapped())
+                continue;
 
-        region.set_readable(true);
-        region.remap();
+            region.set_readable(true);
+            region.remap();
 
-        for (size_t i = 0; i < region.page_count(); i++) {
-            auto page = region.physical_page(i);
-            auto src_buffer = [&]() -> ErrorOr<UserOrKernelBuffer> {
-                if (page)
-                    return UserOrKernelBuffer::for_user_buffer(reinterpret_cast<uint8_t*>((region.vaddr().as_ptr() + (i * PAGE_SIZE))), PAGE_SIZE);
-                // If the current page is not backed by a physical page, we zero it in the coredump file.
-                return UserOrKernelBuffer::for_kernel_buffer(zero_buffer);
-            }();
-            TRY(m_description->write(src_buffer.value(), PAGE_SIZE));
+            for (size_t i = 0; i < region.page_count(); i++) {
+                auto page = region.physical_page(i);
+                auto src_buffer = [&]() -> ErrorOr<UserOrKernelBuffer> {
+                    if (page)
+                        return UserOrKernelBuffer::for_user_buffer(reinterpret_cast<uint8_t*>((region.vaddr().as_ptr() + (i * PAGE_SIZE))), PAGE_SIZE);
+                    // If the current page is not backed by a physical page, we zero it in the coredump file.
+                    return UserOrKernelBuffer::for_kernel_buffer(zero_buffer);
+                }();
+                TRY(m_description->write(src_buffer.value(), PAGE_SIZE));
+            }
         }
-    }
-    return {};
+        return {};
+    });
 }
 
 ErrorOr<void> Coredump::write_notes_segment(ReadonlyBytes notes_segment)
@@ -273,33 +279,35 @@ ErrorOr<void> Coredump::create_notes_threads_data(auto& builder) const
 ErrorOr<void> Coredump::create_notes_regions_data(auto& builder) const
 {
     size_t region_index = 0;
-    for (auto const& region : m_process->address_space().regions()) {
+    return m_process->address_space().region_tree().with([&](auto& region_tree) -> ErrorOr<void> {
+        for (auto const& region : region_tree.regions()) {
 #if !INCLUDE_USERSPACE_HEAP_MEMORY_IN_COREDUMPS
-        if (looks_like_userspace_heap_region(region))
-            continue;
+            if (looks_like_userspace_heap_region(region))
+                continue;
 #endif
-        if (region.access() == Memory::Region::Access::None)
-            continue;
+            if (region.access() == Memory::Region::Access::None)
+                continue;
 
-        ELF::Core::MemoryRegionInfo info {};
-        info.header.type = ELF::Core::NotesEntryHeader::Type::MemoryRegionInfo;
+            ELF::Core::MemoryRegionInfo info {};
+            info.header.type = ELF::Core::NotesEntryHeader::Type::MemoryRegionInfo;
 
-        info.region_start = region.vaddr().get();
-        info.region_end = region.vaddr().offset(region.size()).get();
-        info.program_header_index = region_index++;
+            info.region_start = region.vaddr().get();
+            info.region_end = region.vaddr().offset(region.size()).get();
+            info.program_header_index = region_index++;
 
-        TRY(builder.append_bytes(ReadonlyBytes { (void*)&info, sizeof(info) }));
+            TRY(builder.append_bytes(ReadonlyBytes { (void*)&info, sizeof(info) }));
 
-        // NOTE: The region name *is* null-terminated, so the following is ok:
-        auto name = region.name();
-        if (name.is_empty())
-            TRY(builder.append('\0'));
-        else
-            TRY(builder.append(name.characters_without_null_termination(), name.length() + 1));
-    }
-    return {};
+            // NOTE: The region name *is* null-terminated, so the following is ok:
+            auto name = region.name();
+            if (name.is_empty())
+                TRY(builder.append('\0'));
+            else
+                TRY(builder.append(name.characters_without_null_termination(), name.length() + 1));
+        }
+        return {};
+    });
 }
 
 ErrorOr<void> Coredump::create_notes_metadata_data(auto& builder) const
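
A note on the error-handling shape used throughout the hunks above: TRY() inside the lambda propagates out of the lambda, not out of the enclosing function, so any lambda that uses TRY() declares an ErrorOr<...> return type of its own, and the enclosing function returns (or TRYs) the result of with() itself. A sketch of the idiom, with hypothetical names visit_regions() and do_something_with():

    ErrorOr<void> visit_regions(Memory::AddressSpace& space)
    {
        // The lambda, not the outer function, is what TRY() returns from,
        // so the lambda itself must return ErrorOr<void>.
        return space.region_tree().with([&](auto& region_tree) -> ErrorOr<void> {
            for (auto& region : region_tree.regions())
                TRY(do_something_with(region)); // hypothetical helper
            return {};
        });
    }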

Kernel/Memory/AddressSpace.cpp

@@ -25,7 +25,7 @@ ErrorOr<NonnullOwnPtr<AddressSpace>> AddressSpace::try_create(AddressSpace const
     VirtualRange total_range = [&]() -> VirtualRange {
         if (parent)
-            return parent->m_region_tree.total_range();
+            return parent->m_total_range;
         constexpr FlatPtr userspace_range_base = USER_RANGE_BASE;
         FlatPtr const userspace_range_ceiling = USER_RANGE_CEILING;
         size_t random_offset = (get_fast_random<u8>() % 2 * MiB) & PAGE_MASK;
@@ -40,7 +40,8 @@ ErrorOr<NonnullOwnPtr<AddressSpace>> AddressSpace::try_create(AddressSpace const
 AddressSpace::AddressSpace(NonnullLockRefPtr<PageDirectory> page_directory, VirtualRange total_range)
     : m_page_directory(move(page_directory))
-    , m_region_tree(total_range)
+    , m_total_range(total_range)
+    , m_region_tree(LockRank::None, total_range)
 {
 }
@@ -148,8 +149,10 @@ ErrorOr<Region*> AddressSpace::try_allocate_split_region(Region const& source_re
         if (source_region.should_cow(page_offset_in_source_region + i))
             TRY(new_region->set_should_cow(i, true));
     }
 
-    SpinlockLocker locker(m_lock);
-    TRY(m_region_tree.place_specifically(*new_region, range));
+    TRY(m_region_tree.with([&](auto& region_tree) -> ErrorOr<void> {
+        TRY(region_tree.place_specifically(*new_region, range));
+        return {};
+    }));
     return new_region.leak_ptr();
 }
@@ -164,11 +167,14 @@ ErrorOr<Region*> AddressSpace::allocate_region(RandomizeVirtualAddress randomize
         region_name = TRY(KString::try_create(name));
 
     auto vmobject = TRY(AnonymousVMObject::try_create_with_size(size, strategy));
     auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(region_name), prot_to_region_access_flags(prot)));
-    if (requested_address.is_null()) {
-        TRY(m_region_tree.place_anywhere(*region, randomize_virtual_address, size, alignment));
-    } else {
-        TRY(m_region_tree.place_specifically(*region, VirtualRange { requested_address, size }));
-    }
+    TRY(m_region_tree.with([&](auto& region_tree) -> ErrorOr<void> {
+        if (requested_address.is_null()) {
+            TRY(region_tree.place_anywhere(*region, randomize_virtual_address, size, alignment));
+        } else {
+            TRY(region_tree.place_specifically(*region, VirtualRange { requested_address, size }));
+        }
+        return {};
+    }));
     TRY(region->map(page_directory(), ShouldFlushTLB::No));
     return region.leak_ptr();
 }
@@ -204,29 +210,29 @@ ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(RandomizeVirtualAdd
     auto region = TRY(Region::create_unplaced(move(vmobject), offset_in_vmobject, move(region_name), prot_to_region_access_flags(prot), Region::Cacheable::Yes, shared));
 
-    SpinlockLocker locker(m_lock);
-
-    if (requested_address.is_null())
-        TRY(m_region_tree.place_anywhere(*region, randomize_virtual_address, size, alignment));
-    else
-        TRY(m_region_tree.place_specifically(*region, VirtualRange { VirtualAddress { requested_address }, size }));
-
-    ArmedScopeGuard remove_region_from_tree_on_failure = [this, &region]() {
-        // At this point the region is already part of the Process region tree, so we have to make sure
-        // we remove it from the tree before returning an error, or else the Region tree will contain
-        // a dangling pointer to the free'd Region instance
-        m_region_tree.remove(*region);
-    };
-
-    if (prot == PROT_NONE) {
-        // For PROT_NONE mappings, we don't have to set up any page table mappings.
-        // We do still need to attach the region to the page_directory though.
-        region->set_page_directory(page_directory());
-    } else {
-        TRY(region->map(page_directory(), ShouldFlushTLB::No));
-    }
-    remove_region_from_tree_on_failure.disarm();
-    return region.leak_ptr();
+    return m_region_tree.with([&](auto& region_tree) -> ErrorOr<Region*> {
+        if (requested_address.is_null())
+            TRY(region_tree.place_anywhere(*region, randomize_virtual_address, size, alignment));
+        else
+            TRY(region_tree.place_specifically(*region, VirtualRange { VirtualAddress { requested_address }, size }));
+
+        ArmedScopeGuard remove_region_from_tree_on_failure = [&] {
+            // At this point the region is already part of the Process region tree, so we have to make sure
+            // we remove it from the tree before returning an error, or else the Region tree will contain
+            // a dangling pointer to the free'd Region instance
+            region_tree.remove(*region);
+        };
+
+        if (prot == PROT_NONE) {
+            // For PROT_NONE mappings, we don't have to set up any page table mappings.
+            // We do still need to attach the region to the page_directory though.
+            region->set_page_directory(page_directory());
+        } else {
+            TRY(region->map(page_directory(), ShouldFlushTLB::No));
+        }
+        remove_region_from_tree_on_failure.disarm();
+        return region.leak_ptr();
+    });
 }
 
 void AddressSpace::deallocate_region(Region& region)
@@ -236,16 +242,14 @@ void AddressSpace::deallocate_region(Region& region)
 NonnullOwnPtr<Region> AddressSpace::take_region(Region& region)
 {
-    auto did_remove = m_region_tree.remove(region);
+    auto did_remove = m_region_tree.with([&](auto& region_tree) { return region_tree.remove(region); });
     VERIFY(did_remove);
     return NonnullOwnPtr { NonnullOwnPtr<Region>::Adopt, region };
 }
 
 Region* AddressSpace::find_region_from_range(VirtualRange const& range)
 {
-    SpinlockLocker lock(m_lock);
-    SpinlockLocker tree_locker(m_region_tree.get_lock());
-    auto* found_region = m_region_tree.regions().find(range.base().get());
+    auto* found_region = m_region_tree.with([&](auto& region_tree) { return region_tree.regions().find(range.base().get()); });
     if (!found_region)
         return nullptr;
     auto& region = *found_region;
@@ -257,7 +261,9 @@ Region* AddressSpace::find_region_from_range(VirtualRange const& range)
 Region* AddressSpace::find_region_containing(VirtualRange const& range)
 {
-    return m_region_tree.find_region_containing(range);
+    return m_region_tree.with([&](auto& region_tree) {
+        return region_tree.find_region_containing(range);
+    });
 }
 
 ErrorOr<Vector<Region*, 4>> AddressSpace::find_regions_intersecting(VirtualRange const& range)
@@ -265,24 +271,23 @@ ErrorOr<Vector<Region*, 4>> AddressSpace::find_regions_intersecting(VirtualRange
     Vector<Region*, 4> regions = {};
     size_t total_size_collected = 0;
 
-    SpinlockLocker lock(m_lock);
-    SpinlockLocker tree_locker(m_region_tree.get_lock());
-
-    auto* found_region = m_region_tree.regions().find_largest_not_above(range.base().get());
-    if (!found_region)
-        return regions;
-    for (auto iter = m_region_tree.regions().begin_from(*found_region); !iter.is_end(); ++iter) {
-        auto const& iter_range = (*iter).range();
-        if (iter_range.base() < range.end() && iter_range.end() > range.base()) {
-            TRY(regions.try_append(&*iter));
-
-            total_size_collected += (*iter).size() - iter_range.intersect(range).size();
-            if (total_size_collected == range.size())
-                break;
-        }
-    }
-
-    return regions;
+    return m_region_tree.with([&](auto& region_tree) -> ErrorOr<Vector<Region*, 4>> {
+        auto* found_region = region_tree.regions().find_largest_not_above(range.base().get());
+        if (!found_region)
+            return regions;
+        for (auto iter = region_tree.regions().begin_from(*found_region); !iter.is_end(); ++iter) {
+            auto const& iter_range = (*iter).range();
+            if (iter_range.base() < range.end() && iter_range.end() > range.base()) {
+                TRY(regions.try_append(&*iter));
+
+                total_size_collected += (*iter).size() - iter_range.intersect(range).size();
+                if (total_size_collected == range.size())
+                    break;
+            }
+        }
+        return regions;
+    });
 }
 
 // Carve out a virtual address range from a region and return the two regions on either side
@@ -316,61 +321,63 @@ void AddressSpace::dump_regions()
     dbgln("BEGIN{} END{} SIZE{} ACCESS NAME",
         addr_padding, addr_padding, addr_padding);
 
-    SpinlockLocker lock(m_lock);
-    SpinlockLocker tree_locker(m_region_tree.get_lock());
-
-    for (auto const& region : m_region_tree.regions()) {
-        dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}", region.vaddr().get(), region.vaddr().offset(region.size() - 1).get(), region.size(),
-            region.is_readable() ? 'R' : ' ',
-            region.is_writable() ? 'W' : ' ',
-            region.is_executable() ? 'X' : ' ',
-            region.is_shared() ? 'S' : ' ',
-            region.is_stack() ? 'T' : ' ',
-            region.is_syscall_region() ? 'C' : ' ',
-            region.name());
-    }
+    m_region_tree.with([&](auto& region_tree) {
+        for (auto const& region : region_tree.regions()) {
+            dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}", region.vaddr().get(), region.vaddr().offset(region.size() - 1).get(), region.size(),
+                region.is_readable() ? 'R' : ' ',
+                region.is_writable() ? 'W' : ' ',
+                region.is_executable() ? 'X' : ' ',
+                region.is_shared() ? 'S' : ' ',
+                region.is_stack() ? 'T' : ' ',
+                region.is_syscall_region() ? 'C' : ' ',
+                region.name());
+        }
+    });
 
     MM.dump_kernel_regions();
 }
 
 void AddressSpace::remove_all_regions(Badge<Process>)
 {
     VERIFY(Thread::current() == g_finalizer);
-    SpinlockLocker locker(m_lock);
     {
         SpinlockLocker pd_locker(m_page_directory->get_lock());
         SpinlockLocker mm_locker(s_mm_lock);
-        SpinlockLocker tree_locker(m_region_tree.get_lock());
-        for (auto& region : m_region_tree.regions())
-            region.unmap_with_locks_held(ShouldFlushTLB::No, pd_locker, mm_locker);
+        m_region_tree.with([&](auto& region_tree) {
+            for (auto& region : region_tree.regions())
+                region.unmap_with_locks_held(ShouldFlushTLB::No, pd_locker, mm_locker);
+        });
     }
 
-    m_region_tree.delete_all_regions_assuming_they_are_unmapped();
+    m_region_tree.with([&](auto& region_tree) {
+        region_tree.delete_all_regions_assuming_they_are_unmapped();
+    });
 }
 
 size_t AddressSpace::amount_dirty_private() const
 {
-    SpinlockLocker lock(m_lock);
-    SpinlockLocker tree_locker(m_region_tree.get_lock());
     // FIXME: This gets a bit more complicated for Regions sharing the same underlying VMObject.
     //        The main issue I'm thinking of is when the VMObject has physical pages that none of the Regions are mapping.
     //        That's probably a situation that needs to be looked at in general.
     size_t amount = 0;
-    for (auto const& region : m_region_tree.regions()) {
-        if (!region.is_shared())
-            amount += region.amount_dirty();
-    }
+    m_region_tree.with([&](auto& region_tree) {
+        for (auto const& region : region_tree.regions()) {
+            if (!region.is_shared())
+                amount += region.amount_dirty();
+        }
+    });
     return amount;
 }
 
 ErrorOr<size_t> AddressSpace::amount_clean_inode() const
 {
-    SpinlockLocker lock(m_lock);
-    SpinlockLocker tree_locker(m_region_tree.get_lock());
-    HashTable<InodeVMObject const*> vmobjects;
-    for (auto const& region : m_region_tree.regions()) {
-        if (region.vmobject().is_inode())
-            TRY(vmobjects.try_set(&static_cast<InodeVMObject const&>(region.vmobject())));
-    }
+    HashTable<LockRefPtr<InodeVMObject>> vmobjects;
+    TRY(m_region_tree.with([&](auto& region_tree) -> ErrorOr<void> {
+        for (auto const& region : region_tree.regions()) {
+            if (region.vmobject().is_inode())
+                TRY(vmobjects.try_set(&static_cast<InodeVMObject const&>(region.vmobject())));
+        }
+        return {};
+    }));
     size_t amount = 0;
     for (auto& vmobject : vmobjects)
         amount += vmobject->amount_clean();
@@ -379,69 +386,68 @@ ErrorOr<size_t> AddressSpace::amount_clean_inode() const
 size_t AddressSpace::amount_virtual() const
 {
-    SpinlockLocker lock(m_lock);
-    SpinlockLocker tree_locker(m_region_tree.get_lock());
     size_t amount = 0;
-    for (auto const& region : m_region_tree.regions()) {
-        amount += region.size();
-    }
+    m_region_tree.with([&](auto& region_tree) {
+        for (auto const& region : region_tree.regions()) {
+            amount += region.size();
+        }
+    });
     return amount;
 }
 
 size_t AddressSpace::amount_resident() const
 {
-    SpinlockLocker lock(m_lock);
-    SpinlockLocker tree_locker(m_region_tree.get_lock());
     // FIXME: This will double count if multiple regions use the same physical page.
     size_t amount = 0;
-    for (auto const& region : m_region_tree.regions()) {
-        amount += region.amount_resident();
-    }
+    m_region_tree.with([&](auto& region_tree) {
+        for (auto const& region : region_tree.regions()) {
+            amount += region.amount_resident();
+        }
+    });
     return amount;
 }
 
 size_t AddressSpace::amount_shared() const
 {
-    SpinlockLocker lock(m_lock);
-    SpinlockLocker tree_locker(m_region_tree.get_lock());
     // FIXME: This will double count if multiple regions use the same physical page.
     // FIXME: It doesn't work at the moment, since it relies on PhysicalPage ref counts,
     //        and each PhysicalPage is only reffed by its VMObject. This needs to be refactored
     //        so that every Region contributes +1 ref to each of its PhysicalPages.
     size_t amount = 0;
-    for (auto const& region : m_region_tree.regions()) {
-        amount += region.amount_shared();
-    }
+    m_region_tree.with([&](auto& region_tree) {
+        for (auto const& region : region_tree.regions()) {
+            amount += region.amount_shared();
+        }
+    });
     return amount;
 }
 
 size_t AddressSpace::amount_purgeable_volatile() const
 {
-    SpinlockLocker lock(m_lock);
-    SpinlockLocker tree_locker(m_region_tree.get_lock());
     size_t amount = 0;
-    for (auto const& region : m_region_tree.regions()) {
-        if (!region.vmobject().is_anonymous())
-            continue;
-        auto const& vmobject = static_cast<AnonymousVMObject const&>(region.vmobject());
-        if (vmobject.is_purgeable() && vmobject.is_volatile())
-            amount += region.amount_resident();
-    }
+    m_region_tree.with([&](auto& region_tree) {
+        for (auto const& region : region_tree.regions()) {
+            if (!region.vmobject().is_anonymous())
+                continue;
+            auto const& vmobject = static_cast<AnonymousVMObject const&>(region.vmobject());
+            if (vmobject.is_purgeable() && vmobject.is_volatile())
+                amount += region.amount_resident();
+        }
+    });
     return amount;
 }
 
 size_t AddressSpace::amount_purgeable_nonvolatile() const
 {
-    SpinlockLocker lock(m_lock);
-    SpinlockLocker tree_locker(m_region_tree.get_lock());
     size_t amount = 0;
-    for (auto const& region : m_region_tree.regions()) {
-        if (!region.vmobject().is_anonymous())
-            continue;
-        auto const& vmobject = static_cast<AnonymousVMObject const&>(region.vmobject());
-        if (vmobject.is_purgeable() && !vmobject.is_volatile())
-            amount += region.amount_resident();
-    }
+    m_region_tree.with([&](auto& region_tree) {
+        for (auto const& region : region_tree.regions()) {
+            if (!region.vmobject().is_anonymous())
+                continue;
+            auto const& vmobject = static_cast<AnonymousVMObject const&>(region.vmobject());
+            if (vmobject.is_purgeable() && !vmobject.is_volatile())
+                amount += region.amount_resident();
+        }
+    });
     return amount;
 }
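
Worth noting from the hunks above: with() passes the callback's return value through, which is how take_region() and find_region_from_range() pull results out of the locked scope. A pointer returned this way is no longer protected once with() returns; the callers here keep the same lifetime guarantees they had under the old m_lock scheme, no more. A usage sketch (variable names illustrative):

    // Returning a value out of the critical section:
    bool did_remove = m_region_tree.with([&](auto& region_tree) {
        return region_tree.remove(region);
    });

    // Returning a raw pointer also works, but the spinlock is released as
    // soon as with() returns, so the pointee's lifetime must be guaranteed
    // by something other than this lock.
    Region* found = m_region_tree.with([&](auto& region_tree) {
        return region_tree.find_region_containing(range);
    });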

Kernel/Memory/AddressSpace.h

@@ -10,6 +10,7 @@
 #include <AK/RedBlackTree.h>
 #include <AK/Vector.h>
 #include <Kernel/Library/LockWeakPtr.h>
+#include <Kernel/Locking/SpinlockProtected.h>
 #include <Kernel/Memory/AllocationStrategy.h>
 #include <Kernel/Memory/PageDirectory.h>
 #include <Kernel/Memory/Region.h>
@@ -26,8 +27,8 @@ public:
     PageDirectory& page_directory() { return *m_page_directory; }
     PageDirectory const& page_directory() const { return *m_page_directory; }
 
-    auto& regions() { return m_region_tree.regions(); }
-    auto const& regions() const { return m_region_tree.regions(); }
+    SpinlockProtected<RegionTree>& region_tree() { return m_region_tree; }
+    SpinlockProtected<RegionTree> const& region_tree() const { return m_region_tree; }
 
     void dump_regions();
@@ -62,8 +63,6 @@ public:
     size_t amount_purgeable_volatile() const;
     size_t amount_purgeable_nonvolatile() const;
 
-    auto& region_tree() { return m_region_tree; }
-
 private:
     AddressSpace(NonnullLockRefPtr<PageDirectory>, VirtualRange total_range);
@ -71,7 +70,11 @@ private:
LockRefPtr<PageDirectory> m_page_directory;
RegionTree m_region_tree;
// NOTE: The total range is also in the RegionTree, but since it never changes,
// it's nice to have it in a place where we can access it without locking.
VirtualRange m_total_range;
SpinlockProtected<RegionTree> m_region_tree;
bool m_enforces_syscall_regions { false };
};

Kernel/Memory/MemoryManager.cpp

@@ -83,7 +83,7 @@ static UNMAP_AFTER_INIT VirtualRange kernel_virtual_range()
 }
 
 UNMAP_AFTER_INIT MemoryManager::MemoryManager()
-    : m_region_tree(kernel_virtual_range())
+    : m_region_tree(LockRank::None, kernel_virtual_range())
 {
     s_the = this;
@@ -434,13 +434,13 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
         // Carve out the whole page directory covering the kernel image to make MemoryManager::initialize_physical_pages() happy
         FlatPtr start_of_range = ((FlatPtr)start_of_kernel_image & ~(FlatPtr)0x1fffff);
         FlatPtr end_of_range = ((FlatPtr)end_of_kernel_image & ~(FlatPtr)0x1fffff) + 0x200000;
-        MUST(m_region_tree.place_specifically(*MUST(Region::create_unbacked()).leak_ptr(), VirtualRange { VirtualAddress(start_of_range), end_of_range - start_of_range }));
+        MUST(m_region_tree.with([&](auto& region_tree) { return region_tree.place_specifically(*MUST(Region::create_unbacked()).leak_ptr(), VirtualRange { VirtualAddress(start_of_range), end_of_range - start_of_range }); }));
     }
 
     // Allocate a virtual address range for our array
     // This looks awkward, but it basically creates a dummy region to occupy the address range permanently.
     auto& region = *MUST(Region::create_unbacked()).leak_ptr();
-    MUST(m_region_tree.place_anywhere(region, RandomizeVirtualAddress::No, physical_page_array_pages * PAGE_SIZE));
+    MUST(m_region_tree.with([&](auto& region_tree) { return region_tree.place_anywhere(region, RandomizeVirtualAddress::No, physical_page_array_pages * PAGE_SIZE); }));
     auto range = region.range();
 
     // Now that we have our special m_physical_pages_region region with enough pages to hold the entire array
@@ -643,7 +643,7 @@ Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress address)
     if (is_user_address(address))
         return nullptr;
 
-    return MM.m_region_tree.find_region_containing(address);
+    return MM.m_region_tree.with([&](auto& region_tree) { return region_tree.find_region_containing(address); });
 }
 
 Region* MemoryManager::find_user_region_from_vaddr_no_lock(AddressSpace& space, VirtualAddress vaddr)
@@ -747,7 +747,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_contiguous_kernel_region(
         name_kstring = TRY(KString::try_create(name));
     auto vmobject = TRY(AnonymousVMObject::try_create_physically_contiguous_with_size(size));
     auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable));
-    TRY(m_region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size));
+    TRY(m_region_tree.with([&](auto& region_tree) { return region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size); }));
     TRY(region->map(kernel_page_directory()));
     return region;
 }
@@ -790,7 +790,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(size_t size
         name_kstring = TRY(KString::try_create(name));
     auto vmobject = TRY(AnonymousVMObject::try_create_with_size(size, strategy));
     auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable));
-    TRY(m_region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size));
+    TRY(m_region_tree.with([&](auto& region_tree) { return region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size); }));
     TRY(region->map(kernel_page_directory()));
     return region;
 }
@@ -803,7 +803,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(PhysicalAdd
     if (!name.is_null())
         name_kstring = TRY(KString::try_create(name));
     auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable));
-    TRY(m_region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size, PAGE_SIZE));
+    TRY(m_region_tree.with([&](auto& region_tree) { return region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size, PAGE_SIZE); }));
     TRY(region->map(kernel_page_directory()));
     return region;
 }
@@ -817,7 +817,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_vmobje
         name_kstring = TRY(KString::try_create(name));
     auto region = TRY(Region::create_unplaced(vmobject, 0, move(name_kstring), access, cacheable));
-    TRY(m_region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size));
+    TRY(m_region_tree.with([&](auto& region_tree) { return region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size); }));
     TRY(region->map(kernel_page_directory()));
     return region;
 }
@@ -1120,7 +1120,7 @@ bool MemoryManager::validate_user_stack(AddressSpace& space, VirtualAddress vadd
 void MemoryManager::unregister_kernel_region(Region& region)
 {
     VERIFY(region.is_kernel());
-    m_region_tree.remove(region);
+    m_region_tree.with([&](auto& region_tree) { region_tree.remove(region); });
 }
 
 void MemoryManager::dump_kernel_regions()
@@ -1134,20 +1134,21 @@ void MemoryManager::dump_kernel_regions()
     dbgln("BEGIN{} END{} SIZE{} ACCESS NAME",
         addr_padding, addr_padding, addr_padding);
     SpinlockLocker lock(s_mm_lock);
-    SpinlockLocker tree_locker(m_region_tree.get_lock());
-    for (auto const& region : m_region_tree.regions()) {
-        dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}",
-            region.vaddr().get(),
-            region.vaddr().offset(region.size() - 1).get(),
-            region.size(),
-            region.is_readable() ? 'R' : ' ',
-            region.is_writable() ? 'W' : ' ',
-            region.is_executable() ? 'X' : ' ',
-            region.is_shared() ? 'S' : ' ',
-            region.is_stack() ? 'T' : ' ',
-            region.is_syscall_region() ? 'C' : ' ',
-            region.name());
-    }
+    m_region_tree.with([&](auto& region_tree) {
+        for (auto& region : region_tree.regions()) {
+            dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}",
+                region.vaddr().get(),
+                region.vaddr().offset(region.size() - 1).get(),
+                region.size(),
+                region.is_readable() ? 'R' : ' ',
+                region.is_writable() ? 'W' : ' ',
+                region.is_executable() ? 'X' : ' ',
+                region.is_shared() ? 'S' : ' ',
+                region.is_stack() ? 'T' : ' ',
+                region.is_syscall_region() ? 'C' : ' ',
+                region.name());
+        }
+    });
 }
 
 void MemoryManager::set_page_writable_direct(VirtualAddress vaddr, bool writable)
@@ -1201,7 +1202,7 @@ ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::create_identity_mapped_reg
 ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_unbacked_region_anywhere(size_t size, size_t alignment)
 {
     auto region = TRY(Region::create_unbacked());
-    TRY(m_region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size, alignment));
+    TRY(m_region_tree.with([&](auto& region_tree) { return region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size, alignment); }));
    return region;
 }

Kernel/Memory/MemoryManager.h

@@ -298,7 +298,7 @@ private:
     PhysicalPageEntry* m_physical_page_entries { nullptr };
     size_t m_physical_page_entries_count { 0 };
 
-    RegionTree m_region_tree;
+    SpinlockProtected<RegionTree> m_region_tree;
 
     Vector<UsedMemoryRange> m_used_memory_ranges;
     Vector<PhysicalMemoryRange> m_physical_memory_ranges;

Kernel/Memory/RegionTree.cpp

@@ -151,7 +151,6 @@ ErrorOr<VirtualRange> RegionTree::allocate_range_randomized(size_t size, size_t
 ErrorOr<void> RegionTree::place_anywhere(Region& region, RandomizeVirtualAddress randomize_virtual_address, size_t size, size_t alignment)
 {
-    SpinlockLocker locker(m_lock);
     auto range = TRY(randomize_virtual_address == RandomizeVirtualAddress::Yes ? allocate_range_randomized(size, alignment) : allocate_range_anywhere(size, alignment));
     region.m_range = range;
     m_regions.insert(region.vaddr().get(), region);
@@ -160,7 +159,6 @@ ErrorOr<void> RegionTree::place_anywhere(Region& region, RandomizeVirtualAddress
 ErrorOr<void> RegionTree::place_specifically(Region& region, VirtualRange const& range)
 {
-    SpinlockLocker locker(m_lock);
     auto allocated_range = TRY(allocate_range_specific(range.base(), range.size()));
     region.m_range = allocated_range;
     m_regions.insert(region.vaddr().get(), region);
@@ -169,13 +167,11 @@ ErrorOr<void> RegionTree::place_specifically(Region& region, VirtualRange const&
 bool RegionTree::remove(Region& region)
 {
-    SpinlockLocker locker(m_lock);
     return m_regions.remove(region.range().base().get());
 }
 
 Region* RegionTree::find_region_containing(VirtualAddress address)
 {
-    SpinlockLocker locker(m_lock);
     auto* region = m_regions.find_largest_not_above(address.get());
     if (!region || !region->contains(address))
         return nullptr;
@@ -184,7 +180,6 @@ Region* RegionTree::find_region_containing(VirtualAddress address)
 Region* RegionTree::find_region_containing(VirtualRange range)
 {
-    SpinlockLocker lock(m_lock);
     auto* region = m_regions.find_largest_not_above(range.base().get());
     if (!region || !region->contains(range))
         return nullptr;
Kernel/PerformanceEventBuffer.cpp

@@ -354,12 +354,13 @@ ErrorOr<void> PerformanceEventBuffer::add_process(Process const& process, Proces
     });
     TRY(result);
 
-    for (auto const& region : process.address_space().regions()) {
-        TRY(append_with_ip_and_bp(process.pid(), 0,
-            0, 0, PERF_EVENT_MMAP, 0, region.range().base().get(), region.range().size(), region.name()));
-    }
-
-    return {};
+    return process.address_space().region_tree().with([&](auto& region_tree) -> ErrorOr<void> {
+        for (auto const& region : region_tree.regions()) {
+            TRY(append_with_ip_and_bp(process.pid(), 0,
+                0, 0, PERF_EVENT_MMAP, 0, region.range().base().get(), region.range().size(), region.name()));
+        }
+        return {};
+    });
 }
 
 ErrorOr<FlatPtr> PerformanceEventBuffer::register_string(NonnullOwnPtr<KString> string)

Kernel/ProcessSpecificExposed.cpp

@@ -267,9 +267,8 @@ ErrorOr<void> Process::procfs_get_fds_stats(KBufferBuilder& builder) const
 ErrorOr<void> Process::procfs_get_virtual_memory_stats(KBufferBuilder& builder) const
 {
     auto array = TRY(JsonArraySerializer<>::try_create(builder));
-    {
-        SpinlockLocker lock(address_space().get_lock());
-        for (auto const& region : address_space().regions()) {
+    TRY(address_space().region_tree().with([&](auto& region_tree) -> ErrorOr<void> {
+        for (auto const& region : region_tree.regions()) {
             auto current_process_credentials = Process::current().credentials();
             if (!region.is_user() && !current_process_credentials->is_superuser())
                 continue;
@@ -306,7 +305,8 @@ ErrorOr<void> Process::procfs_get_virtual_memory_stats(KBufferBuilder& builder)
             TRY(region_object.add("pagemap"sv, pagemap_builder.string_view()));
             TRY(region_object.finish());
         }
-    }
+        return {};
+    }));
     TRY(array.finish());
     return {};
 }

Kernel/Syscalls/fork.cpp

@@ -124,17 +124,21 @@ ErrorOr<FlatPtr> Process::sys$fork(RegisterState& regs)
 #endif
 
     {
-        SpinlockLocker lock(address_space().get_lock());
-        for (auto& region : address_space().regions()) {
-            dbgln_if(FORK_DEBUG, "fork: cloning Region '{}' @ {}", region.name(), region.vaddr());
-            auto region_clone = TRY(region.try_clone());
-            TRY(region_clone->map(child->address_space().page_directory(), Memory::ShouldFlushTLB::No));
-            TRY(child->address_space().region_tree().place_specifically(*region_clone, region.range()));
-            auto* child_region = region_clone.leak_ptr();
+        TRY(address_space().region_tree().with([&](auto& parent_region_tree) -> ErrorOr<void> {
+            return child->address_space().region_tree().with([&](auto& child_region_tree) -> ErrorOr<void> {
+                for (auto& region : parent_region_tree.regions()) {
+                    dbgln_if(FORK_DEBUG, "fork: cloning Region '{}' @ {}", region.name(), region.vaddr());
+                    auto region_clone = TRY(region.try_clone());
+                    TRY(region_clone->map(child->address_space().page_directory(), Memory::ShouldFlushTLB::No));
+                    TRY(child_region_tree.place_specifically(*region_clone, region.range()));
+                    auto* child_region = region_clone.leak_ptr();
 
-            if (&region == m_master_tls_region.unsafe_ptr())
-                child->m_master_tls_region = TRY(child_region->try_make_weak_ptr());
-        }
+                    if (&region == m_master_tls_region.unsafe_ptr())
+                        child->m_master_tls_region = TRY(child_region->try_make_weak_ptr());
+                }
+                return {};
+            });
+        }));
     }
 
     thread_finalizer_guard.disarm();
@@ -151,5 +155,4 @@ ErrorOr<FlatPtr> Process::sys$fork(RegisterState& regs)
 
     return child_pid;
 }
-
 }
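
sys$fork above is the one place this patch holds two region-tree locks at once, by nesting with() calls: parent first, then child. That is safe here because the child's address space is not yet visible to any other thread; as a general pattern, every site that nests with() on two SpinlockProtected objects must agree on the acquisition order, or two CPUs taking the locks in opposite orders can deadlock. The shape, sketched with hypothetical parent, child, and clone_into():

    TRY(parent.region_tree().with([&](auto& parent_tree) -> ErrorOr<void> {
        // Nested with(): the child's lock is taken while the parent's is held,
        // so this order must be consistent everywhere both locks are taken.
        return child.region_tree().with([&](auto& child_tree) -> ErrorOr<void> {
            for (auto& region : parent_tree.regions())
                TRY(clone_into(region, child_tree)); // hypothetical helper
            return {};
        });
    }));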
}