/*
 * Copyright (c) 2021-2022, Andreas Kling <kling@serenityos.org>
 * Copyright (c) 2021, Leon Albrecht <leon2002.la@gmail.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/API/MemoryLayout.h>
#include <Kernel/Arch/CPU.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/AddressSpace.h>
#include <Kernel/Memory/AnonymousVMObject.h>
#include <Kernel/Memory/InodeVMObject.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/PerformanceManager.h>
#include <Kernel/Process.h>
#include <Kernel/Random.h>
#include <Kernel/Scheduler.h>

namespace Kernel::Memory {
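
// try_create() sets up a fresh userspace address space. When created for a fork
// (parent != nullptr), the child inherits the parent's total virtual range so that
// regions can be mirrored at identical addresses. Otherwise the range base gets a
// small page-aligned random offset; note that `% 2 * MiB` parses as `(% 2) * MiB`,
// so the offset is either 0 or 1 MiB.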
ErrorOr<NonnullOwnPtr<AddressSpace>> AddressSpace::try_create(Process& process, AddressSpace const* parent)
{
    auto page_directory = TRY(PageDirectory::try_create_for_userspace(process));

    VirtualRange total_range = [&]() -> VirtualRange {
        if (parent)
            return parent->m_region_tree.total_range();
        constexpr FlatPtr userspace_range_base = USER_RANGE_BASE;
        FlatPtr const userspace_range_ceiling = USER_RANGE_CEILING;
        size_t random_offset = (get_fast_random<u8>() % 2 * MiB) & PAGE_MASK;
        FlatPtr base = userspace_range_base + random_offset;
        return VirtualRange(VirtualAddress { base }, userspace_range_ceiling - base);
    }();

    return adopt_nonnull_own_or_enomem(new (nothrow) AddressSpace(move(page_directory), total_range));
}

AddressSpace::AddressSpace(NonnullLockRefPtr<PageDirectory> page_directory, VirtualRange total_range)
    : m_page_directory(move(page_directory))
    , m_region_tree(total_range)
{
}

AddressSpace::~AddressSpace() = default;
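
// unmap_mmap_range() tears down mappings in three escalating steps: first it tries
// an exact whole-region match, then a single region that fully contains the range
// (which must be split around it), and finally a walk over every intersecting
// region. Only regions created by mmap (and not marked immutable) may be unmapped.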
ErrorOr<void> AddressSpace::unmap_mmap_range(VirtualAddress addr, size_t size)
{
    if (!size)
        return EINVAL;

    auto range_to_unmap = TRY(VirtualRange::expand_to_page_boundaries(addr.get(), size));

    if (!is_user_range(range_to_unmap))
        return EFAULT;

    if (auto* whole_region = find_region_from_range(range_to_unmap)) {
        if (!whole_region->is_mmap())
            return EPERM;
        if (whole_region->is_immutable())
            return EPERM;

        PerformanceManager::add_unmap_perf_event(Process::current(), whole_region->range());

        deallocate_region(*whole_region);
        return {};
    }

    if (auto* old_region = find_region_containing(range_to_unmap)) {
        if (!old_region->is_mmap())
            return EPERM;
        if (old_region->is_immutable())
            return EPERM;

        // Remove the old region from our regions tree, since we're going to add another region
        // with the exact same start address.
        auto region = take_region(*old_region);
        region->unmap();

        auto new_regions = TRY(try_split_region_around_range(*region, range_to_unmap));

        // And finally we map the new region(s) using our page directory (they were just allocated and don't have one).
        for (auto* new_region : new_regions) {
            // TODO: Ideally we should do this in a way that can be rolled back on failure, as failing here
            // leaves the caller in an undefined state.
            TRY(new_region->map(page_directory()));
        }

        PerformanceManager::add_unmap_perf_event(Process::current(), range_to_unmap);

        return {};
    }

    // Try again while checking multiple regions at a time.
    auto const& regions = TRY(find_regions_intersecting(range_to_unmap));
    if (regions.is_empty())
        return {};

    // Check if any of the regions is not mmap'ed, so we don't accidentally
    // error out with just half a region map left.
    for (auto* region : regions) {
        if (!region->is_mmap())
            return EPERM;
        if (region->is_immutable())
            return EPERM;
    }

    Vector<Region*, 2> new_regions;

    for (auto* old_region : regions) {
        // If it's a full match we can remove the entire old region.
        if (old_region->range().intersect(range_to_unmap).size() == old_region->size()) {
            deallocate_region(*old_region);
            continue;
        }

        // Remove the old region from our regions tree, since we're going to add another region
        // with the exact same start address.
        auto region = take_region(*old_region);
        region->unmap();

        // Otherwise, split the regions and collect them for future mapping.
        auto split_regions = TRY(try_split_region_around_range(*region, range_to_unmap));
        TRY(new_regions.try_extend(split_regions));
    }

    // And finally map the new region(s) into our page directory.
    for (auto* new_region : new_regions) {
        // TODO: Ideally we should do this in a way that can be rolled back on failure, as failing here
        // leaves the caller in an undefined state.
        TRY(new_region->map(page_directory()));
    }

    PerformanceManager::add_unmap_perf_event(Process::current(), range_to_unmap);

    return {};
}
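
// try_allocate_split_region() clones a slice of source_region into a brand-new
// Region backed by the same VMObject: access flags, cacheability, and the
// shared/stack/syscall/mmap attributes are carried over, per-page CoW bits are
// copied for the covered pages, and the new region is then placed at the exact
// requested range in the region tree.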
ErrorOr<Region*> AddressSpace::try_allocate_split_region(Region const& source_region, VirtualRange const& range, size_t offset_in_vmobject)
{
    OwnPtr<KString> region_name;
    if (!source_region.name().is_null())
        region_name = TRY(KString::try_create(source_region.name()));

    auto new_region = TRY(Region::create_unplaced(
        source_region.vmobject(), offset_in_vmobject, move(region_name), source_region.access(), source_region.is_cacheable() ? Region::Cacheable::Yes : Region::Cacheable::No, source_region.is_shared()));
    new_region->set_syscall_region(source_region.is_syscall_region());
    new_region->set_mmap(source_region.is_mmap(), source_region.mmapped_from_readable(), source_region.mmapped_from_writable());
    new_region->set_stack(source_region.is_stack());
    size_t page_offset_in_source_region = (offset_in_vmobject - source_region.offset_in_vmobject()) / PAGE_SIZE;
    for (size_t i = 0; i < new_region->page_count(); ++i) {
        if (source_region.should_cow(page_offset_in_source_region + i))
            TRY(new_region->set_should_cow(i, true));
    }
    TRY(m_region_tree.place_specifically(*new_region, range));
    return new_region.leak_ptr();
}
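
// A hypothetical call site for allocate_region(), for illustration only (the
// caller, region name, and sizes below are made up, not taken from this file):
//
//     auto* region = TRY(address_space->allocate_region(RandomizeVirtualAddress::Yes,
//         VirtualAddress {}, 64 * KiB, PAGE_SIZE, "Example"sv,
//         PROT_READ | PROT_WRITE, AllocationStrategy::Reserve));
//
// Passing a null requested_address lets the region tree pick a spot; a non-null
// address must be page-aligned and is honored exactly.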
ErrorOr<Region*> AddressSpace::allocate_region(RandomizeVirtualAddress randomize_virtual_address, VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, StringView name, int prot, AllocationStrategy strategy)
{
    if (!requested_address.is_page_aligned())
        return EINVAL;
    auto size = TRY(Memory::page_round_up(requested_size));
    auto alignment = TRY(Memory::page_round_up(requested_alignment));
    OwnPtr<KString> region_name;
    if (!name.is_null())
        region_name = TRY(KString::try_create(name));
    auto vmobject = TRY(AnonymousVMObject::try_create_with_size(size, strategy));
    auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(region_name), prot_to_region_access_flags(prot)));
    if (requested_address.is_null()) {
        TRY(m_region_tree.place_anywhere(*region, randomize_virtual_address, size, alignment));
    } else {
        TRY(m_region_tree.place_specifically(*region, VirtualRange { requested_address, size }));
    }
    TRY(region->map(page_directory(), ShouldFlushTLB::No));
    return region.leak_ptr();
}

ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(VirtualRange requested_range, NonnullLockRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
{
    return allocate_region_with_vmobject(RandomizeVirtualAddress::Yes, requested_range.base(), requested_range.size(), PAGE_SIZE, move(vmobject), offset_in_vmobject, name, prot, shared);
}
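
// The full allocate_region_with_vmobject() overload validates the requested window
// against the VMObject (the offset and the end must both lie within it, with the
// addition overflow-checked), places the region, and then maps it. The
// ArmedScopeGuard below exists because placement and mapping are separate steps:
// once the region is in the tree, any later failure must pull it back out before
// the OwnPtr frees it.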
ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(RandomizeVirtualAddress randomize_virtual_address, VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, NonnullLockRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
{
    if (!requested_address.is_page_aligned())
        return EINVAL;
    auto size = TRY(page_round_up(requested_size));
    auto alignment = TRY(page_round_up(requested_alignment));

    if (Checked<size_t>::addition_would_overflow(offset_in_vmobject, requested_size))
        return EOVERFLOW;

    size_t end_in_vmobject = offset_in_vmobject + requested_size;
    if (offset_in_vmobject >= vmobject->size()) {
        dbgln("allocate_region_with_vmobject: Attempt to allocate a region with an offset past the end of its VMObject.");
        return EINVAL;
    }
    if (end_in_vmobject > vmobject->size()) {
        dbgln("allocate_region_with_vmobject: Attempt to allocate a region with an end past the end of its VMObject.");
        return EINVAL;
    }
    offset_in_vmobject &= PAGE_MASK;
    OwnPtr<KString> region_name;
    if (!name.is_null())
        region_name = TRY(KString::try_create(name));

    auto region = TRY(Region::create_unplaced(move(vmobject), offset_in_vmobject, move(region_name), prot_to_region_access_flags(prot), Region::Cacheable::Yes, shared));

    if (requested_address.is_null())
        TRY(m_region_tree.place_anywhere(*region, randomize_virtual_address, size, alignment));
    else
        TRY(m_region_tree.place_specifically(*region, VirtualRange { VirtualAddress { requested_address }, size }));

    ArmedScopeGuard remove_region_from_tree_on_failure = [&] {
        // At this point the region is already part of the Process region tree, so we have to make sure
        // we remove it from the tree before returning an error, or else the Region tree will contain
        // a dangling pointer to the freed Region instance.
        m_region_tree.remove(*region);
    };

    if (prot == PROT_NONE) {
        // For PROT_NONE mappings, we don't have to set up any page table mappings.
        // We do still need to attach the region to the page_directory though.
        region->set_page_directory(page_directory());
    } else {
        TRY(region->map(page_directory(), ShouldFlushTLB::No));
    }

    remove_region_from_tree_on_failure.disarm();
    return region.leak_ptr();
}

void AddressSpace::deallocate_region(Region& region)
{
    (void)take_region(region);
}

NonnullOwnPtr<Region> AddressSpace::take_region(Region& region)
{
    auto did_remove = m_region_tree.remove(region);
    VERIFY(did_remove);
    return NonnullOwnPtr { NonnullOwnPtr<Region>::Adopt, region };
}
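
// find_region_from_range() only succeeds on an exact match: the base address must
// be a key in the region tree, and the region's size must equal the page-rounded
// size of the requested range.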
Region* AddressSpace::find_region_from_range(VirtualRange const& range)
{
    auto* found_region = m_region_tree.regions().find(range.base().get());
    if (!found_region)
        return nullptr;
    auto& region = *found_region;
    auto rounded_range_size = page_round_up(range.size());
    if (rounded_range_size.is_error() || region.size() != rounded_range_size.value())
        return nullptr;
    return &region;
}

Region* AddressSpace::find_region_containing(VirtualRange const& range)
{
    return m_region_tree.find_region_containing(range);
}
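
// find_regions_intersecting() starts from the last region whose base is at or below
// range.base() (via find_largest_not_above), since only that region could overlap
// the start of the range, then walks forward collecting every overlapping region.
// The running total below serves as an early-exit heuristic so the walk can stop
// before reaching the end of the tree.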
ErrorOr<Vector<Region*, 4>> AddressSpace::find_regions_intersecting(VirtualRange const& range)
{
    Vector<Region*, 4> regions = {};
    size_t total_size_collected = 0;

    auto* found_region = m_region_tree.regions().find_largest_not_above(range.base().get());
    if (!found_region)
        return regions;
    for (auto iter = m_region_tree.regions().begin_from(*found_region); !iter.is_end(); ++iter) {
        auto const& iter_range = (*iter).range();
        if (iter_range.base() < range.end() && iter_range.end() > range.base()) {
            TRY(regions.try_append(&*iter));

            total_size_collected += (*iter).size() - iter_range.intersect(range).size();
            if (total_size_collected == range.size())
                break;
        }
    }

    return regions;
}
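
// Note: VirtualRange::carve() yields one leftover range when desired_range touches
// either end of the source region, and two when it punches a hole in the middle.
// A full overlap would yield none; callers handle that case via deallocate_region()
// before reaching this helper, which is what the VERIFY below relies on.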
// Carve out a virtual address range from a region and return the two regions on either side
ErrorOr<Vector<Region*, 2>> AddressSpace::try_split_region_around_range(Region const& source_region, VirtualRange const& desired_range)
{
    VirtualRange old_region_range = source_region.range();
    auto remaining_ranges_after_unmap = old_region_range.carve(desired_range);

    VERIFY(!remaining_ranges_after_unmap.is_empty());
    auto try_make_replacement_region = [&](VirtualRange const& new_range) -> ErrorOr<Region*> {
        VERIFY(old_region_range.contains(new_range));
        size_t new_range_offset_in_vmobject = source_region.offset_in_vmobject() + (new_range.base().get() - old_region_range.base().get());
        return try_allocate_split_region(source_region, new_range, new_range_offset_in_vmobject);
    };
    Vector<Region*, 2> new_regions;
    for (auto& new_range : remaining_ranges_after_unmap) {
        auto* new_region = TRY(try_make_replacement_region(new_range));
        new_regions.unchecked_append(new_region);
    }
    return new_regions;
}
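
// Flag legend for the dump below: R/W/X are the usual access bits, S means shared,
// T marks a stack region, and C marks a syscall region.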
void AddressSpace::dump_regions()
{
    dbgln("Process regions:");
    char const* addr_padding = " ";
    dbgln("BEGIN{} END{} SIZE{} ACCESS NAME",
        addr_padding, addr_padding, addr_padding);

    for (auto const& region : m_region_tree.regions()) {
        dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}", region.vaddr().get(), region.vaddr().offset(region.size() - 1).get(), region.size(),
            region.is_readable() ? 'R' : ' ',
            region.is_writable() ? 'W' : ' ',
            region.is_executable() ? 'X' : ' ',
            region.is_shared() ? 'S' : ' ',
            region.is_stack() ? 'T' : ' ',
            region.is_syscall_region() ? 'C' : ' ',
            region.name());
    }

    MM.dump_kernel_regions();
}
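
// remove_all_regions() runs on the finalizer thread when the process dies. Teardown
// happens in two phases: first every region is unmapped while the page directory
// lock is held, skipping TLB flushes (the address space is going away, so per-region
// flushes are presumably unnecessary here), then the regions themselves are deleted
// in bulk.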
void AddressSpace::remove_all_regions(Badge<Process>)
{
    VERIFY(Thread::current() == g_finalizer);
    {
        SpinlockLocker pd_locker(m_page_directory->get_lock());
        for (auto& region : m_region_tree.regions())
            region.unmap_with_locks_held(ShouldFlushTLB::No, pd_locker);
    }

    m_region_tree.delete_all_regions_assuming_they_are_unmapped();
}

size_t AddressSpace::amount_dirty_private() const
{
    // FIXME: This gets a bit more complicated for Regions sharing the same underlying VMObject.
    //        The main issue I'm thinking of is when the VMObject has physical pages that none of the Regions are mapping.
    //        That's probably a situation that needs to be looked at in general.
    size_t amount = 0;
    for (auto const& region : m_region_tree.regions()) {
        if (!region.is_shared())
            amount += region.amount_dirty();
    }
    return amount;
}
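
// amount_clean_inode() deduplicates via the HashTable: several regions may map the
// same InodeVMObject, and counting each object only once keeps shared clean pages
// from being tallied multiple times.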
ErrorOr<size_t> AddressSpace::amount_clean_inode() const
{
    HashTable<LockRefPtr<InodeVMObject>> vmobjects;
    for (auto const& region : m_region_tree.regions()) {
        if (region.vmobject().is_inode())
            TRY(vmobjects.try_set(&static_cast<InodeVMObject const&>(region.vmobject())));
    }
    size_t amount = 0;
    for (auto& vmobject : vmobjects)
        amount += vmobject->amount_clean();
    return amount;
}

size_t AddressSpace::amount_virtual() const
{
    size_t amount = 0;
    for (auto const& region : m_region_tree.regions()) {
        amount += region.size();
    }
    return amount;
}

size_t AddressSpace::amount_resident() const
{
    // FIXME: This will double count if multiple regions use the same physical page.
    size_t amount = 0;
    for (auto const& region : m_region_tree.regions()) {
        amount += region.amount_resident();
    }
    return amount;
}

size_t AddressSpace::amount_shared() const
{
    // FIXME: This will double count if multiple regions use the same physical page.
    // FIXME: It doesn't work at the moment, since it relies on PhysicalPage ref counts,
    //        and each PhysicalPage is only reffed by its VMObject. This needs to be refactored
    //        so that every Region contributes +1 ref to each of its PhysicalPages.
    size_t amount = 0;
    for (auto const& region : m_region_tree.regions()) {
        amount += region.amount_shared();
    }
    return amount;
}

size_t AddressSpace::amount_purgeable_volatile() const
{
    size_t amount = 0;
    for (auto const& region : m_region_tree.regions()) {
        if (!region.vmobject().is_anonymous())
            continue;
        auto const& vmobject = static_cast<AnonymousVMObject const&>(region.vmobject());
        if (vmobject.is_purgeable() && vmobject.is_volatile())
            amount += region.amount_resident();
    }
    return amount;
}

size_t AddressSpace::amount_purgeable_nonvolatile() const
{
    size_t amount = 0;
    for (auto const& region : m_region_tree.regions()) {
        if (!region.vmobject().is_anonymous())
            continue;
        auto const& vmobject = static_cast<AnonymousVMObject const&>(region.vmobject());
        if (vmobject.is_purgeable() && !vmobject.is_volatile())
            amount += region.amount_resident();
    }
    return amount;
}

}