diff --git a/Kernel/Arch/x86/common/Interrupts.cpp b/Kernel/Arch/x86/common/Interrupts.cpp
index d8df04b04de..26abf88c4ff 100644
--- a/Kernel/Arch/x86/common/Interrupts.cpp
+++ b/Kernel/Arch/x86/common/Interrupts.cpp
@@ -313,7 +313,7 @@ void page_fault_handler(TrapFrame* trap)
     };

     VirtualAddress userspace_sp = VirtualAddress { regs.userspace_sp() };
-    if (!faulted_in_kernel && !MM.validate_user_stack(current_thread->process().space(), userspace_sp)) {
+    if (!faulted_in_kernel && !MM.validate_user_stack(current_thread->process().address_space(), userspace_sp)) {
         dbgln("Invalid stack pointer: {}", userspace_sp);
         handle_crash(regs, "Bad stack on page fault", SIGSTKFLT);
     }
diff --git a/Kernel/CoreDump.cpp b/Kernel/CoreDump.cpp
index ce4009fa9b1..bbba0a8d797 100644
--- a/Kernel/CoreDump.cpp
+++ b/Kernel/CoreDump.cpp
@@ -39,7 +39,7 @@ OwnPtr<CoreDump> CoreDump::create(NonnullRefPtr<Process> process, const String&
 CoreDump::CoreDump(NonnullRefPtr<Process> process, NonnullRefPtr<FileDescription>&& fd)
     : m_process(move(process))
     , m_fd(move(fd))
-    , m_num_program_headers(m_process->space().region_count() + 1) // +1 for NOTE segment
+    , m_num_program_headers(m_process->address_space().region_count() + 1) // +1 for NOTE segment
 {
 }

@@ -120,7 +120,7 @@ KResult CoreDump::write_elf_header()
 KResult CoreDump::write_program_headers(size_t notes_size)
 {
     size_t offset = sizeof(ElfW(Ehdr)) + m_num_program_headers * sizeof(ElfW(Phdr));
-    for (auto& region : m_process->space().regions()) {
+    for (auto& region : m_process->address_space().regions()) {
         ElfW(Phdr) phdr {};

         phdr.p_type = PT_LOAD;
@@ -161,7 +161,7 @@ KResult CoreDump::write_program_headers(size_t notes_size)

 KResult CoreDump::write_regions()
 {
-    for (auto& region : m_process->space().regions()) {
+    for (auto& region : m_process->address_space().regions()) {
         if (region->is_kernel())
             continue;

@@ -255,7 +255,7 @@ ByteBuffer CoreDump::create_notes_regions_data() const
 {
     ByteBuffer regions_data;
     size_t region_index = 0;
-    for (auto& region : m_process->space().regions()) {
+    for (auto& region : m_process->address_space().regions()) {
         ByteBuffer memory_region_info_buffer;
         ELF::Core::MemoryRegionInfo info {};

@@ -319,7 +319,7 @@ ByteBuffer CoreDump::create_notes_segment_data() const

 KResult CoreDump::write()
 {
-    ScopedSpinLock lock(m_process->space().get_lock());
+    ScopedSpinLock lock(m_process->address_space().get_lock());
     ProcessPagingScope scope(m_process);

     ByteBuffer notes_segment = create_notes_segment_data();
diff --git a/Kernel/Devices/KCOVDevice.cpp b/Kernel/Devices/KCOVDevice.cpp
index 063f810b619..c1dd8b2fb0e 100644
--- a/Kernel/Devices/KCOVDevice.cpp
+++ b/Kernel/Devices/KCOVDevice.cpp
@@ -140,7 +140,7 @@ KResultOr<Memory::Region*> KCOVDevice::mmap(Process& process, FileDescription&,
         return ENOBUFS; // Mmaped, before KCOV_SETBUFSIZE
     }

-    return process.space().allocate_region_with_vmobject(
+    return process.address_space().allocate_region_with_vmobject(
         range, *kcov_instance->vmobject, offset, {}, prot, shared);
 }

diff --git a/Kernel/Devices/MemoryDevice.cpp b/Kernel/Devices/MemoryDevice.cpp
index 0dce9e87d65..5650fd158e0 100644
--- a/Kernel/Devices/MemoryDevice.cpp
+++ b/Kernel/Devices/MemoryDevice.cpp
@@ -51,7 +51,7 @@ KResultOr<Memory::Region*> MemoryDevice::mmap(Process& process, FileDescription&
     if (!vmobject)
         return ENOMEM;
     dbgln("MemoryDevice: Mapped physical memory at {} for range of {} bytes", viewed_address, range.size());
-    return process.space().allocate_region_with_vmobject(
+    return process.address_space().allocate_region_with_vmobject(
         range,
         vmobject.release_nonnull(),
         0,
diff --git a/Kernel/FileSystem/AnonymousFile.cpp b/Kernel/FileSystem/AnonymousFile.cpp
index 066c129392c..b56cd2966b3 100644
--- a/Kernel/FileSystem/AnonymousFile.cpp
+++ b/Kernel/FileSystem/AnonymousFile.cpp
@@ -27,7 +27,7 @@ KResultOr<Memory::Region*> AnonymousFile::mmap(Process& process, FileDescription
     if (range.size() != m_vmobject->size())
         return EINVAL;

-    return process.space().allocate_region_with_vmobject(range, m_vmobject, offset, {}, prot, shared);
+    return process.address_space().allocate_region_with_vmobject(range, m_vmobject, offset, {}, prot, shared);
 }

 }
diff --git a/Kernel/FileSystem/InodeFile.cpp b/Kernel/FileSystem/InodeFile.cpp
index 65b3a35efea..e0425a3d601 100644
--- a/Kernel/FileSystem/InodeFile.cpp
+++ b/Kernel/FileSystem/InodeFile.cpp
@@ -103,7 +103,7 @@ KResultOr<Memory::Region*> InodeFile::mmap(Process& process, FileDescription& de
         vmobject = Memory::PrivateInodeVMObject::try_create_with_inode(inode());
     if (!vmobject)
         return ENOMEM;
-    return process.space().allocate_region_with_vmobject(range, vmobject.release_nonnull(), offset, description.absolute_path(), prot, shared);
+    return process.address_space().allocate_region_with_vmobject(range, vmobject.release_nonnull(), offset, description.absolute_path(), prot, shared);
 }

 String InodeFile::absolute_path(const FileDescription& description) const
diff --git a/Kernel/GlobalProcessExposed.cpp b/Kernel/GlobalProcessExposed.cpp
index 87c9e936df0..80ae165b3c1 100644
--- a/Kernel/GlobalProcessExposed.cpp
+++ b/Kernel/GlobalProcessExposed.cpp
@@ -453,13 +453,13 @@ private:
         process_object.add("name", process.name());
         process_object.add("executable", process.executable() ? process.executable()->absolute_path() : "");
         process_object.add("tty", process.tty() ? process.tty()->tty_name() : "notty");
-        process_object.add("amount_virtual", process.space().amount_virtual());
-        process_object.add("amount_resident", process.space().amount_resident());
-        process_object.add("amount_dirty_private", process.space().amount_dirty_private());
-        process_object.add("amount_clean_inode", process.space().amount_clean_inode());
-        process_object.add("amount_shared", process.space().amount_shared());
-        process_object.add("amount_purgeable_volatile", process.space().amount_purgeable_volatile());
-        process_object.add("amount_purgeable_nonvolatile", process.space().amount_purgeable_nonvolatile());
+        process_object.add("amount_virtual", process.address_space().amount_virtual());
+        process_object.add("amount_resident", process.address_space().amount_resident());
+        process_object.add("amount_dirty_private", process.address_space().amount_dirty_private());
+        process_object.add("amount_clean_inode", process.address_space().amount_clean_inode());
+        process_object.add("amount_shared", process.address_space().amount_shared());
+        process_object.add("amount_purgeable_volatile", process.address_space().amount_purgeable_volatile());
+        process_object.add("amount_purgeable_nonvolatile", process.address_space().amount_purgeable_nonvolatile());
         process_object.add("dumpable", process.is_dumpable());
         process_object.add("kernel", process.is_kernel_process());
         auto thread_array = process_object.add_array("threads");
diff --git a/Kernel/Graphics/FramebufferDevice.cpp b/Kernel/Graphics/FramebufferDevice.cpp
index 2e62deaf32e..36f5ad5d7df 100644
--- a/Kernel/Graphics/FramebufferDevice.cpp
+++ b/Kernel/Graphics/FramebufferDevice.cpp
@@ -63,7 +63,7 @@ KResultOr<Memory::Region*> FramebufferDevice::mmap(Process& process, FileDescrip
     } else {
         chosen_vmobject = m_swapped_framebuffer_vmobject;
     }
-    auto result = process.space().allocate_region_with_vmobject(
+    auto result = process.address_space().allocate_region_with_vmobject(
         range,
         chosen_vmobject.release_nonnull(),
         0,
diff --git a/Kernel/Graphics/VirtIOGPU/FrameBufferDevice.cpp b/Kernel/Graphics/VirtIOGPU/FrameBufferDevice.cpp
index e8e51c9e0f7..3b360d79d8e 100644
--- a/Kernel/Graphics/VirtIOGPU/FrameBufferDevice.cpp
+++ b/Kernel/Graphics/VirtIOGPU/FrameBufferDevice.cpp
@@ -259,7 +259,7 @@ KResultOr<Memory::Region*> FrameBufferDevice::mmap(Process& process, FileDescrip
     if (vmobject.is_null())
         return ENOMEM;

-    auto result = process.space().allocate_region_with_vmobject(
+    auto result = process.address_space().allocate_region_with_vmobject(
         range,
         vmobject.release_nonnull(),
         0,
diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp
index 7623408abc2..0b5f5f12a27 100644
--- a/Kernel/Memory/MemoryManager.cpp
+++ b/Kernel/Memory/MemoryManager.cpp
@@ -671,8 +671,8 @@ Region* MemoryManager::find_region_from_vaddr(VirtualAddress vaddr)
     auto page_directory = PageDirectory::find_by_cr3(read_cr3());
     if (!page_directory)
         return nullptr;
-    VERIFY(page_directory->space());
-    return find_user_region_from_vaddr(*page_directory->space(), vaddr);
+    VERIFY(page_directory->address_space());
+    return find_user_region_from_vaddr(*page_directory->address_space(), vaddr);
 }

 PageFaultResponse MemoryManager::handle_page_fault(PageFault const& fault)
@@ -930,7 +930,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()

 void MemoryManager::enter_process_paging_scope(Process& process)
 {
-    enter_space(process.space());
+    enter_space(process.address_space());
 }

 void MemoryManager::enter_space(AddressSpace& space)
diff --git a/Kernel/Memory/PageDirectory.h b/Kernel/Memory/PageDirectory.h
index 72d1249c7a4..98d3d94cf4f 100644
--- a/Kernel/Memory/PageDirectory.h
+++ b/Kernel/Memory/PageDirectory.h
@@ -41,8 +41,8 @@ public:

     VirtualRangeAllocator& identity_range_allocator() { return m_identity_range_allocator; }

-    AddressSpace* space() { return m_space; }
-    const AddressSpace* space() const { return m_space; }
+    AddressSpace* address_space() { return m_space; }
+    const AddressSpace* address_space() const { return m_space; }

     void set_space(Badge<AddressSpace>, AddressSpace& space) { m_space = &space; }
diff --git a/Kernel/PerformanceEventBuffer.cpp b/Kernel/PerformanceEventBuffer.cpp
index da5a95be923..027fae403a4 100644
--- a/Kernel/PerformanceEventBuffer.cpp
+++ b/Kernel/PerformanceEventBuffer.cpp
@@ -261,7 +261,7 @@ OwnPtr<PerformanceEventBuffer> PerformanceEventBuffer::try_create_with_size(size

 void PerformanceEventBuffer::add_process(const Process& process, ProcessEventType event_type)
 {
-    ScopedSpinLock locker(process.space().get_lock());
+    ScopedSpinLock locker(process.address_space().get_lock());

     String executable;
     if (process.executable())
@@ -278,7 +278,7 @@ void PerformanceEventBuffer::add_process(const Process& process, ProcessEventTyp
             0, 0, PERF_EVENT_THREAD_CREATE, 0, 0, 0, nullptr);
     });

-    for (auto& region : process.space().regions()) {
+    for (auto& region : process.address_space().regions()) {
         [[maybe_unused]] auto rc = append_with_ip_and_bp(process.pid(), 0, 0,
             0, PERF_EVENT_MMAP, 0, region->range().base().get(), region->range().size(), region->name());
     }
diff --git a/Kernel/Process.cpp b/Kernel/Process.cpp
index 1a73da758d0..5b71443cf4b 100644
--- a/Kernel/Process.cpp
+++ b/Kernel/Process.cpp
@@ -267,7 +267,7 @@ Process::Process(const String& name, uid_t uid, gid_t gid, ProcessID ppid, bool

 KResult Process::attach_resources(RefPtr<Thread>& first_thread, Process* fork_parent)
 {
-    m_space = Memory::AddressSpace::try_create(*this, fork_parent ? &fork_parent->space() : nullptr);
+    m_space = Memory::AddressSpace::try_create(*this, fork_parent ? &fork_parent->address_space() : nullptr);
     if (!m_space)
         return ENOMEM;

@@ -394,7 +394,7 @@ void Process::crash(int signal, FlatPtr ip, bool out_of_memory)
         m_termination_signal = signal;
     }
     set_dump_core(!out_of_memory);
-    space().dump_regions();
+    address_space().dump_regions();
     VERIFY(is_user_process());
     die();
     // We can not return from here, as there is nowhere
diff --git a/Kernel/Process.h b/Kernel/Process.h
index b298fa71894..fc44d8ec69a 100644
--- a/Kernel/Process.h
+++ b/Kernel/Process.h
@@ -515,8 +515,8 @@ public:

     PerformanceEventBuffer* perf_events() { return m_perf_event_buffer; }

-    Memory::AddressSpace& space() { return *m_space; }
-    Memory::AddressSpace const& space() const { return *m_space; }
+    Memory::AddressSpace& address_space() { return *m_space; }
+    Memory::AddressSpace const& address_space() const { return *m_space; }

     VirtualAddress signal_trampoline() const { return m_signal_trampoline; }
diff --git a/Kernel/ProcessSpecificExposed.cpp b/Kernel/ProcessSpecificExposed.cpp
index 9685cfd33a7..3cf220cd7c2 100644
--- a/Kernel/ProcessSpecificExposed.cpp
+++ b/Kernel/ProcessSpecificExposed.cpp
@@ -443,8 +443,8 @@ private:
             return false;
         JsonArraySerializer array { builder };
         {
-            ScopedSpinLock lock(process->space().get_lock());
-            for (auto& region : process->space().regions()) {
+            ScopedSpinLock lock(process->address_space().get_lock());
+            for (auto& region : process->address_space().regions()) {
                 if (!region->is_user() && !Process::current()->is_superuser())
                     continue;
                 auto region_object = array.add_object();
diff --git a/Kernel/Syscall.cpp b/Kernel/Syscall.cpp
index e9fd4e33919..58f6e80f1ad 100644
--- a/Kernel/Syscall.cpp
+++ b/Kernel/Syscall.cpp
@@ -201,7 +201,7 @@ NEVER_INLINE void syscall_handler(TrapFrame* trap)
         PANIC("Syscall from process with IOPL != 0");
     }

-    MM.validate_syscall_preconditions(process.space(), regs);
+    MM.validate_syscall_preconditions(process.address_space(), regs);

     FlatPtr function;
     FlatPtr arg1;
diff --git a/Kernel/Syscalls/execve.cpp b/Kernel/Syscalls/execve.cpp
index 8d6b865c054..ec4991a4936 100644
--- a/Kernel/Syscalls/execve.cpp
+++ b/Kernel/Syscalls/execve.cpp
@@ -672,7 +672,7 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
     regs.rip = load_result.entry_eip;
     regs.rsp = new_userspace_sp;
 #endif
-    regs.cr3 = space().page_directory().cr3();
+    regs.cr3 = address_space().page_directory().cr3();

     {
         TemporaryChange profiling_disabler(m_profiling, was_profiling);
diff --git a/Kernel/Syscalls/fork.cpp b/Kernel/Syscalls/fork.cpp
index a36135b7767..038d48b6667 100644
--- a/Kernel/Syscalls/fork.cpp
+++ b/Kernel/Syscalls/fork.cpp
@@ -42,7 +42,7 @@ KResultOr<FlatPtr> Process::sys$fork(RegisterState& regs)
     }

     dbgln_if(FORK_DEBUG, "fork: child={}", child);
-    child->space().set_enforces_syscall_regions(space().enforces_syscall_regions());
+    child->address_space().set_enforces_syscall_regions(address_space().enforces_syscall_regions());

 #if ARCH(I386)
     auto& child_regs = child_first_thread->m_regs;
@@ -92,8 +92,8 @@ KResultOr<FlatPtr> Process::sys$fork(RegisterState& regs)
 #endif

     {
-        ScopedSpinLock lock(space().get_lock());
-        for (auto& region : space().regions()) {
+        ScopedSpinLock lock(address_space().get_lock());
+        for (auto& region : address_space().regions()) {
             dbgln_if(FORK_DEBUG, "fork: cloning Region({}) '{}' @ {}", region, region->name(), region->vaddr());
             auto region_clone = region->clone();
             if (!region_clone) {
@@ -102,13 +102,13 @@ KResultOr<FlatPtr> Process::sys$fork(RegisterState& regs)
                 return ENOMEM;
             }

-            auto* child_region = child->space().add_region(region_clone.release_nonnull());
+            auto* child_region = child->address_space().add_region(region_clone.release_nonnull());
             if (!child_region) {
                 dbgln("fork: Cannot add region, insufficient memory");
                 // TODO: tear down new process?
                 return ENOMEM;
             }
-            child_region->map(child->space().page_directory(), Memory::ShouldFlushTLB::No);
+            child_region->map(child->address_space().page_directory(), Memory::ShouldFlushTLB::No);

             if (region == m_master_tls_region.unsafe_ptr())
                 child->m_master_tls_region = child_region;
diff --git a/Kernel/Syscalls/futex.cpp b/Kernel/Syscalls/futex.cpp
index e1df9aaf307..5ca6d4b42a3 100644
--- a/Kernel/Syscalls/futex.cpp
+++ b/Kernel/Syscalls/futex.cpp
@@ -129,7 +129,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
     // acquiring the queue lock
     RefPtr<Memory::VMObject> vmobject, vmobject2;
     if (!is_private) {
-        auto region = space().find_region_containing(Memory::VirtualRange { VirtualAddress { user_address_or_offset }, sizeof(u32) });
+        auto region = address_space().find_region_containing(Memory::VirtualRange { VirtualAddress { user_address_or_offset }, sizeof(u32) });
         if (!region)
             return EFAULT;
         vmobject = region->vmobject();
@@ -139,7 +139,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
     case FUTEX_REQUEUE:
     case FUTEX_CMP_REQUEUE:
     case FUTEX_WAKE_OP: {
-        auto region2 = space().find_region_containing(Memory::VirtualRange { VirtualAddress { user_address_or_offset2 }, sizeof(u32) });
+        auto region2 = address_space().find_region_containing(Memory::VirtualRange { VirtualAddress { user_address_or_offset2 }, sizeof(u32) });
         if (!region2)
             return EFAULT;
         vmobject2 = region2->vmobject();
diff --git a/Kernel/Syscalls/get_stack_bounds.cpp b/Kernel/Syscalls/get_stack_bounds.cpp
index f0f5104eec4..2a8c92f5a1c 100644
--- a/Kernel/Syscalls/get_stack_bounds.cpp
+++ b/Kernel/Syscalls/get_stack_bounds.cpp
@@ -14,7 +14,7 @@ KResultOr<FlatPtr> Process::sys$get_stack_bounds(Userspace<FlatPtr*> user_stack_
     VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this);
     auto& regs = Thread::current()->get_register_dump_from_stack();
     FlatPtr stack_pointer = regs.userspace_sp();
-    auto* stack_region = space().find_region_containing(Memory::VirtualRange { VirtualAddress(stack_pointer), 1 });
+    auto* stack_region = address_space().find_region_containing(Memory::VirtualRange { VirtualAddress(stack_pointer), 1 });
     // The syscall handler should have killed us if we had an invalid stack pointer.
     VERIFY(stack_region);

diff --git a/Kernel/Syscalls/mmap.cpp b/Kernel/Syscalls/mmap.cpp
index df4075e133d..e9e02b74fc5 100644
--- a/Kernel/Syscalls/mmap.cpp
+++ b/Kernel/Syscalls/mmap.cpp
@@ -202,13 +202,13 @@ KResultOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> u
     Optional<Memory::VirtualRange> range;

     if (map_randomized) {
-        range = space().page_directory().range_allocator().allocate_randomized(Memory::page_round_up(size), alignment);
+        range = address_space().page_directory().range_allocator().allocate_randomized(Memory::page_round_up(size), alignment);
     } else {
-        range = space().allocate_range(VirtualAddress(addr), size, alignment);
+        range = address_space().allocate_range(VirtualAddress(addr), size, alignment);
         if (!range.has_value()) {
             if (addr && !map_fixed) {
                 // If there's an address but MAP_FIXED wasn't specified, the address is just a hint.
-                range = space().allocate_range({}, size, alignment);
+                range = address_space().allocate_range({}, size, alignment);
             }
         }
     }
@@ -225,7 +225,7 @@ KResultOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> u
         vmobject = Memory::AnonymousVMObject::try_create_with_size(Memory::page_round_up(size), strategy);
         if (!vmobject)
             return ENOMEM;
-        auto region_or_error = space().allocate_region_with_vmobject(range.value(), vmobject.release_nonnull(), 0, {}, prot, map_shared);
+        auto region_or_error = address_space().allocate_region_with_vmobject(range.value(), vmobject.release_nonnull(), 0, {}, prot, map_shared);
         if (region_or_error.is_error())
             return region_or_error.error().error();
         region = region_or_error.value();
@@ -309,7 +309,7 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
     if (!is_user_range(range_to_mprotect))
         return EFAULT;

-    if (auto* whole_region = space().find_region_from_range(range_to_mprotect)) {
+    if (auto* whole_region = address_space().find_region_from_range(range_to_mprotect)) {
         if (!whole_region->is_mmap())
             return EPERM;
         if (!validate_mmap_prot(prot, whole_region->is_stack(), whole_region->vmobject().is_anonymous(), whole_region))
@@ -329,7 +329,7 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
     }

     // Check if we can carve out the desired range from an existing region
-    if (auto* old_region = space().find_region_containing(range_to_mprotect)) {
+    if (auto* old_region = address_space().find_region_containing(range_to_mprotect)) {
         if (!old_region->is_mmap())
             return EPERM;
         if (!validate_mmap_prot(prot, old_region->is_stack(), old_region->vmobject().is_anonymous(), old_region))
@@ -343,20 +343,20 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int

         // Remove the old region from our regions tree, since were going to add another region
         // with the exact same start address, but dont deallocate it yet
-        auto region = space().take_region(*old_region);
+        auto region = address_space().take_region(*old_region);

         // Unmap the old region here, specifying that we *don't* want the VM deallocated.
         region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryVirtualRange::No);

         // This vector is the region(s) adjacent to our range.
         // We need to allocate a new region for the range we wanted to change permission bits on.
-        auto adjacent_regions_or_error = space().try_split_region_around_range(*region, range_to_mprotect);
+        auto adjacent_regions_or_error = address_space().try_split_region_around_range(*region, range_to_mprotect);
         if (adjacent_regions_or_error.is_error())
             return adjacent_regions_or_error.error();
         auto& adjacent_regions = adjacent_regions_or_error.value();

         size_t new_range_offset_in_vmobject = region->offset_in_vmobject() + (range_to_mprotect.base().get() - region->range().base().get());
-        auto new_region_or_error = space().try_allocate_split_region(*region, range_to_mprotect, new_range_offset_in_vmobject);
+        auto new_region_or_error = address_space().try_allocate_split_region(*region, range_to_mprotect, new_range_offset_in_vmobject);
         if (new_region_or_error.is_error())
             return new_region_or_error.error();
         auto& new_region = *new_region_or_error.value();
@@ -366,13 +366,13 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int

         // Map the new regions using our page directory (they were just allocated and don't have one).
         for (auto* adjacent_region : adjacent_regions) {
-            adjacent_region->map(space().page_directory());
+            adjacent_region->map(address_space().page_directory());
         }
-        new_region.map(space().page_directory());
+        new_region.map(address_space().page_directory());
         return 0;
     }

-    if (const auto& regions = space().find_regions_intersecting(range_to_mprotect); regions.size()) {
+    if (const auto& regions = address_space().find_regions_intersecting(range_to_mprotect); regions.size()) {
         size_t full_size_found = 0;
         // first check before doing anything
         for (const auto* region : regions) {
@@ -406,14 +406,14 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
             }
             // Remove the old region from our regions tree, since were going to add another region
             // with the exact same start address, but dont deallocate it yet
-            auto region = space().take_region(*old_region);
+            auto region = address_space().take_region(*old_region);

             // Unmap the old region here, specifying that we *don't* want the VM deallocated.
             region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryVirtualRange::No);

             // This vector is the region(s) adjacent to our range.
             // We need to allocate a new region for the range we wanted to change permission bits on.
-            auto adjacent_regions_or_error = space().try_split_region_around_range(*old_region, intersection_to_mprotect);
+            auto adjacent_regions_or_error = address_space().try_split_region_around_range(*old_region, intersection_to_mprotect);
             if (adjacent_regions_or_error.is_error())
                 return adjacent_regions_or_error.error();
             auto& adjacent_regions = adjacent_regions_or_error.value();
@@ -422,7 +422,7 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
             VERIFY(adjacent_regions.size() == 1);

             size_t new_range_offset_in_vmobject = old_region->offset_in_vmobject() + (intersection_to_mprotect.base().get() - old_region->range().base().get());
-            auto new_region_or_error = space().try_allocate_split_region(*region, intersection_to_mprotect, new_range_offset_in_vmobject);
+            auto new_region_or_error = address_space().try_allocate_split_region(*region, intersection_to_mprotect, new_range_offset_in_vmobject);
             if (new_region_or_error.is_error())
                 return new_region_or_error.error();

@@ -433,9 +433,9 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int

             // Map the new region using our page directory (they were just allocated and don't have one) if any.
             if (adjacent_regions.size())
-                adjacent_regions[0]->map(space().page_directory());
+                adjacent_regions[0]->map(address_space().page_directory());

-            new_region.map(space().page_directory());
+            new_region.map(address_space().page_directory());
         }

         return 0;
@@ -461,7 +461,7 @@ KResultOr<FlatPtr> Process::sys$madvise(Userspace<void*> address, size_t size, i
     if (!is_user_range(range_to_madvise))
         return EFAULT;

-    auto* region = space().find_region_from_range(range_to_madvise);
+    auto* region = address_space().find_region_from_range(range_to_madvise);
     if (!region)
         return EINVAL;
     if (!region->is_mmap())
@@ -508,7 +508,7 @@ KResultOr<FlatPtr> Process::sys$set_mmap_name(Userspace<const Syscall::SC_set_mm

     auto range = Memory::VirtualRange { VirtualAddress { params.addr }, params.size };

-    auto* region = space().find_region_from_range(range);
+    auto* region = address_space().find_region_from_range(range);
     if (!region)
         return EINVAL;
     if (!region->is_mmap())
@@ -525,7 +525,7 @@ KResultOr<FlatPtr> Process::sys$munmap(Userspace<void*> addr, size_t size)
     VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this)
     REQUIRE_PROMISE(stdio);

-    auto result = space().unmap_mmap_range(VirtualAddress { addr }, size);
+    auto result = address_space().unmap_mmap_range(VirtualAddress { addr }, size);
     if (result.is_error())
         return result;
     return 0;
@@ -546,7 +546,7 @@ KResultOr<FlatPtr> Process::sys$mremap(Userspace<const Syscall::SC_mremap_params
     if (!copy_from_user(&params, user_params))
         return EFAULT;

-    auto* old_region = space().find_region_from_range(Memory::VirtualRange { VirtualAddress { params.old_address }, params.old_size });
+    auto* old_region = address_space().find_region_from_range(Memory::VirtualRange { VirtualAddress { params.old_address }, params.old_size });
     if (!old_region)
         return EINVAL;

@@ -567,9 +567,9 @@ KResultOr<FlatPtr> Process::sys$mremap(Userspace<const Syscall::SC_mremap_params
         auto old_name = old_region->take_name();

         old_region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryVirtualRange::No);
-        space().deallocate_region(*old_region);
+        address_space().deallocate_region(*old_region);

-        auto new_region_or_error = space().allocate_region_with_vmobject(range, new_vmobject.release_nonnull(), old_offset, old_name->view(), old_prot, false);
+        auto new_region_or_error = address_space().allocate_region_with_vmobject(range, new_vmobject.release_nonnull(), old_offset, old_name->view(), old_prot, false);
         if (new_region_or_error.is_error())
             return new_region_or_error.error().error();
         auto& new_region = *new_region_or_error.value();
@@ -608,11 +608,11 @@ KResultOr<FlatPtr> Process::sys$allocate_tls(Userspace<const char*> initial_data
     if (multiple_threads)
         return EINVAL;

-    auto range = space().allocate_range({}, size);
+    auto range = address_space().allocate_range({}, size);
     if (!range.has_value())
         return ENOMEM;

-    auto region_or_error = space().allocate_region(range.value(), String("Master TLS"), PROT_READ | PROT_WRITE);
+    auto region_or_error = address_space().allocate_region(range.value(), String("Master TLS"), PROT_READ | PROT_WRITE);
     if (region_or_error.is_error())
         return region_or_error.error().error();

@@ -646,18 +646,18 @@ KResultOr<FlatPtr> Process::sys$allocate_tls(Userspace<const char*> initial_data
 KResultOr<FlatPtr> Process::sys$msyscall(Userspace<void*> address)
 {
     VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this)
-    if (space().enforces_syscall_regions())
+    if (address_space().enforces_syscall_regions())
         return EPERM;

     if (!address) {
-        space().set_enforces_syscall_regions(true);
+        address_space().set_enforces_syscall_regions(true);
         return 0;
     }

     if (!Memory::is_user_address(VirtualAddress { address }))
         return EFAULT;

-    auto* region = space().find_region_containing(Memory::VirtualRange { VirtualAddress { address }, 1 });
+    auto* region = address_space().find_region_containing(Memory::VirtualRange { VirtualAddress { address }, 1 });
     if (!region)
         return EINVAL;

diff --git a/Kernel/Syscalls/ptrace.cpp b/Kernel/Syscalls/ptrace.cpp
index 06ccc8e02d2..32919cd7d5b 100644
--- a/Kernel/Syscalls/ptrace.cpp
+++ b/Kernel/Syscalls/ptrace.cpp
@@ -195,7 +195,7 @@ KResultOr<u32> Process::peek_user_data(Userspace<const u32*> address)
 KResult Process::poke_user_data(Userspace<u32*> address, u32 data)
 {
     Memory::VirtualRange range = { VirtualAddress(address), sizeof(u32) };
-    auto* region = space().find_region_containing(range);
+    auto* region = address_space().find_region_containing(range);
     if (!region)
         return EFAULT;
     ProcessPagingScope scope(*this);
diff --git a/Kernel/Syscalls/thread.cpp b/Kernel/Syscalls/thread.cpp
index c13eec5d887..e3c3ff9e077 100644
--- a/Kernel/Syscalls/thread.cpp
+++ b/Kernel/Syscalls/thread.cpp
@@ -31,7 +31,7 @@ KResultOr<FlatPtr> Process::sys$create_thread(void* (*entry)(void*), Userspace<c
     if (user_sp.has_overflow())
         return EOVERFLOW;

-    if (!MM.validate_user_stack(this->space(), VirtualAddress(user_sp.value() - 4)))
+    if (!MM.validate_user_stack(this->address_space(), VirtualAddress(user_sp.value() - 4)))
         return EFAULT;

     // FIXME: return EAGAIN if Thread::all_threads().size() is greater than PTHREAD_THREADS_MAX
@@ -73,7 +73,7 @@ KResultOr<FlatPtr> Process::sys$create_thread(void* (*entry)(void*), Userspace<c
     regs.rsp = user_sp.value();
 #endif

-    regs.cr3 = space().page_directory().cr3();
+    regs.cr3 = address_space().page_directory().cr3();

     auto tsr_result = thread->make_thread_specific_region({});
     if (tsr_result.is_error())
@@ -102,7 +102,7 @@ void Process::sys$exit_thread(Userspace<void*> exit_value, Userspace<void*> stac
     PerformanceManager::add_thread_exit_event(*current_thread);

     if (stack_location) {
-        auto unmap_result = space().unmap_mmap_range(VirtualAddress { stack_location }, stack_size);
+        auto unmap_result = address_space().unmap_mmap_range(VirtualAddress { stack_location }, stack_size);
         if (unmap_result.is_error())
             dbgln("Failed to unmap thread stack, terminating thread anyway. Error code: {}", unmap_result.error());
     }
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index 7e2425d9bf9..fcb799b9098 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -117,7 +117,7 @@ Thread::Thread(NonnullRefPtr<Process> process, NonnullOwnPtr<Memory::Region> ker
     m_regs.cs = GDT_SELECTOR_CODE3 | 3;
 #endif

-    m_regs.cr3 = m_process->space().page_directory().cr3();
+    m_regs.cr3 = m_process->address_space().page_directory().cr3();

     m_kernel_stack_base = m_kernel_stack_region->vaddr().get();
     m_kernel_stack_top = m_kernel_stack_region->vaddr().offset(default_kernel_stack_size).get() & ~(FlatPtr)0x7u;
@@ -404,8 +404,8 @@ void Thread::exit(void* exit_value)
     u32 unlock_count;
     [[maybe_unused]] auto rc = unlock_process_if_locked(unlock_count);
     if (m_thread_specific_range.has_value()) {
-        auto* region = process().space().find_region_from_range(m_thread_specific_range.value());
-        process().space().deallocate_region(*region);
+        auto* region = process().address_space().find_region_from_range(m_thread_specific_range.value());
+        process().address_space().deallocate_region(*region);
     }
 #ifdef ENABLE_KERNEL_COVERAGE_COLLECTION
     KCOVDevice::free_thread();
@@ -1158,7 +1158,7 @@ static bool symbolicate(RecognizedSymbol const& symbol, Process& process, String
     if (!Memory::is_user_address(VirtualAddress(symbol.address))) {
         builder.append("0xdeadc0de\n");
     } else {
-        if (auto* region = process.space().find_region_containing({ VirtualAddress(symbol.address), sizeof(FlatPtr) })) {
+        if (auto* region = process.address_space().find_region_containing({ VirtualAddress(symbol.address), sizeof(FlatPtr) })) {
             size_t offset = symbol.address - region->vaddr().get();
             if (auto region_name = region->name(); !region_name.is_null() && !region_name.is_empty())
                 builder.appendff("{:p} {} + {:#x}\n", (void*)symbol.address, region_name, offset);
@@ -1219,11 +1219,11 @@ KResult Thread::make_thread_specific_region(Badge<Process>)
     if (!process().m_master_tls_region)
         return KSuccess;

-    auto range = process().space().allocate_range({}, thread_specific_region_size());
+    auto range = process().address_space().allocate_range({}, thread_specific_region_size());
     if (!range.has_value())
         return ENOMEM;

-    auto region_or_error = process().space().allocate_region(range.value(), "Thread-specific", PROT_READ | PROT_WRITE);
+    auto region_or_error = process().address_space().allocate_region(range.value(), "Thread-specific", PROT_READ | PROT_WRITE);
     if (region_or_error.is_error())
         return region_or_error.error();