Kernel: Rename Range => VirtualRange
...and also RangeAllocator => VirtualRangeAllocator. This clarifies that the ranges we're dealing with are *virtual* memory ranges and not anything else.
parent 93d98d4976
commit cd5faf4e42

Notes (sideshowbarker, 2024-07-18 07:24:56 +09:00):
    Author: https://github.com/awesomekling
    Commit: https://github.com/SerenityOS/serenity/commit/cd5faf4e425

39 changed files with 207 additions and 207 deletions
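Since the rename is mechanical, call sites change shape but not behavior. A minimal sketch of a post-rename call site (hedged: the function below and its values are illustrative, not from this commit; the type names and allocator methods are the ones visible in the diff, including the deleted RangeAllocator.h further down):

// Illustrative only: exercises the renamed types from this commit.
#include <Kernel/Memory/VirtualRange.h>
#include <Kernel/Memory/VirtualRangeAllocator.h>

namespace Kernel::Memory {

void example_caller(VirtualRangeAllocator& allocator)
{
    // Previously: Optional<Range> range = allocator.allocate_anywhere(...);
    Optional<VirtualRange> range = allocator.allocate_anywhere(4 * PAGE_SIZE);
    if (!range.has_value())
        return; // Allocation failed; nothing to release.

    // A VirtualRange is still just a base address plus a size.
    VERIFY(allocator.contains(range.value()));
    // ... map pages into the range here ...
    allocator.deallocate(range.value());
}

}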
@@ -140,14 +140,14 @@ set(KERNEL_SOURCES
     Memory/PhysicalZone.cpp
     Memory/PrivateInodeVMObject.cpp
     Memory/ProcessPagingScope.cpp
-    Memory/Range.cpp
-    Memory/RangeAllocator.cpp
     Memory/Region.cpp
     Memory/RingBuffer.cpp
     Memory/ScatterGatherList.cpp
     Memory/SharedInodeVMObject.cpp
     Memory/Space.cpp
     Memory/VMObject.cpp
+    Memory/VirtualRange.cpp
+    Memory/VirtualRangeAllocator.cpp
     MiniStdLib.cpp
     Mutex.cpp
     Net/E1000ENetworkAdapter.cpp
@@ -129,7 +129,7 @@ KResult KCOVDevice::ioctl(FileDescription&, unsigned request, Userspace<void*> a
     return return_value;
 }
 
-KResultOr<Memory::Region*> KCOVDevice::mmap(Process& process, FileDescription&, Memory::Range const& range, u64 offset, int prot, bool shared)
+KResultOr<Memory::Region*> KCOVDevice::mmap(Process& process, FileDescription&, Memory::VirtualRange const& range, u64 offset, int prot, bool shared)
 {
     auto pid = process.pid();
     auto maybe_kcov_instance = proc_instance->get(pid);
@@ -22,7 +22,7 @@ public:
     static void free_process();
 
     // ^File
-    KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::Range const&, u64 offset, int prot, bool shared) override;
+    KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::VirtualRange const&, u64 offset, int prot, bool shared) override;
     KResultOr<NonnullRefPtr<FileDescription>> open(int options) override;
 
     // ^Device
@@ -37,7 +37,7 @@ void MemoryDevice::did_seek(FileDescription&, off_t)
     TODO();
 }
 
-KResultOr<Memory::Region*> MemoryDevice::mmap(Process& process, FileDescription&, Memory::Range const& range, u64 offset, int prot, bool shared)
+KResultOr<Memory::Region*> MemoryDevice::mmap(Process& process, FileDescription&, Memory::VirtualRange const& range, u64 offset, int prot, bool shared)
 {
     auto viewed_address = PhysicalAddress(offset);
 
@@ -19,7 +19,7 @@ public:
     static NonnullRefPtr<MemoryDevice> must_create();
     ~MemoryDevice();
 
-    virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::Range const&, u64 offset, int prot, bool shared) override;
+    virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::VirtualRange const&, u64 offset, int prot, bool shared) override;
 
     // ^Device
     virtual mode_t required_mode() const override { return 0660; }
@@ -36,7 +36,7 @@ private:
 
     virtual void did_seek(FileDescription&, off_t) override;
 
-    bool is_allowed_range(PhysicalAddress, Memory::Range const&) const;
+    bool is_allowed_range(PhysicalAddress, Memory::VirtualRange const&) const;
 };
 
 }
@@ -19,7 +19,7 @@ AnonymousFile::~AnonymousFile()
 {
 }
 
-KResultOr<Memory::Region*> AnonymousFile::mmap(Process& process, FileDescription&, Memory::Range const& range, u64 offset, int prot, bool shared)
+KResultOr<Memory::Region*> AnonymousFile::mmap(Process& process, FileDescription&, Memory::VirtualRange const& range, u64 offset, int prot, bool shared)
 {
     if (offset != 0)
         return EINVAL;
@@ -20,7 +20,7 @@ public:
 
     virtual ~AnonymousFile() override;
 
-    virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::Range const&, u64 offset, int prot, bool shared) override;
+    virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::VirtualRange const&, u64 offset, int prot, bool shared) override;
 
 private:
     virtual StringView class_name() const override { return "AnonymousFile"; }
@@ -40,7 +40,7 @@ KResult File::ioctl(FileDescription&, unsigned, Userspace<void*>)
     return ENOTTY;
 }
 
-KResultOr<Memory::Region*> File::mmap(Process&, FileDescription&, Memory::Range const&, u64, int, bool)
+KResultOr<Memory::Region*> File::mmap(Process&, FileDescription&, Memory::VirtualRange const&, u64, int, bool)
 {
     return ENODEV;
 }
@@ -88,7 +88,7 @@ public:
     virtual KResultOr<size_t> read(FileDescription&, u64, UserOrKernelBuffer&, size_t) = 0;
     virtual KResultOr<size_t> write(FileDescription&, u64, const UserOrKernelBuffer&, size_t) = 0;
     virtual KResult ioctl(FileDescription&, unsigned request, Userspace<void*> arg);
-    virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::Range const&, u64 offset, int prot, bool shared);
+    virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::VirtualRange const&, u64 offset, int prot, bool shared);
     virtual KResult stat(::stat&) const { return EBADF; }
 
     virtual String absolute_path(const FileDescription&) const = 0;
@@ -380,7 +380,7 @@ InodeMetadata FileDescription::metadata() const
     return {};
 }
 
-KResultOr<Memory::Region*> FileDescription::mmap(Process& process, Memory::Range const& range, u64 offset, int prot, bool shared)
+KResultOr<Memory::Region*> FileDescription::mmap(Process& process, Memory::VirtualRange const& range, u64 offset, int prot, bool shared)
 {
     MutexLocker locker(m_lock);
     return m_file->mmap(process, *this, range, offset, prot, shared);
@@ -96,7 +96,7 @@ public:
     Custody* custody() { return m_custody.ptr(); }
     const Custody* custody() const { return m_custody.ptr(); }
 
-    KResultOr<Memory::Region*> mmap(Process&, Memory::Range const&, u64 offset, int prot, bool shared);
+    KResultOr<Memory::Region*> mmap(Process&, Memory::VirtualRange const&, u64 offset, int prot, bool shared);
 
     bool is_blocking() const { return m_is_blocking; }
     void set_blocking(bool b) { m_is_blocking = b; }
@@ -93,7 +93,7 @@ KResult InodeFile::ioctl(FileDescription& description, unsigned request, Userspa
     }
 }
 
-KResultOr<Memory::Region*> InodeFile::mmap(Process& process, FileDescription& description, Memory::Range const& range, u64 offset, int prot, bool shared)
+KResultOr<Memory::Region*> InodeFile::mmap(Process& process, FileDescription& description, Memory::VirtualRange const& range, u64 offset, int prot, bool shared)
 {
     // FIXME: If PROT_EXEC, check that the underlying file system isn't mounted noexec.
     RefPtr<Memory::InodeVMObject> vmobject;
@@ -33,7 +33,7 @@ public:
     virtual KResultOr<size_t> read(FileDescription&, u64, UserOrKernelBuffer&, size_t) override;
     virtual KResultOr<size_t> write(FileDescription&, u64, const UserOrKernelBuffer&, size_t) override;
     virtual KResult ioctl(FileDescription&, unsigned request, Userspace<void*> arg) override;
-    virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::Range const&, u64 offset, int prot, bool shared) override;
+    virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::VirtualRange const&, u64 offset, int prot, bool shared) override;
     virtual KResult stat(::stat& buffer) const override { return inode().metadata().stat(buffer); }
 
     virtual String absolute_path(const FileDescription&) const override;
@@ -74,8 +74,8 @@ class PageDirectory;
 class PhysicalPage;
 class PhysicalRegion;
 class PrivateInodeVMObject;
-class Range;
-class RangeAllocator;
+class VirtualRange;
+class VirtualRangeAllocator;
 class Region;
 class SharedInodeVMObject;
 class Space;
@@ -25,7 +25,7 @@ NonnullRefPtr<FramebufferDevice> FramebufferDevice::create(const GraphicsDevice&
     return adopt_ref(*new FramebufferDevice(adapter, output_port_index, paddr, width, height, pitch));
 }
 
-KResultOr<Memory::Region*> FramebufferDevice::mmap(Process& process, FileDescription&, Memory::Range const& range, u64 offset, int prot, bool shared)
+KResultOr<Memory::Region*> FramebufferDevice::mmap(Process& process, FileDescription&, Memory::VirtualRange const& range, u64 offset, int prot, bool shared)
 {
     ScopedSpinLock lock(m_activation_lock);
     REQUIRE_PROMISE(video);
@@ -23,7 +23,7 @@ public:
     static NonnullRefPtr<FramebufferDevice> create(const GraphicsDevice&, size_t, PhysicalAddress, size_t, size_t, size_t);
 
     virtual KResult ioctl(FileDescription&, unsigned request, Userspace<void*> arg) override;
-    virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::Range const&, u64 offset, int prot, bool shared) override;
+    virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::VirtualRange const&, u64 offset, int prot, bool shared) override;
 
     // ^Device
     virtual mode_t required_mode() const override { return 0660; }
@@ -241,7 +241,7 @@ KResult FrameBufferDevice::ioctl(FileDescription&, unsigned request, Userspace<v
     };
 }
 
-KResultOr<Memory::Region*> FrameBufferDevice::mmap(Process& process, FileDescription&, Memory::Range const& range, u64 offset, int prot, bool shared)
+KResultOr<Memory::Region*> FrameBufferDevice::mmap(Process& process, FileDescription&, Memory::VirtualRange const& range, u64 offset, int prot, bool shared)
 {
     REQUIRE_PROMISE(video);
     if (!shared)
@@ -61,7 +61,7 @@ private:
     void set_buffer(int);
 
     virtual KResult ioctl(FileDescription&, unsigned request, Userspace<void*> arg) override;
-    virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::Range const&, u64 offset, int prot, bool shared) override;
+    virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::VirtualRange const&, u64 offset, int prot, bool shared) override;
     virtual bool can_read(const FileDescription&, size_t) const override { return true; }
     virtual KResultOr<size_t> read(FileDescription&, u64, UserOrKernelBuffer&, size_t) override { return EINVAL; }
     virtual bool can_write(const FileDescription&, size_t) const override { return true; }
@@ -154,12 +154,12 @@ void MemoryManager::unmap_ksyms_after_init()
 UNMAP_AFTER_INIT void MemoryManager::register_reserved_ranges()
 {
     VERIFY(!m_physical_memory_ranges.is_empty());
-    ContiguousReservedMemoryRange range;
+    ContiguousReservedMemoryVirtualRange range;
     for (auto& current_range : m_physical_memory_ranges) {
-        if (current_range.type != PhysicalMemoryRangeType::Reserved) {
+        if (current_range.type != PhysicalMemoryVirtualRangeType::Reserved) {
             if (range.start.is_null())
                 continue;
-            m_reserved_memory_ranges.append(ContiguousReservedMemoryRange { range.start, current_range.start.get() - range.start.get() });
+            m_reserved_memory_ranges.append(ContiguousReservedMemoryVirtualRange { range.start, current_range.start.get() - range.start.get() });
             range.start.set((FlatPtr) nullptr);
             continue;
         }
@@ -168,14 +168,14 @@ UNMAP_AFTER_INIT void MemoryManager::register_reserved_ranges()
         }
         range.start = current_range.start;
     }
-    if (m_physical_memory_ranges.last().type != PhysicalMemoryRangeType::Reserved)
+    if (m_physical_memory_ranges.last().type != PhysicalMemoryVirtualRangeType::Reserved)
         return;
     if (range.start.is_null())
         return;
-    m_reserved_memory_ranges.append(ContiguousReservedMemoryRange { range.start, m_physical_memory_ranges.last().start.get() + m_physical_memory_ranges.last().length - range.start.get() });
+    m_reserved_memory_ranges.append(ContiguousReservedMemoryVirtualRange { range.start, m_physical_memory_ranges.last().start.get() + m_physical_memory_ranges.last().length - range.start.get() });
 }
 
-bool MemoryManager::is_allowed_to_mmap_to_userspace(PhysicalAddress start_address, Range const& range) const
+bool MemoryManager::is_allowed_to_mmap_to_userspace(PhysicalAddress start_address, VirtualRange const& range) const
 {
     VERIFY(!m_reserved_memory_ranges.is_empty());
     for (auto& current_range : m_reserved_memory_ranges) {
@@ -194,28 +194,28 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
 {
     // Register used memory regions that we know of.
     m_used_memory_ranges.ensure_capacity(4);
-    m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::LowMemory, PhysicalAddress(0x00000000), PhysicalAddress(1 * MiB) });
-    m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Prekernel, start_of_prekernel_image, end_of_prekernel_image });
-    m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical((FlatPtr)start_of_kernel_image)), PhysicalAddress(page_round_up(virtual_to_low_physical((FlatPtr)end_of_kernel_image))) });
+    m_used_memory_ranges.append(UsedMemoryVirtualRange { UsedMemoryVirtualRangeType::LowMemory, PhysicalAddress(0x00000000), PhysicalAddress(1 * MiB) });
+    m_used_memory_ranges.append(UsedMemoryVirtualRange { UsedMemoryVirtualRangeType::Prekernel, start_of_prekernel_image, end_of_prekernel_image });
+    m_used_memory_ranges.append(UsedMemoryVirtualRange { UsedMemoryVirtualRangeType::Kernel, PhysicalAddress(virtual_to_low_physical((FlatPtr)start_of_kernel_image)), PhysicalAddress(page_round_up(virtual_to_low_physical((FlatPtr)end_of_kernel_image))) });
 
     if (multiboot_flags & 0x4) {
         auto* bootmods_start = multiboot_copy_boot_modules_array;
         auto* bootmods_end = bootmods_start + multiboot_copy_boot_modules_count;
 
         for (auto* bootmod = bootmods_start; bootmod < bootmods_end; bootmod++) {
-            m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::BootModule, PhysicalAddress(bootmod->start), PhysicalAddress(bootmod->end) });
+            m_used_memory_ranges.append(UsedMemoryVirtualRange { UsedMemoryVirtualRangeType::BootModule, PhysicalAddress(bootmod->start), PhysicalAddress(bootmod->end) });
         }
     }
 
     auto* mmap_begin = multiboot_memory_map;
     auto* mmap_end = multiboot_memory_map + multiboot_memory_map_count;
 
-    struct ContiguousPhysicalRange {
+    struct ContiguousPhysicalVirtualRange {
         PhysicalAddress lower;
         PhysicalAddress upper;
     };
 
-    Vector<ContiguousPhysicalRange> contiguous_physical_ranges;
+    Vector<ContiguousPhysicalVirtualRange> contiguous_physical_ranges;
 
     for (auto* mmap = mmap_begin; mmap < mmap_end; mmap++) {
         dmesgln("MM: Multiboot mmap: address={:p}, length={}, type={}", mmap->addr, mmap->len, mmap->type);
@@ -224,24 +224,24 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
         auto length = mmap->len;
         switch (mmap->type) {
         case (MULTIBOOT_MEMORY_AVAILABLE):
-            m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Usable, start_address, length });
+            m_physical_memory_ranges.append(PhysicalMemoryVirtualRange { PhysicalMemoryVirtualRangeType::Usable, start_address, length });
             break;
         case (MULTIBOOT_MEMORY_RESERVED):
-            m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Reserved, start_address, length });
+            m_physical_memory_ranges.append(PhysicalMemoryVirtualRange { PhysicalMemoryVirtualRangeType::Reserved, start_address, length });
             break;
         case (MULTIBOOT_MEMORY_ACPI_RECLAIMABLE):
-            m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::ACPI_Reclaimable, start_address, length });
+            m_physical_memory_ranges.append(PhysicalMemoryVirtualRange { PhysicalMemoryVirtualRangeType::ACPI_Reclaimable, start_address, length });
             break;
         case (MULTIBOOT_MEMORY_NVS):
-            m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::ACPI_NVS, start_address, length });
+            m_physical_memory_ranges.append(PhysicalMemoryVirtualRange { PhysicalMemoryVirtualRangeType::ACPI_NVS, start_address, length });
             break;
         case (MULTIBOOT_MEMORY_BADRAM):
             dmesgln("MM: Warning, detected bad memory range!");
-            m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::BadMemory, start_address, length });
+            m_physical_memory_ranges.append(PhysicalMemoryVirtualRange { PhysicalMemoryVirtualRangeType::BadMemory, start_address, length });
             break;
         default:
             dbgln("MM: Unknown range!");
-            m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Unknown, start_address, length });
+            m_physical_memory_ranges.append(PhysicalMemoryVirtualRange { PhysicalMemoryVirtualRangeType::Unknown, start_address, length });
             break;
         }
 
@@ -280,7 +280,7 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
             continue;
 
         if (contiguous_physical_ranges.is_empty() || contiguous_physical_ranges.last().upper.offset(PAGE_SIZE) != addr) {
-            contiguous_physical_ranges.append(ContiguousPhysicalRange {
+            contiguous_physical_ranges.append(ContiguousPhysicalVirtualRange {
                 .lower = addr,
                 .upper = addr,
             });
@@ -322,7 +322,7 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
     m_system_memory_info.user_physical_pages_uncommitted = m_system_memory_info.user_physical_pages;
 
     for (auto& used_range : m_used_memory_ranges) {
-        dmesgln("MM: {} range @ {} - {} (size {:#x})", UserMemoryRangeTypeNames[to_underlying(used_range.type)], used_range.start, used_range.end.offset(-1), used_range.end.as_ptr() - used_range.start.as_ptr());
+        dmesgln("MM: {} range @ {} - {} (size {:#x})", UserMemoryVirtualRangeTypeNames[to_underlying(used_range.type)], used_range.start, used_range.end.offset(-1), used_range.end.as_ptr() - used_range.start.as_ptr());
     }
 
     dmesgln("MM: Super physical region: {} - {} (size {:#x})", m_super_physical_region->lower(), m_super_physical_region->upper().offset(-1), PAGE_SIZE * m_super_physical_region->size());
@@ -389,7 +389,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
     } else {
         m_physical_pages_region = found_region->try_take_pages_from_beginning(physical_page_array_pages_and_page_tables_count);
     }
-    m_used_memory_ranges.append({ UsedMemoryRangeType::PhysicalPages, m_physical_pages_region->lower(), m_physical_pages_region->upper() });
+    m_used_memory_ranges.append({ UsedMemoryVirtualRangeType::PhysicalPages, m_physical_pages_region->lower(), m_physical_pages_region->upper() });
 
     // Create the bare page directory. This is not a fully constructed page directory and merely contains the allocators!
     m_kernel_page_directory = PageDirectory::must_create_kernel_page_directory();
@@ -746,7 +746,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress pa
     return allocate_kernel_region_with_vmobject(range.value(), *vm_object, name, access, cacheable);
 }
 
-OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(Range const& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)
+OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange const& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
     ScopedSpinLock lock(s_mm_lock);
     auto region = Region::try_create_kernel_only(range, vmobject, 0, KString::try_create(name), access, cacheable);
@@ -46,7 +46,7 @@ inline FlatPtr virtual_to_low_physical(FlatPtr virtual_)
     return virtual_ - physical_to_virtual_offset;
 }
 
-enum class UsedMemoryRangeType {
+enum class UsedMemoryVirtualRangeType {
     LowMemory = 0,
     Prekernel,
     Kernel,
@@ -54,7 +54,7 @@ enum class UsedMemoryRangeType {
     PhysicalPages,
 };
 
-static constexpr StringView UserMemoryRangeTypeNames[] {
+static constexpr StringView UserMemoryVirtualRangeTypeNames[] {
     "Low memory",
     "Prekernel",
     "Kernel",
@@ -62,18 +62,18 @@ static constexpr StringView UserMemoryRangeTypeNames[] {
     "Physical Pages"
 };
 
-struct UsedMemoryRange {
-    UsedMemoryRangeType type {};
+struct UsedMemoryVirtualRange {
+    UsedMemoryVirtualRangeType type {};
     PhysicalAddress start;
     PhysicalAddress end;
 };
 
-struct ContiguousReservedMemoryRange {
+struct ContiguousReservedMemoryVirtualRange {
     PhysicalAddress start;
     PhysicalSize length {};
 };
 
-enum class PhysicalMemoryRangeType {
+enum class PhysicalMemoryVirtualRangeType {
     Usable = 0,
     Reserved,
     ACPI_Reclaimable,
@@ -82,8 +82,8 @@ enum class PhysicalMemoryRangeType {
     Unknown,
 };
 
-struct PhysicalMemoryRange {
-    PhysicalMemoryRangeType type { PhysicalMemoryRangeType::Unknown };
+struct PhysicalMemoryVirtualRange {
+    PhysicalMemoryVirtualRangeType type { PhysicalMemoryVirtualRangeType::Unknown };
     PhysicalAddress start;
     PhysicalSize length {};
 };
@@ -185,7 +185,7 @@ public:
     OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region_identity(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
-    OwnPtr<Region> allocate_kernel_region_with_vmobject(Range const&, VMObject&, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
+    OwnPtr<Region> allocate_kernel_region_with_vmobject(VirtualRange const&, VMObject&, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
 
     struct SystemMemoryInfo {
         PhysicalSize user_physical_pages { 0 };
@@ -230,8 +230,8 @@ public:
 
     PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }
 
-    Vector<UsedMemoryRange> const& used_memory_ranges() { return m_used_memory_ranges; }
-    bool is_allowed_to_mmap_to_userspace(PhysicalAddress, Range const&) const;
+    Vector<UsedMemoryVirtualRange> const& used_memory_ranges() { return m_used_memory_ranges; }
+    bool is_allowed_to_mmap_to_userspace(PhysicalAddress, VirtualRange const&) const;
 
     PhysicalPageEntry& get_physical_page_entry(PhysicalAddress);
     PhysicalAddress get_physical_address(PhysicalPage const&);
@@ -288,9 +288,9 @@ private:
 
     Region::ListInMemoryManager m_user_regions;
     Region::ListInMemoryManager m_kernel_regions;
-    Vector<UsedMemoryRange> m_used_memory_ranges;
-    Vector<PhysicalMemoryRange> m_physical_memory_ranges;
-    Vector<ContiguousReservedMemoryRange> m_reserved_memory_ranges;
+    Vector<UsedMemoryVirtualRange> m_used_memory_ranges;
+    Vector<PhysicalMemoryVirtualRange> m_physical_memory_ranges;
+    Vector<ContiguousReservedMemoryVirtualRange> m_reserved_memory_ranges;
 
     VMObject::List m_vmobjects;
 };
@@ -307,7 +307,7 @@ inline bool is_user_range(VirtualAddress vaddr, size_t size)
     return is_user_address(vaddr) && is_user_address(vaddr.offset(size));
 }
 
-inline bool is_user_range(Range const& range)
+inline bool is_user_range(VirtualRange const& range)
 {
     return is_user_range(range.base(), range.size());
 }
@@ -43,7 +43,7 @@ UNMAP_AFTER_INIT NonnullRefPtr<PageDirectory> PageDirectory::must_create_kernel_
     return directory;
 }
 
-RefPtr<PageDirectory> PageDirectory::try_create_for_userspace(RangeAllocator const* parent_range_allocator)
+RefPtr<PageDirectory> PageDirectory::try_create_for_userspace(VirtualRangeAllocator const* parent_range_allocator)
 {
     constexpr FlatPtr userspace_range_base = 0x00800000;
     FlatPtr const userspace_range_ceiling = USER_RANGE_CEILING;
@@ -11,7 +11,7 @@
 #include <AK/RefPtr.h>
 #include <Kernel/Forward.h>
 #include <Kernel/Memory/PhysicalPage.h>
-#include <Kernel/Memory/RangeAllocator.h>
+#include <Kernel/Memory/VirtualRangeAllocator.h>
 
 namespace Kernel::Memory {
 
@@ -19,7 +19,7 @@ class PageDirectory : public RefCounted<PageDirectory> {
     friend class MemoryManager;
 
 public:
-    static RefPtr<PageDirectory> try_create_for_userspace(RangeAllocator const* parent_range_allocator = nullptr);
+    static RefPtr<PageDirectory> try_create_for_userspace(VirtualRangeAllocator const* parent_range_allocator = nullptr);
     static NonnullRefPtr<PageDirectory> must_create_kernel_page_directory();
     static RefPtr<PageDirectory> find_by_cr3(FlatPtr);
 
@@ -36,10 +36,10 @@ public:
 #endif
     }
 
-    RangeAllocator& range_allocator() { return m_range_allocator; }
-    const RangeAllocator& range_allocator() const { return m_range_allocator; }
+    VirtualRangeAllocator& range_allocator() { return m_range_allocator; }
+    VirtualRangeAllocator const& range_allocator() const { return m_range_allocator; }
 
-    RangeAllocator& identity_range_allocator() { return m_identity_range_allocator; }
+    VirtualRangeAllocator& identity_range_allocator() { return m_identity_range_allocator; }
 
     Space* space() { return m_space; }
     const Space* space() const { return m_space; }
@@ -52,8 +52,8 @@ private:
     PageDirectory();
 
     Space* m_space { nullptr };
-    RangeAllocator m_range_allocator;
-    RangeAllocator m_identity_range_allocator;
+    VirtualRangeAllocator m_range_allocator;
+    VirtualRangeAllocator m_identity_range_allocator;
 #if ARCH(X86_64)
     RefPtr<PhysicalPage> m_pml4t;
 #endif
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#pragma once
-
-#include <AK/RedBlackTree.h>
-#include <AK/Traits.h>
-#include <Kernel/Memory/Range.h>
-#include <Kernel/SpinLock.h>
-
-namespace Kernel::Memory {
-
-class RangeAllocator {
-public:
-    RangeAllocator();
-    ~RangeAllocator() = default;
-
-    void initialize_with_range(VirtualAddress, size_t);
-    void initialize_from_parent(RangeAllocator const&);
-
-    Optional<Range> allocate_anywhere(size_t, size_t alignment = PAGE_SIZE);
-    Optional<Range> allocate_specific(VirtualAddress, size_t);
-    Optional<Range> allocate_randomized(size_t, size_t alignment);
-    void deallocate(Range const&);
-
-    void dump() const;
-
-    bool contains(Range const& range) const { return m_total_range.contains(range); }
-
-private:
-    void carve_at_iterator(auto&, Range const&);
-
-    RedBlackTree<FlatPtr, Range> m_available_ranges;
-    Range m_total_range;
-    mutable SpinLock<u8> m_lock;
-};
-
-}
-
-namespace AK {
-template<>
-struct Traits<Kernel::Memory::Range> : public GenericTraits<Kernel::Memory::Range> {
-    static constexpr bool is_trivial() { return true; }
-};
-}
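The deleted header above (Kernel/Memory/RangeAllocator.h) is the allocator's entire public surface. Its replacement, Kernel/Memory/VirtualRangeAllocator.h, is not shown on this page; since the commit is a pure rename, it presumably reads the same with the new names, roughly as follows (a hedged reconstruction, not the verbatim new file):

// Hedged reconstruction of the renamed allocator interface, obtained by
// applying this commit's rename to the deleted RangeAllocator.h above.
class VirtualRangeAllocator {
public:
    VirtualRangeAllocator();
    ~VirtualRangeAllocator() = default;

    void initialize_with_range(VirtualAddress, size_t);
    void initialize_from_parent(VirtualRangeAllocator const&);

    Optional<VirtualRange> allocate_anywhere(size_t, size_t alignment = PAGE_SIZE);
    Optional<VirtualRange> allocate_specific(VirtualAddress, size_t);
    Optional<VirtualRange> allocate_randomized(size_t, size_t alignment);
    void deallocate(VirtualRange const&);

    bool contains(VirtualRange const& range) const { return m_total_range.contains(range); }

private:
    void carve_at_iterator(auto&, VirtualRange const&);

    // Free ranges, keyed by base address so neighbors can be found quickly.
    RedBlackTree<FlatPtr, VirtualRange> m_available_ranges;
    VirtualRange m_total_range;
    mutable SpinLock<u8> m_lock;
};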
@@ -19,7 +19,7 @@
 
 namespace Kernel::Memory {
 
-Region::Region(Range const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
+Region::Region(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
     : m_range(range)
     , m_offset_in_vmobject(offset_in_vmobject)
     , m_vmobject(move(vmobject))
@@ -41,11 +41,11 @@ Region::~Region()
     m_vmobject->remove_region(*this);
 
     // Make sure we disable interrupts so we don't get interrupted between unmapping and unregistering.
-    // Unmapping the region will give the VM back to the RangeAllocator, so an interrupt handler would
+    // Unmapping the region will give the VM back to the VirtualRangeAllocator, so an interrupt handler would
     // find the address<->region mappings in an invalid state there.
     ScopedSpinLock lock(s_mm_lock);
     if (m_page_directory) {
-        unmap(ShouldDeallocateVirtualMemoryRange::Yes);
+        unmap(ShouldDeallocateVirtualMemoryVirtualRange::Yes);
         VERIFY(!m_page_directory);
     }
 
@@ -147,7 +147,7 @@ size_t Region::amount_shared() const
     return bytes;
 }
 
-OwnPtr<Region> Region::try_create_user_accessible(Range const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
+OwnPtr<Region> Region::try_create_user_accessible(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
 {
     auto region = adopt_own_if_nonnull(new (nothrow) Region(range, move(vmobject), offset_in_vmobject, move(name), access, cacheable, shared));
     if (!region)
@@ -155,7 +155,7 @@ OwnPtr<Region> Region::try_create_user_accessible(Range const& range, NonnullRef
     return region;
 }
 
-OwnPtr<Region> Region::try_create_kernel_only(Range const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable)
+OwnPtr<Region> Region::try_create_kernel_only(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable)
 {
     return adopt_own_if_nonnull(new (nothrow) Region(range, move(vmobject), offset_in_vmobject, move(name), access, cacheable, false));
 }
@@ -234,7 +234,7 @@ bool Region::remap_vmobject_page(size_t page_index, bool with_flush)
     return success;
 }
 
-void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
+void Region::unmap(ShouldDeallocateVirtualMemoryVirtualRange deallocate_range)
 {
     ScopedSpinLock lock(s_mm_lock);
     if (!m_page_directory)
@@ -246,7 +246,7 @@ void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
         MM.release_pte(*m_page_directory, vaddr, i == count - 1);
     }
     MM.flush_tlb(m_page_directory, vaddr(), page_count());
-    if (deallocate_range == ShouldDeallocateVirtualMemoryRange::Yes) {
+    if (deallocate_range == ShouldDeallocateVirtualMemoryVirtualRange::Yes) {
         if (m_page_directory->range_allocator().contains(range()))
             m_page_directory->range_allocator().deallocate(range());
         else
@@ -14,7 +14,7 @@
 #include <Kernel/Heap/SlabAllocator.h>
 #include <Kernel/KString.h>
 #include <Kernel/Memory/PageFaultResponse.h>
-#include <Kernel/Memory/RangeAllocator.h>
+#include <Kernel/Memory/VirtualRangeAllocator.h>
 #include <Kernel/Sections.h>
 #include <Kernel/UnixTypes.h>
 
@@ -46,12 +46,12 @@ public:
         Yes,
     };
 
-    static OwnPtr<Region> try_create_user_accessible(Range const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable, bool shared);
-    static OwnPtr<Region> try_create_kernel_only(Range const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable = Cacheable::Yes);
+    static OwnPtr<Region> try_create_user_accessible(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable, bool shared);
+    static OwnPtr<Region> try_create_kernel_only(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable = Cacheable::Yes);
 
     ~Region();
 
-    Range const& range() const { return m_range; }
+    VirtualRange const& range() const { return m_range; }
     VirtualAddress vaddr() const { return m_range.base(); }
     size_t size() const { return m_range.size(); }
     bool is_readable() const { return m_access & Access::Read; }
@@ -94,7 +94,7 @@ public:
         return m_range.contains(vaddr);
    }
 
-    bool contains(Range const& range) const
+    bool contains(VirtualRange const& range) const
    {
         return m_range.contains(range);
     }
@@ -168,11 +168,11 @@ public:
 
     void set_page_directory(PageDirectory&);
     bool map(PageDirectory&, ShouldFlushTLB = ShouldFlushTLB::Yes);
-    enum class ShouldDeallocateVirtualMemoryRange {
+    enum class ShouldDeallocateVirtualMemoryVirtualRange {
         No,
         Yes,
     };
-    void unmap(ShouldDeallocateVirtualMemoryRange = ShouldDeallocateVirtualMemoryRange::Yes);
+    void unmap(ShouldDeallocateVirtualMemoryVirtualRange = ShouldDeallocateVirtualMemoryVirtualRange::Yes);
 
     void remap();
 
@@ -180,7 +180,7 @@ public:
     void set_syscall_region(bool b) { m_syscall_region = b; }
 
 private:
-    Region(Range const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
+    Region(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
 
     bool remap_vmobject_page(size_t page_index, bool with_flush = true);
     bool do_remap_vmobject_page(size_t page_index, bool with_flush = true);
@@ -200,7 +200,7 @@ private:
     bool map_individual_page_impl(size_t page_index);
 
     RefPtr<PageDirectory> m_page_directory;
-    Range m_range;
+    VirtualRange m_range;
     size_t m_offset_in_vmobject { 0 };
     NonnullRefPtr<VMObject> m_vmobject;
     OwnPtr<KString> m_name;
@@ -42,7 +42,7 @@ KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
     if (!size)
         return EINVAL;
 
-    auto range_or_error = Range::expand_to_page_boundaries(addr.get(), size);
+    auto range_or_error = VirtualRange::expand_to_page_boundaries(addr.get(), size);
     if (range_or_error.is_error())
         return range_or_error.error();
     auto range_to_unmap = range_or_error.value();
@@ -69,7 +69,7 @@ KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
         auto region = take_region(*old_region);
 
         // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
-        region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
+        region->unmap(Region::ShouldDeallocateVirtualMemoryVirtualRange::No);
 
         auto new_regions_or_error = try_split_region_around_range(*region, range_to_unmap);
         if (new_regions_or_error.is_error())
@@ -115,7 +115,7 @@ KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
         auto region = take_region(*old_region);
 
         // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
-        region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
+        region->unmap(Region::ShouldDeallocateVirtualMemoryVirtualRange::No);
 
         // Otherwise, split the regions and collect them for future mapping.
         auto split_regions_or_error = try_split_region_around_range(*region, range_to_unmap);
@@ -139,7 +139,7 @@ KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
     return KSuccess;
 }
 
-Optional<Range> Space::allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
+Optional<VirtualRange> Space::allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
 {
     vaddr.mask(PAGE_MASK);
     size = page_round_up(size);
@@ -148,7 +148,7 @@ Optional<Range> Space::allocate_range(VirtualAddress vaddr, size_t size, size_t
     return page_directory().range_allocator().allocate_specific(vaddr, size);
 }
 
-KResultOr<Region*> Space::try_allocate_split_region(Region const& source_region, Range const& range, size_t offset_in_vmobject)
+KResultOr<Region*> Space::try_allocate_split_region(Region const& source_region, VirtualRange const& range, size_t offset_in_vmobject)
 {
     auto new_region = Region::try_create_user_accessible(
         range, source_region.vmobject(), offset_in_vmobject, KString::try_create(source_region.name()), source_region.access(), source_region.is_cacheable() ? Region::Cacheable::Yes : Region::Cacheable::No, source_region.is_shared());
@@ -168,7 +168,7 @@ KResultOr<Region*> Space::try_allocate_split_region(Region const& source_region,
     return region;
 }
 
-KResultOr<Region*> Space::allocate_region(Range const& range, StringView name, int prot, AllocationStrategy strategy)
+KResultOr<Region*> Space::allocate_region(VirtualRange const& range, StringView name, int prot, AllocationStrategy strategy)
 {
     VERIFY(range.is_valid());
     auto vmobject = AnonymousVMObject::try_create_with_size(range.size(), strategy);
@@ -185,7 +185,7 @@ KResultOr<Region*> Space::allocate_region(Range const& range, StringView name, i
     return added_region;
 }
 
-KResultOr<Region*> Space::allocate_region_with_vmobject(Range const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
+KResultOr<Region*> Space::allocate_region_with_vmobject(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
 {
     VERIFY(range.is_valid());
     size_t end_in_vmobject = offset_in_vmobject + range.size();
@@ -232,7 +232,7 @@ NonnullOwnPtr<Region> Space::take_region(Region& region)
     return found_region;
 }
 
-Region* Space::find_region_from_range(const Range& range)
+Region* Space::find_region_from_range(VirtualRange const& range)
 {
     ScopedSpinLock lock(m_lock);
     if (m_region_lookup_cache.range.has_value() && m_region_lookup_cache.range.value() == range && m_region_lookup_cache.region)
@@ -250,7 +250,7 @@ Region* Space::find_region_from_range(const Range& range)
     return region;
 }
 
-Region* Space::find_region_containing(const Range& range)
+Region* Space::find_region_containing(VirtualRange const& range)
 {
     ScopedSpinLock lock(m_lock);
     auto candidate = m_regions.find_largest_not_above(range.base().get());
@@ -259,7 +259,7 @@ Region* Space::find_region_containing(const Range& range)
     return (*candidate)->range().contains(range) ? candidate->ptr() : nullptr;
 }
 
-Vector<Region*> Space::find_regions_intersecting(const Range& range)
+Vector<Region*> Space::find_regions_intersecting(VirtualRange const& range)
 {
     Vector<Region*> regions = {};
     size_t total_size_collected = 0;
@@ -291,13 +291,13 @@ Region* Space::add_region(NonnullOwnPtr<Region> region)
 }
 
 // Carve out a virtual address range from a region and return the two regions on either side
-KResultOr<Vector<Region*, 2>> Space::try_split_region_around_range(const Region& source_region, const Range& desired_range)
+KResultOr<Vector<Region*, 2>> Space::try_split_region_around_range(const Region& source_region, VirtualRange const& desired_range)
 {
-    Range old_region_range = source_region.range();
+    VirtualRange old_region_range = source_region.range();
     auto remaining_ranges_after_unmap = old_region_range.carve(desired_range);
 
     VERIFY(!remaining_ranges_after_unmap.is_empty());
-    auto try_make_replacement_region = [&](const Range& new_range) -> KResultOr<Region*> {
+    auto try_make_replacement_region = [&](VirtualRange const& new_range) -> KResultOr<Region*> {
         VERIFY(old_region_range.contains(new_range));
         size_t new_range_offset_in_vmobject = source_region.offset_in_vmobject() + (new_range.base().get() - old_region_range.base().get());
         return try_allocate_split_region(source_region, new_range, new_range_offset_in_vmobject);
@@ -35,20 +35,20 @@ public:
 
     KResult unmap_mmap_range(VirtualAddress, size_t);
 
-    Optional<Range> allocate_range(VirtualAddress, size_t, size_t alignment = PAGE_SIZE);
+    Optional<VirtualRange> allocate_range(VirtualAddress, size_t, size_t alignment = PAGE_SIZE);
 
-    KResultOr<Region*> allocate_region_with_vmobject(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
-    KResultOr<Region*> allocate_region(const Range&, StringView name, int prot = PROT_READ | PROT_WRITE, AllocationStrategy strategy = AllocationStrategy::Reserve);
+    KResultOr<Region*> allocate_region_with_vmobject(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
+    KResultOr<Region*> allocate_region(VirtualRange const&, StringView name, int prot = PROT_READ | PROT_WRITE, AllocationStrategy strategy = AllocationStrategy::Reserve);
     void deallocate_region(Region& region);
     NonnullOwnPtr<Region> take_region(Region& region);
 
-    KResultOr<Region*> try_allocate_split_region(Region const& source_region, Range const&, size_t offset_in_vmobject);
-    KResultOr<Vector<Region*, 2>> try_split_region_around_range(Region const& source_region, Range const&);
+    KResultOr<Region*> try_allocate_split_region(Region const& source_region, VirtualRange const&, size_t offset_in_vmobject);
+    KResultOr<Vector<Region*, 2>> try_split_region_around_range(Region const& source_region, VirtualRange const&);
 
-    Region* find_region_from_range(const Range&);
-    Region* find_region_containing(const Range&);
+    Region* find_region_from_range(VirtualRange const&);
+    Region* find_region_containing(VirtualRange const&);
 
-    Vector<Region*> find_regions_intersecting(const Range&);
+    Vector<Region*> find_regions_intersecting(VirtualRange const&);
 
     bool enforces_syscall_regions() const { return m_enforces_syscall_regions; }
     void set_enforces_syscall_regions(bool b) { m_enforces_syscall_regions = b; }
@@ -76,7 +76,7 @@ private:
     RedBlackTree<FlatPtr, NonnullOwnPtr<Region>> m_regions;
 
     struct RegionLookupCache {
-        Optional<Range> range;
+        Optional<VirtualRange> range;
         WeakPtr<Region> region;
     };
     RegionLookupCache m_region_lookup_cache;
@@ -7,16 +7,16 @@
 
 #include <AK/Vector.h>
 #include <Kernel/Memory/MemoryManager.h>
-#include <Kernel/Memory/Range.h>
+#include <Kernel/Memory/VirtualRange.h>
 #include <LibC/limits.h>
 
 namespace Kernel::Memory {
 
-Vector<Range, 2> Range::carve(const Range& taken) const
+Vector<VirtualRange, 2> VirtualRange::carve(VirtualRange const& taken) const
 {
     VERIFY((taken.size() % PAGE_SIZE) == 0);
 
-    Vector<Range, 2> parts;
+    Vector<VirtualRange, 2> parts;
     if (taken == *this)
         return {};
     if (taken.base() > base())

@@ -25,7 +25,7 @@ Vector<Range, 2> Range::carve(const Range& taken) const
         parts.append({ taken.end(), end().get() - taken.end().get() });
     return parts;
 }
-Range Range::intersect(const Range& other) const
+VirtualRange VirtualRange::intersect(VirtualRange const& other) const
 {
     if (*this == other) {
         return *this;

@@ -33,10 +33,10 @@ Range Range::intersect(const Range& other) const
     auto new_base = max(base(), other.base());
     auto new_end = min(end(), other.end());
     VERIFY(new_base < new_end);
-    return Range(new_base, (new_end - new_base).get());
+    return VirtualRange(new_base, (new_end - new_base).get());
 }
 
-KResultOr<Range> Range::expand_to_page_boundaries(FlatPtr address, size_t size)
+KResultOr<VirtualRange> VirtualRange::expand_to_page_boundaries(FlatPtr address, size_t size)
 {
     if (page_round_up_would_wrap(size))
         return EINVAL;

@@ -50,7 +50,7 @@ KResultOr<Range> Range::expand_to_page_boundaries(FlatPtr address, size_t size)
     auto base = VirtualAddress { address }.page_base();
     auto end = page_round_up(address + size);
 
-    return Range { base, end - base.get() };
+    return VirtualRange { base, end - base.get() };
 }
 
 }
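
expand_to_page_boundaries() simply widens an arbitrary byte range to whole pages. A small self-contained sketch of the same rounding, assuming 4 KiB pages:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    constexpr uintptr_t PAGE_SIZE = 4096;

    static uintptr_t page_base(uintptr_t addr) { return addr & ~(PAGE_SIZE - 1); }
    static uintptr_t page_round_up(uintptr_t addr) { return (addr + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); }

    int main()
    {
        // A 16-byte range at an unaligned address: [0x10123, 0x10133)
        uintptr_t address = 0x10123;
        size_t size = 16;

        uintptr_t base = page_base(address);           // 0x10000
        uintptr_t end = page_round_up(address + size); // 0x11000

        // The expanded range covers whole pages; exactly one page here.
        assert(base == 0x10000 && end - base == PAGE_SIZE);
    }
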
@@ -12,12 +12,12 @@
 
 namespace Kernel::Memory {
 
-class Range {
-    friend class RangeAllocator;
+class VirtualRange {
+    friend class VirtualRangeAllocator;
 
 public:
-    Range() = delete;
-    Range(VirtualAddress base, size_t size)
+    VirtualRange() = delete;
+    VirtualRange(VirtualAddress base, size_t size)
         : m_base(base)
         , m_size(size)
     {

@@ -31,7 +31,7 @@ public:
 
     VirtualAddress end() const { return m_base.offset(m_size); }
 
-    bool operator==(const Range& other) const
+    bool operator==(VirtualRange const& other) const
     {
         return m_base == other.m_base && m_size == other.m_size;
     }

@@ -43,15 +43,15 @@ public:
         return base >= m_base && base.offset(size) <= end();
     }
 
-    bool contains(const Range& other) const
+    bool contains(VirtualRange const& other) const
     {
         return contains(other.base(), other.size());
     }
 
-    Vector<Range, 2> carve(const Range&) const;
-    Range intersect(const Range&) const;
+    Vector<VirtualRange, 2> carve(VirtualRange const&) const;
+    VirtualRange intersect(VirtualRange const&) const;
 
-    static KResultOr<Range> expand_to_page_boundaries(FlatPtr address, size_t size);
+    static KResultOr<VirtualRange> expand_to_page_boundaries(FlatPtr address, size_t size);
 
 private:
     VirtualAddress m_base;

@@ -61,8 +61,8 @@ private:
 }
 
 template<>
-struct AK::Formatter<Kernel::Memory::Range> : Formatter<FormatString> {
-    void format(FormatBuilder& builder, Kernel::Memory::Range value)
+struct AK::Formatter<Kernel::Memory::VirtualRange> : Formatter<FormatString> {
+    void format(FormatBuilder& builder, Kernel::Memory::VirtualRange value)
     {
         return Formatter<FormatString>::format(builder, "{} - {} (size {:p})", value.base().as_ptr(), value.base().offset(value.size() - 1).as_ptr(), value.size());
     }
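
One detail worth noticing: the formatter prints the inclusive last byte (offset(size - 1)), not the one-past-the-end address. A toy imitation of the output, with printf standing in for AK::Format:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        uintptr_t base = 0x10000;
        size_t size = 0x3000;
        // Mirrors "{} - {} (size {:p})": the last byte is base + size - 1, i.e. inclusive.
        std::printf("0x%zx - 0x%zx (size 0x%zx)\n", (size_t)base, (size_t)(base + size - 1), size);
        // Prints: 0x10000 - 0x12fff (size 0x3000)
    }
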
@@ -5,25 +5,25 @@
  */
 
 #include <AK/Checked.h>
-#include <Kernel/Memory/RangeAllocator.h>
+#include <Kernel/Memory/VirtualRangeAllocator.h>
 #include <Kernel/Random.h>
 
 #define VM_GUARD_PAGES
 
 namespace Kernel::Memory {
 
-RangeAllocator::RangeAllocator()
+VirtualRangeAllocator::VirtualRangeAllocator()
     : m_total_range({}, 0)
 {
 }
 
-void RangeAllocator::initialize_with_range(VirtualAddress base, size_t size)
+void VirtualRangeAllocator::initialize_with_range(VirtualAddress base, size_t size)
 {
     m_total_range = { base, size };
-    m_available_ranges.insert(base.get(), Range { base, size });
+    m_available_ranges.insert(base.get(), VirtualRange { base, size });
 }
 
-void RangeAllocator::initialize_from_parent(RangeAllocator const& parent_allocator)
+void VirtualRangeAllocator::initialize_from_parent(VirtualRangeAllocator const& parent_allocator)
 {
     ScopedSpinLock lock(parent_allocator.m_lock);
     m_total_range = parent_allocator.m_total_range;

@@ -33,16 +33,16 @@ void RangeAllocator::initialize_from_parent(RangeAllocator const& parent_allocat
     }
 }
 
-void RangeAllocator::dump() const
+void VirtualRangeAllocator::dump() const
 {
     VERIFY(m_lock.is_locked());
-    dbgln("RangeAllocator({})", this);
+    dbgln("VirtualRangeAllocator({})", this);
     for (auto& range : m_available_ranges) {
         dbgln("    {:x} -> {:x}", range.base().get(), range.end().get() - 1);
     }
 }
 
-void RangeAllocator::carve_at_iterator(auto& it, Range const& range)
+void VirtualRangeAllocator::carve_at_iterator(auto& it, VirtualRange const& range)
 {
     VERIFY(m_lock.is_locked());
     auto remaining_parts = (*it).carve(range);

@@ -56,7 +56,7 @@ void RangeAllocator::carve_at_iterator(auto& it, Range const& range)
     }
 }
 
-Optional<Range> RangeAllocator::allocate_randomized(size_t size, size_t alignment)
+Optional<VirtualRange> VirtualRangeAllocator::allocate_randomized(size_t size, size_t alignment)
 {
     if (!size)
         return {};

@@ -80,7 +80,7 @@ Optional<Range> RangeAllocator::allocate_randomized(size_t size, size_t alignmen
     return allocate_anywhere(size, alignment);
 }
 
-Optional<Range> RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
+Optional<VirtualRange> VirtualRangeAllocator::allocate_anywhere(size_t size, size_t alignment)
 {
     if (!size)
         return {};

@@ -114,7 +114,7 @@ Optional<Range> RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
         FlatPtr initial_base = available_range.base().offset(offset_from_effective_base).get();
         FlatPtr aligned_base = round_up_to_power_of_two(initial_base, alignment);
 
-        Range const allocated_range(VirtualAddress(aligned_base), size);
+        VirtualRange const allocated_range(VirtualAddress(aligned_base), size);
 
         VERIFY(m_total_range.contains(allocated_range));
 

@@ -125,11 +125,11 @@ Optional<Range> RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
         carve_at_iterator(it, allocated_range);
         return allocated_range;
     }
-    dmesgln("RangeAllocator: Failed to allocate anywhere: size={}, alignment={}", size, alignment);
+    dmesgln("VirtualRangeAllocator: Failed to allocate anywhere: size={}, alignment={}", size, alignment);
    return {};
 }
 
-Optional<Range> RangeAllocator::allocate_specific(VirtualAddress base, size_t size)
+Optional<VirtualRange> VirtualRangeAllocator::allocate_specific(VirtualAddress base, size_t size)
 {
     if (!size)
         return {};

@@ -137,7 +137,7 @@ Optional<Range> RangeAllocator::allocate_specific(VirtualAddress base, size_t si
     VERIFY(base.is_page_aligned());
     VERIFY((size % PAGE_SIZE) == 0);
 
-    Range const allocated_range(base, size);
+    VirtualRange const allocated_range(base, size);
     if (!m_total_range.contains(allocated_range)) {
         return {};
     }

@@ -157,7 +157,7 @@ Optional<Range> RangeAllocator::allocate_specific(VirtualAddress base, size_t si
     return {};
 }
 
-void RangeAllocator::deallocate(Range const& range)
+void VirtualRangeAllocator::deallocate(VirtualRange const& range)
 {
     ScopedSpinLock lock(m_lock);
     VERIFY(m_total_range.contains(range));

@@ -166,7 +166,7 @@ void RangeAllocator::deallocate(Range const& range)
     VERIFY(range.base() < range.end());
     VERIFY(!m_available_ranges.is_empty());
 
-    Range merged_range = range;
+    VirtualRange merged_range = range;
 
     {
         // Try merging with preceding range.
Kernel/Memory/VirtualRangeAllocator.h (new file, 48 lines)

@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <AK/RedBlackTree.h>
+#include <AK/Traits.h>
+#include <Kernel/Memory/VirtualRange.h>
+#include <Kernel/SpinLock.h>
+
+namespace Kernel::Memory {
+
+class VirtualRangeAllocator {
+public:
+    VirtualRangeAllocator();
+    ~VirtualRangeAllocator() = default;
+
+    void initialize_with_range(VirtualAddress, size_t);
+    void initialize_from_parent(VirtualRangeAllocator const&);
+
+    Optional<VirtualRange> allocate_anywhere(size_t, size_t alignment = PAGE_SIZE);
+    Optional<VirtualRange> allocate_specific(VirtualAddress, size_t);
+    Optional<VirtualRange> allocate_randomized(size_t, size_t alignment);
+    void deallocate(VirtualRange const&);
+
+    void dump() const;
+
+    bool contains(VirtualRange const& range) const { return m_total_range.contains(range); }
+
+private:
+    void carve_at_iterator(auto&, VirtualRange const&);
+
+    RedBlackTree<FlatPtr, VirtualRange> m_available_ranges;
+    VirtualRange m_total_range;
+    mutable SpinLock<u8> m_lock;
+};
+
+}
+
+namespace AK {
+template<>
+struct Traits<Kernel::Memory::VirtualRange> : public GenericTraits<Kernel::Memory::VirtualRange> {
+    static constexpr bool is_trivial() { return true; }
+};
+}
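
The deallocate() path this header declares merges a freed range with adjacent free neighbors, so the tree never accumulates two holes that touch. A standalone model of that coalescing (std::map in place of the RedBlackTree; hypothetical names):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <iterator>
    #include <map>

    struct Range { uintptr_t base; size_t size; uintptr_t end() const { return base + size; } };

    // Return a freed range to the pool, merging with adjacent free neighbors.
    static void deallocate(std::map<uintptr_t, Range>& free_ranges, Range range)
    {
        // Merge with a preceding free range that ends exactly where we begin.
        auto next = free_ranges.lower_bound(range.base);
        if (next != free_ranges.begin()) {
            auto prev = std::prev(next);
            if (prev->second.end() == range.base) {
                range = { prev->second.base, prev->second.size + range.size };
                free_ranges.erase(prev);
            }
        }
        // Merge with a following free range that begins exactly where we end.
        next = free_ranges.lower_bound(range.base);
        if (next != free_ranges.end() && next->second.base == range.end()) {
            range.size += next->second.size;
            free_ranges.erase(next);
        }
        free_ranges.emplace(range.base, range);
    }

    int main()
    {
        std::map<uintptr_t, Range> free_ranges;
        free_ranges.emplace(0x1000, Range { 0x1000, 0x1000 });
        free_ranges.emplace(0x3000, Range { 0x3000, 0x1000 });
        deallocate(free_ranges, { 0x2000, 0x1000 }); // fills the gap between the two holes
        assert(free_ranges.size() == 1 && free_ranges.begin()->second.size == 0x3000);
    }
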
@@ -74,7 +74,7 @@ public:
         }
     }
 
-    inline static void add_unmap_perf_event(Process& current_process, Memory::Range const& region)
+    inline static void add_unmap_perf_event(Process& current_process, Memory::VirtualRange const& region)
     {
         if (auto* event_buffer = current_process.current_perf_events_buffer()) {
             [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_MUNMAP, region.base().get(), region.size(), nullptr);
@@ -47,7 +47,7 @@ RamdiskController::RamdiskController()
     // Populate ramdisk controllers from Multiboot boot modules, if any.
     size_t count = 0;
     for (auto& used_memory_range : MM.used_memory_ranges()) {
-        if (used_memory_range.type == Memory::UsedMemoryRangeType::BootModule) {
+        if (used_memory_range.type == Memory::UsedMemoryVirtualRangeType::BootModule) {
            size_t length = Memory::page_round_up(used_memory_range.end.get()) - used_memory_range.start.get();
             auto region = MM.allocate_kernel_region(used_memory_range.start, length, "Ramdisk", Memory::Region::Access::Read | Memory::Region::Access::Write);
             if (!region)
@@ -154,12 +154,12 @@ static KResultOr<FlatPtr> make_userspace_context_for_main_thread([[maybe_unused]
     return new_sp;
 }
 
-struct RequiredLoadRange {
+struct RequiredLoadVirtualRange {
     FlatPtr start { 0 };
     FlatPtr end { 0 };
 };
 
-static KResultOr<RequiredLoadRange> get_required_load_range(FileDescription& program_description)
+static KResultOr<RequiredLoadVirtualRange> get_required_load_range(FileDescription& program_description)
 {
     auto& inode = *(program_description.inode());
     auto vmobject = Memory::SharedInodeVMObject::try_create_with_inode(inode);

@@ -181,7 +181,7 @@ static KResultOr<RequiredLoadRange> get_required_load_range(FileDescription& pro
         return EINVAL;
     }
 
-    RequiredLoadRange range {};
+    RequiredLoadVirtualRange range {};
     elf_image.for_each_program_header([&range](const auto& pheader) {
         if (pheader.type() != PT_LOAD)
             return;

@@ -221,7 +221,7 @@ static KResultOr<FlatPtr> get_load_offset(const ElfW(Ehdr) & main_program_header
 
     auto main_program_load_range = main_program_load_range_result.value();
 
-    RequiredLoadRange selected_range {};
+    RequiredLoadVirtualRange selected_range {};
 
     if (interpreter_description) {
         auto interpreter_load_range_result = get_required_load_range(*interpreter_description);

@@ -235,8 +235,8 @@ static KResultOr<FlatPtr> get_load_offset(const ElfW(Ehdr) & main_program_header
         if (main_program_load_range.end < load_range_start || main_program_load_range.start > interpreter_load_range_end)
             return random_load_offset_in_range(load_range_start, load_range_size);
 
-        RequiredLoadRange first_available_part = { load_range_start, main_program_load_range.start };
-        RequiredLoadRange second_available_part = { main_program_load_range.end, interpreter_load_range_end };
+        RequiredLoadVirtualRange first_available_part = { load_range_start, main_program_load_range.start };
+        RequiredLoadVirtualRange second_available_part = { main_program_load_range.end, interpreter_load_range_end };
 
         // Select larger part
         if (first_available_part.end - first_available_part.start > second_available_part.end - second_available_part.start)
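
The gap-selection logic above is plain interval arithmetic: the main program's load range splits the candidate window into two gaps, and the interpreter is randomized into the larger one. A standalone sketch with made-up constants:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct LoadRange { uintptr_t start; uintptr_t end; };

    int main()
    {
        LoadRange window { 0x08000000, 0x20000000 };       // candidate load window
        LoadRange main_program { 0x0a000000, 0x0a400000 }; // claimed by the main executable

        LoadRange first { window.start, main_program.start };
        LoadRange second { main_program.end, window.end };

        // Pick the larger free side for randomizing the interpreter's base, as above.
        LoadRange& selected = (first.end - first.start > second.end - second.start) ? first : second;
        std::printf("selected gap: %#zx - %#zx\n", (size_t)selected.start, (size_t)selected.end);
        // selected gap: 0xa400000 - 0x20000000
    }
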
@@ -129,7 +129,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
     // acquiring the queue lock
     RefPtr<Memory::VMObject> vmobject, vmobject2;
     if (!is_private) {
-        auto region = space().find_region_containing(Memory::Range { VirtualAddress { user_address_or_offset }, sizeof(u32) });
+        auto region = space().find_region_containing(Memory::VirtualRange { VirtualAddress { user_address_or_offset }, sizeof(u32) });
         if (!region)
             return EFAULT;
         vmobject = region->vmobject();

@@ -139,7 +139,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
     case FUTEX_REQUEUE:
     case FUTEX_CMP_REQUEUE:
     case FUTEX_WAKE_OP: {
-        auto region2 = space().find_region_containing(Memory::Range { VirtualAddress { user_address_or_offset2 }, sizeof(u32) });
+        auto region2 = space().find_region_containing(Memory::VirtualRange { VirtualAddress { user_address_or_offset2 }, sizeof(u32) });
         if (!region2)
             return EFAULT;
         vmobject2 = region2->vmobject();
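
Both futex lookups boil down to "find the region whose range contains this one-word span". With regions keyed by base address in a search tree (as in Space), that is a single predecessor query plus a containment check. A standalone model using std::map (names hypothetical):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <map>

    struct Region { uintptr_t base; size_t size; uintptr_t end() const { return base + size; } };

    // Model of find_region_containing(): predecessor-or-equal by base, then a containment check.
    static Region const* find_region_containing(std::map<uintptr_t, Region> const& regions, uintptr_t addr, size_t len)
    {
        auto it = regions.upper_bound(addr); // first region with base > addr
        if (it == regions.begin())
            return nullptr;
        --it;                                // candidate: last region with base <= addr
        auto const& r = it->second;
        return (addr >= r.base && addr + len <= r.end()) ? &r : nullptr;
    }

    int main()
    {
        std::map<uintptr_t, Region> regions;
        regions.emplace(0x10000, Region { 0x10000, 0x4000 });
        regions.emplace(0x20000, Region { 0x20000, 0x1000 });
        assert(find_region_containing(regions, 0x12ffc, sizeof(uint32_t)) != nullptr); // inside first region
        assert(find_region_containing(regions, 0x14000, sizeof(uint32_t)) == nullptr); // just past its end
    }
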
@@ -14,7 +14,7 @@ KResultOr<FlatPtr> Process::sys$get_stack_bounds(Userspace<FlatPtr*> user_stack_
     VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this);
     auto& regs = Thread::current()->get_register_dump_from_stack();
     FlatPtr stack_pointer = regs.userspace_sp();
-    auto* stack_region = space().find_region_containing(Memory::Range { VirtualAddress(stack_pointer), 1 });
+    auto* stack_region = space().find_region_containing(Memory::VirtualRange { VirtualAddress(stack_pointer), 1 });
 
     // The syscall handler should have killed us if we had an invalid stack pointer.
     VERIFY(stack_region);
@@ -199,7 +199,7 @@ KResultOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> u
         return EINVAL;
 
     Memory::Region* region = nullptr;
-    Optional<Memory::Range> range;
+    Optional<Memory::VirtualRange> range;
 
     if (map_randomized) {
         range = space().page_directory().range_allocator().allocate_randomized(Memory::page_round_up(size), alignment);

@@ -272,7 +272,7 @@ KResultOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> u
     return region->vaddr().get();
 }
 
-static KResultOr<Memory::Range> expand_range_to_page_boundaries(FlatPtr address, size_t size)
+static KResultOr<Memory::VirtualRange> expand_range_to_page_boundaries(FlatPtr address, size_t size)
 {
     if (Memory::page_round_up_would_wrap(size))
         return EINVAL;

@@ -286,7 +286,7 @@ static KResultOr<Memory::Range> expand_range_to_page_boundaries(FlatPtr address,
     auto base = VirtualAddress { address }.page_base();
     auto end = Memory::page_round_up(address + size);
 
-    return Memory::Range { base, end - base.get() };
+    return Memory::VirtualRange { base, end - base.get() };
 }
 
 KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int prot)

@@ -346,7 +346,7 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
     auto region = space().take_region(*old_region);
 
     // Unmap the old region here, specifying that we *don't* want the VM deallocated.
-    region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryRange::No);
+    region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryVirtualRange::No);
 
     // This vector is the region(s) adjacent to our range.
     // We need to allocate a new region for the range we wanted to change permission bits on.

@@ -409,7 +409,7 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
     auto region = space().take_region(*old_region);
 
     // Unmap the old region here, specifying that we *don't* want the VM deallocated.
-    region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryRange::No);
+    region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryVirtualRange::No);
 
     // This vector is the region(s) adjacent to our range.
     // We need to allocate a new region for the range we wanted to change permission bits on.

@@ -566,7 +566,7 @@ KResultOr<FlatPtr> Process::sys$mremap(Userspace<const Syscall::SC_mremap_params
     auto old_name = old_region->take_name();
 
     // Unmap without deallocating the VM range since we're going to reuse it.
-    old_region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryRange::No);
+    old_region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryVirtualRange::No);
     space().deallocate_region(*old_region);
 
     auto new_region_or_error = space().allocate_region_with_vmobject(range, new_vmobject.release_nonnull(), old_offset, old_name->view(), old_prot, false);

@@ -657,7 +657,7 @@ KResultOr<FlatPtr> Process::sys$msyscall(Userspace<void*> address)
     if (!Memory::is_user_address(VirtualAddress { address }))
         return EFAULT;
 
-    auto* region = space().find_region_containing(Memory::Range { VirtualAddress { address }, 1 });
+    auto* region = space().find_region_containing(Memory::VirtualRange { VirtualAddress { address }, 1 });
     if (!region)
         return EINVAL;
 
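
The EINVAL guard above exists because rounding a size near the top of the address space up to a page boundary would wrap around to zero. A standalone sketch of the check, assuming 4 KiB pages (the exact kernel predicate may differ slightly):

    #include <cassert>
    #include <cstdint>
    #include <limits>

    constexpr uintptr_t PAGE_SIZE = 4096;

    // Rounding x up to a page boundary wraps iff x lies in the last (partial) page of the address space.
    static bool page_round_up_would_wrap(uintptr_t x)
    {
        return x > std::numeric_limits<uintptr_t>::max() - (PAGE_SIZE - 1);
    }

    int main()
    {
        assert(!page_round_up_would_wrap(0x10123));
        assert(page_round_up_would_wrap(std::numeric_limits<uintptr_t>::max() - 16)); // would overflow
    }
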
@@ -194,7 +194,7 @@ KResultOr<u32> Process::peek_user_data(Userspace<const u32*> address)
 
 KResult Process::poke_user_data(Userspace<u32*> address, u32 data)
 {
-    Memory::Range range = { VirtualAddress(address), sizeof(u32) };
+    Memory::VirtualRange range = { VirtualAddress(address), sizeof(u32) };
     auto* region = space().find_region_containing(range);
     if (!region)
         return EFAULT;
@@ -28,7 +28,7 @@
 #include <Kernel/KResult.h>
 #include <Kernel/KString.h>
 #include <Kernel/LockMode.h>
-#include <Kernel/Memory/Range.h>
+#include <Kernel/Memory/VirtualRange.h>
 #include <Kernel/Scheduler.h>
 #include <Kernel/TimerQueue.h>
 #include <Kernel/UnixTypes.h>

@@ -1308,7 +1308,7 @@ private:
     FlatPtr m_kernel_stack_top { 0 };
     OwnPtr<Memory::Region> m_kernel_stack_region;
     VirtualAddress m_thread_specific_data;
-    Optional<Memory::Range> m_thread_specific_range;
+    Optional<Memory::VirtualRange> m_thread_specific_range;
     Array<SignalActionData, NSIG> m_signal_action_data;
     Blocker* m_blocker { nullptr };
     Kernel::Mutex* m_blocking_lock { nullptr };