Kernel: Rename Memory::PhysicalPage to Memory::PhysicalRAMPage

Since these are now only used to represent RAM pages (and not MMIO pages),
rename them to make their purpose more obvious.
Idan Horowitz 2024-05-11 18:15:51 +03:00 committed by Andrew Kaster
parent 827322c139
commit 26cff62a0a
Notes: sideshowbarker 2024-07-16 22:58:46 +09:00
46 changed files with 192 additions and 192 deletions
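
For illustration, a minimal before/after sketch of the rename at a typical call site (a hypothetical snippet, not part of this commit; paddr stands in for some PhysicalAddress):

// Before: the type name said nothing about what kind of page it tracks.
RefPtr<Memory::PhysicalPage> page = Memory::PhysicalPage::create(paddr, Memory::MayReturnToFreeList::No);
// After: the name states that this ref-counted object manages a page of RAM;
// MMIO ranges are mapped without going through these objects.
RefPtr<Memory::PhysicalRAMPage> ram_page = Memory::PhysicalRAMPage::create(paddr, Memory::MayReturnToFreeList::No);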

View file

@@ -109,13 +109,13 @@ UNMAP_AFTER_INIT void PageDirectory::allocate_kernel_directory()
{
// Adopt the page tables already set up by boot.S
dmesgln("MM: boot_pml4t @ {}", boot_pml4t);
m_root_table = PhysicalPage::create(boot_pml4t, MayReturnToFreeList::No);
m_root_table = PhysicalRAMPage::create(boot_pml4t, MayReturnToFreeList::No);
dmesgln("MM: boot_pdpt @ {}", boot_pdpt);
dmesgln("MM: boot_pd0 @ {}", boot_pd0);
dmesgln("MM: boot_pd_kernel @ {}", boot_pd_kernel);
m_directory_table = PhysicalPage::create(boot_pdpt, MayReturnToFreeList::No);
m_directory_pages[0] = PhysicalPage::create(boot_pd0, MayReturnToFreeList::No);
m_directory_pages[(kernel_mapping_base >> 30) & 0x1ff] = PhysicalPage::create(boot_pd_kernel, MayReturnToFreeList::No);
m_directory_table = PhysicalRAMPage::create(boot_pdpt, MayReturnToFreeList::No);
m_directory_pages[0] = PhysicalRAMPage::create(boot_pd0, MayReturnToFreeList::No);
m_directory_pages[(kernel_mapping_base >> 30) & 0x1ff] = PhysicalRAMPage::create(boot_pd_kernel, MayReturnToFreeList::No);
}
PageDirectory::~PageDirectory()
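
As a worked example of the index computation above: (kernel_mapping_base >> 30) & 0x1ff picks one of the 512 one-GiB directory slots. With a purely illustrative kernel_mapping_base of 0xffffffff80000000 (not necessarily the value used here):

// 0xffffffff80000000 >> 30 == 0x3fffffffe
// 0x3fffffffe & 0x1ff      == 0x1fe == 510
// so boot_pd_kernel would occupy directory slot 510.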

View file

@@ -17,7 +17,7 @@
#include <Kernel/Forward.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/PhysicalAddress.h>
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/Memory/PhysicalRAMPage.h>
namespace Kernel::Memory {
@@ -211,9 +211,9 @@ private:
static void deregister_page_directory(PageDirectory* directory);
Process* m_process { nullptr };
RefPtr<PhysicalPage> m_root_table;
RefPtr<PhysicalPage> m_directory_table;
RefPtr<PhysicalPage> m_directory_pages[512];
RefPtr<PhysicalRAMPage> m_root_table;
RefPtr<PhysicalRAMPage> m_directory_table;
RefPtr<PhysicalRAMPage> m_directory_pages[512];
RecursiveSpinlock<LockRank::None> m_lock {};
};

View file

@@ -99,8 +99,8 @@ UNMAP_AFTER_INIT void PageDirectory::allocate_kernel_directory()
{
dmesgln("MM: boot_pdpt @ {}", boot_pdpt);
dmesgln("MM: boot_pd_kernel @ {}", boot_pd_kernel);
m_directory_table = PhysicalPage::create(boot_pdpt, MayReturnToFreeList::No);
m_directory_pages[(kernel_mapping_base >> VPN_2_OFFSET) & PAGE_TABLE_INDEX_MASK] = PhysicalPage::create(boot_pd_kernel, MayReturnToFreeList::No);
m_directory_table = PhysicalRAMPage::create(boot_pdpt, MayReturnToFreeList::No);
m_directory_pages[(kernel_mapping_base >> VPN_2_OFFSET) & PAGE_TABLE_INDEX_MASK] = PhysicalRAMPage::create(boot_pd_kernel, MayReturnToFreeList::No);
}
PageDirectory::~PageDirectory()

View file

@@ -13,7 +13,7 @@
#include <Kernel/Forward.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/PhysicalAddress.h>
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/Memory/PhysicalRAMPage.h>
#include <AK/Platform.h>
VALIDATE_IS_RISCV64()
@@ -197,8 +197,8 @@ private:
static void deregister_page_directory(PageDirectory* directory);
Process* m_process { nullptr };
RefPtr<PhysicalPage> m_directory_table;
RefPtr<PhysicalPage> m_directory_pages[512];
RefPtr<PhysicalRAMPage> m_directory_table;
RefPtr<PhysicalRAMPage> m_directory_pages[512];
RecursiveSpinlock<LockRank::None> m_lock {};
};

View file

@@ -130,13 +130,13 @@ UNMAP_AFTER_INIT void PageDirectory::allocate_kernel_directory()
{
// Adopt the page tables already set up by boot.S
dmesgln("MM: boot_pml4t @ {}", boot_pml4t);
m_pml4t = PhysicalPage::create(boot_pml4t, MayReturnToFreeList::No);
m_pml4t = PhysicalRAMPage::create(boot_pml4t, MayReturnToFreeList::No);
dmesgln("MM: boot_pdpt @ {}", boot_pdpt);
dmesgln("MM: boot_pd0 @ {}", boot_pd0);
dmesgln("MM: boot_pd_kernel @ {}", boot_pd_kernel);
m_directory_table = PhysicalPage::create(boot_pdpt, MayReturnToFreeList::No);
m_directory_pages[0] = PhysicalPage::create(boot_pd0, MayReturnToFreeList::No);
m_directory_pages[(kernel_mapping_base >> 30) & 0x1ff] = PhysicalPage::create(boot_pd_kernel, MayReturnToFreeList::No);
m_directory_table = PhysicalRAMPage::create(boot_pdpt, MayReturnToFreeList::No);
m_directory_pages[0] = PhysicalRAMPage::create(boot_pd0, MayReturnToFreeList::No);
m_directory_pages[(kernel_mapping_base >> 30) & 0x1ff] = PhysicalRAMPage::create(boot_pd_kernel, MayReturnToFreeList::No);
}
PageDirectory::~PageDirectory()

View file

@@ -16,7 +16,7 @@
#include <Kernel/Locking/LockRank.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/PhysicalAddress.h>
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/Memory/PhysicalRAMPage.h>
namespace Kernel::Memory {
@@ -193,9 +193,9 @@ private:
static void deregister_page_directory(PageDirectory* directory);
Process* m_process { nullptr };
RefPtr<PhysicalPage> m_pml4t;
RefPtr<PhysicalPage> m_directory_table;
RefPtr<PhysicalPage> m_directory_pages[512];
RefPtr<PhysicalRAMPage> m_pml4t;
RefPtr<PhysicalRAMPage> m_directory_table;
RefPtr<PhysicalRAMPage> m_directory_pages[512];
RecursiveSpinlock<LockRank::None> m_lock {};
};

View file

@@ -12,7 +12,7 @@
#include <Kernel/Bus/USB/USBPipe.h>
#include <Kernel/Library/LockRefPtr.h>
#include <Kernel/Memory/AnonymousVMObject.h>
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/Memory/PhysicalRAMPage.h>
#include <Kernel/Memory/Region.h>
// TODO: Callback stuff in this class please!

View file

@@ -238,7 +238,7 @@ set(KERNEL_SOURCES
Memory/InodeVMObject.cpp
Memory/MemoryManager.cpp
Memory/MMIOVMObject.cpp
Memory/PhysicalPage.cpp
Memory/PhysicalRAMPage.cpp
Memory/PhysicalRegion.cpp
Memory/PhysicalZone.cpp
Memory/PrivateInodeVMObject.cpp

View file

@@ -14,7 +14,7 @@
#include <Kernel/Library/LockRefPtr.h>
#include <Kernel/Locking/Mutex.h>
#include <Kernel/Memory/PhysicalAddress.h>
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/Memory/PhysicalRAMPage.h>
#include <Kernel/Sections.h>
#include <Kernel/Security/Random.h>
#include <Kernel/Tasks/WaitQueue.h>

View file

@@ -54,7 +54,7 @@ ErrorOr<void> AHCIPort::allocate_resources_and_initialize_ports()
return {};
}
UNMAP_AFTER_INIT AHCIPort::AHCIPort(AHCIController const& controller, NonnullRefPtr<Memory::PhysicalPage> identify_buffer_page, AHCI::HBADefinedCapabilities hba_capabilities, volatile AHCI::PortRegisters& registers, u32 port_index)
UNMAP_AFTER_INIT AHCIPort::AHCIPort(AHCIController const& controller, NonnullRefPtr<Memory::PhysicalRAMPage> identify_buffer_page, AHCI::HBADefinedCapabilities hba_capabilities, volatile AHCI::PortRegisters& registers, u32 port_index)
: m_port_index(port_index)
, m_hba_capabilities(hba_capabilities)
, m_identify_buffer_page(move(identify_buffer_page))
@@ -413,7 +413,7 @@ Optional<AsyncDeviceRequest::RequestResult> AHCIPort::prepare_and_set_scatter_li
VERIFY(m_lock.is_locked());
VERIFY(request.block_count() > 0);
Vector<NonnullRefPtr<Memory::PhysicalPage>> allocated_dma_regions;
Vector<NonnullRefPtr<Memory::PhysicalRAMPage>> allocated_dma_regions;
for (size_t index = 0; index < calculate_descriptors_count(request.block_count()); index++) {
allocated_dma_regions.append(m_dma_buffers.at(index));
}

View file

@@ -19,7 +19,7 @@
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/AnonymousVMObject.h>
#include <Kernel/Memory/PhysicalAddress.h>
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/Memory/PhysicalRAMPage.h>
#include <Kernel/Memory/ScatterGatherList.h>
#include <Kernel/Sections.h>
#include <Kernel/Security/Random.h>
@@ -55,7 +55,7 @@ private:
bool is_phy_enabled() const { return (m_port_registers.ssts & 0xf) == 3; }
bool initialize();
AHCIPort(AHCIController const&, NonnullRefPtr<Memory::PhysicalPage> identify_buffer_page, AHCI::HBADefinedCapabilities, volatile AHCI::PortRegisters&, u32 port_index);
AHCIPort(AHCIController const&, NonnullRefPtr<Memory::PhysicalRAMPage> identify_buffer_page, AHCI::HBADefinedCapabilities, volatile AHCI::PortRegisters&, u32 port_index);
ALWAYS_INLINE void clear_sata_error_register() const;
@@ -108,11 +108,11 @@ private:
mutable bool m_wait_for_completion { false };
Vector<NonnullRefPtr<Memory::PhysicalPage>> m_dma_buffers;
Vector<NonnullRefPtr<Memory::PhysicalPage>> m_command_table_pages;
RefPtr<Memory::PhysicalPage> m_command_list_page;
Vector<NonnullRefPtr<Memory::PhysicalRAMPage>> m_dma_buffers;
Vector<NonnullRefPtr<Memory::PhysicalRAMPage>> m_command_table_pages;
RefPtr<Memory::PhysicalRAMPage> m_command_list_page;
OwnPtr<Memory::Region> m_command_list_region;
RefPtr<Memory::PhysicalPage> m_fis_receive_page;
RefPtr<Memory::PhysicalRAMPage> m_fis_receive_page;
LockRefPtr<ATADevice> m_connected_device;
u32 m_port_index;
@@ -122,7 +122,7 @@ private:
// it's probably better to just "cache" this here instead.
AHCI::HBADefinedCapabilities const m_hba_capabilities;
NonnullRefPtr<Memory::PhysicalPage> const m_identify_buffer_page;
NonnullRefPtr<Memory::PhysicalRAMPage> const m_identify_buffer_page;
volatile AHCI::PortRegisters& m_port_registers;
NonnullRefPtr<AHCIController> const m_parent_controller;

View file

@@ -155,7 +155,7 @@ UNMAP_AFTER_INIT void NVMeController::set_admin_q_depth()
UNMAP_AFTER_INIT ErrorOr<void> NVMeController::identify_and_init_namespaces()
{
RefPtr<Memory::PhysicalPage> prp_dma_buffer;
RefPtr<Memory::PhysicalRAMPage> prp_dma_buffer;
OwnPtr<Memory::Region> prp_dma_region;
auto namespace_data_struct = TRY(ByteBuffer::create_zeroed(NVMe_IDENTIFY_SIZE));
u32 active_namespace_list[NVMe_IDENTIFY_SIZE / sizeof(u32)];
@@ -219,7 +219,7 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::identify_and_init_namespaces()
ErrorOr<void> NVMeController::identify_and_init_controller()
{
RefPtr<Memory::PhysicalPage> prp_dma_buffer;
RefPtr<Memory::PhysicalRAMPage> prp_dma_buffer;
OwnPtr<Memory::Region> prp_dma_region;
IdentifyController ctrl {};
@@ -311,9 +311,9 @@ void NVMeController::complete_current_request([[maybe_unused]] AsyncDeviceReques
UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_admin_queue(QueueType queue_type)
{
OwnPtr<Memory::Region> cq_dma_region;
Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_pages;
Vector<NonnullRefPtr<Memory::PhysicalRAMPage>> cq_dma_pages;
OwnPtr<Memory::Region> sq_dma_region;
Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_pages;
Vector<NonnullRefPtr<Memory::PhysicalRAMPage>> sq_dma_pages;
set_admin_q_depth();
auto cq_size = round_up_to_power_of_two(CQ_SIZE(ADMIN_QUEUE_SIZE), 4096);
auto sq_size = round_up_to_power_of_two(SQ_SIZE(ADMIN_QUEUE_SIZE), 4096);
@@ -364,9 +364,9 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_admin_queue(QueueType queu
UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_io_queue(u8 qid, QueueType queue_type)
{
OwnPtr<Memory::Region> cq_dma_region;
Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_pages;
Vector<NonnullRefPtr<Memory::PhysicalRAMPage>> cq_dma_pages;
OwnPtr<Memory::Region> sq_dma_region;
Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_pages;
Vector<NonnullRefPtr<Memory::PhysicalRAMPage>> sq_dma_pages;
auto cq_size = round_up_to_power_of_two(CQ_SIZE(IO_QUEUE_SIZE), 4096);
auto sq_size = round_up_to_power_of_two(SQ_SIZE(IO_QUEUE_SIZE), 4096);

View file

@@ -76,8 +76,8 @@ private:
Vector<NonnullLockRefPtr<NVMeQueue>> m_queues;
Vector<NonnullLockRefPtr<NVMeNameSpace>> m_namespaces;
Memory::TypedMapping<ControllerRegister volatile> m_controller_regs;
RefPtr<Memory::PhysicalPage> m_dbbuf_shadow_page;
RefPtr<Memory::PhysicalPage> m_dbbuf_eventidx_page;
RefPtr<Memory::PhysicalRAMPage> m_dbbuf_shadow_page;
RefPtr<Memory::PhysicalRAMPage> m_dbbuf_eventidx_page;
bool m_admin_queue_ready { false };
size_t m_device_count { 0 };
AK::Duration m_ready_timeout;

View file

@@ -11,14 +11,14 @@
namespace Kernel {
ErrorOr<NonnullLockRefPtr<NVMeInterruptQueue>> NVMeInterruptQueue::try_create(PCI::Device& device, NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalPage> rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs)
ErrorOr<NonnullLockRefPtr<NVMeInterruptQueue>> NVMeInterruptQueue::try_create(PCI::Device& device, NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalRAMPage> rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs)
{
auto queue = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) NVMeInterruptQueue(device, move(rw_dma_region), rw_dma_page, qid, irq, q_depth, move(cq_dma_region), move(sq_dma_region), move(db_regs))));
queue->initialize_interrupt_queue();
return queue;
}
UNMAP_AFTER_INIT NVMeInterruptQueue::NVMeInterruptQueue(PCI::Device& device, NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalPage> rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs)
UNMAP_AFTER_INIT NVMeInterruptQueue::NVMeInterruptQueue(PCI::Device& device, NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalRAMPage> rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs)
: NVMeQueue(move(rw_dma_region), rw_dma_page, qid, q_depth, move(cq_dma_region), move(sq_dma_region), move(db_regs))
, PCI::IRQHandler(device, irq)
{

View file

@@ -14,14 +14,14 @@ namespace Kernel {
class NVMeInterruptQueue : public NVMeQueue
, public PCI::IRQHandler {
public:
static ErrorOr<NonnullLockRefPtr<NVMeInterruptQueue>> try_create(PCI::Device& device, NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalPage> rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs);
static ErrorOr<NonnullLockRefPtr<NVMeInterruptQueue>> try_create(PCI::Device& device, NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalRAMPage> rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs);
void submit_sqe(NVMeSubmission& submission) override;
virtual ~NVMeInterruptQueue() override {};
virtual StringView purpose() const override { return "NVMe"sv; }
void initialize_interrupt_queue();
protected:
NVMeInterruptQueue(PCI::Device& device, NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalPage> rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs);
NVMeInterruptQueue(PCI::Device& device, NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalRAMPage> rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs);
private:
virtual void complete_current_request(u16 cmdid, u16 status) override;

View file

@@ -11,12 +11,12 @@
namespace Kernel {
ErrorOr<NonnullLockRefPtr<NVMePollQueue>> NVMePollQueue::try_create(NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalPage> rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs)
ErrorOr<NonnullLockRefPtr<NVMePollQueue>> NVMePollQueue::try_create(NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalRAMPage> rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs)
{
return TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) NVMePollQueue(move(rw_dma_region), rw_dma_page, qid, q_depth, move(cq_dma_region), move(sq_dma_region), move(db_regs))));
}
UNMAP_AFTER_INIT NVMePollQueue::NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalPage> rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs)
UNMAP_AFTER_INIT NVMePollQueue::NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalRAMPage> rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs)
: NVMeQueue(move(rw_dma_region), rw_dma_page, qid, q_depth, move(cq_dma_region), move(sq_dma_region), move(db_regs))
{
}

View file

@@ -12,12 +12,12 @@ namespace Kernel {
class NVMePollQueue : public NVMeQueue {
public:
static ErrorOr<NonnullLockRefPtr<NVMePollQueue>> try_create(NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalPage> rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs);
static ErrorOr<NonnullLockRefPtr<NVMePollQueue>> try_create(NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalRAMPage> rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs);
void submit_sqe(NVMeSubmission& submission) override;
virtual ~NVMePollQueue() override {};
protected:
NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalPage> rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs);
NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalRAMPage> rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs);
private:
Spinlock<LockRank::Interrupts> m_cq_lock {};

View file

@@ -15,7 +15,7 @@ namespace Kernel {
ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(NVMeController& device, u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs, QueueType queue_type)
{
// Note: Allocate DMA region for RW operation. For now the requests don't exceed more than 4096 bytes (Storage device takes care of it)
RefPtr<Memory::PhysicalPage> rw_dma_page;
RefPtr<Memory::PhysicalRAMPage> rw_dma_page;
auto rw_dma_region = TRY(MM.allocate_dma_buffer_page("NVMe Queue Read/Write DMA"sv, Memory::Region::Access::ReadWrite, rw_dma_page));
if (rw_dma_page.is_null())
@@ -30,7 +30,7 @@ ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(NVMeController& devi
return queue;
}
UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs)
UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalRAMPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs)
: m_rw_dma_region(move(rw_dma_region))
, m_qid(qid)
, m_admin_queue(qid == 0)

View file

@@ -83,7 +83,7 @@ protected:
m_db_regs.mmio_reg->sq_tail = m_sq_tail;
}
NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs);
NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalRAMPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs);
[[nodiscard]] u32 get_request_cid()
{
@@ -130,6 +130,6 @@ private:
Span<NVMeCompletion> m_cqe_array;
WaitQueue m_sync_wait_queue;
Doorbell m_db_regs;
NonnullRefPtr<Memory::PhysicalPage const> const m_rw_dma_page;
NonnullRefPtr<Memory::PhysicalRAMPage const> const m_rw_dma_page;
};
}

View file

@@ -13,7 +13,7 @@
#include <Kernel/Library/LockRefPtr.h>
#include <Kernel/Locking/Mutex.h>
#include <Kernel/Memory/PhysicalAddress.h>
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/Memory/PhysicalRAMPage.h>
#include <Kernel/Security/Random.h>
#include <Kernel/Tasks/WaitQueue.h>

View file

@@ -76,7 +76,7 @@ class InodeVMObject;
class MappedROM;
class MemoryManager;
class PageDirectory;
class PhysicalPage;
class PhysicalRAMPage;
class PhysicalRegion;
class PrivateInodeVMObject;
class Region;

View file

@@ -9,7 +9,7 @@
#include <Kernel/Debug.h>
#include <Kernel/Memory/AnonymousVMObject.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/Memory/PhysicalRAMPage.h>
#include <Kernel/Tasks/Process.h>
namespace Kernel::Memory {
@@ -92,7 +92,7 @@ ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_phys
{
auto contiguous_physical_pages = TRY(MM.allocate_contiguous_physical_pages(size));
auto new_physical_pages = TRY(FixedArray<RefPtr<PhysicalPage>>::create(contiguous_physical_pages.span()));
auto new_physical_pages = TRY(FixedArray<RefPtr<PhysicalRAMPage>>::create(contiguous_physical_pages.span()));
return adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(move(new_physical_pages)));
}
@@ -111,9 +111,9 @@ ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_purg
return vmobject;
}
ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>> physical_pages)
ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalRAMPage>> physical_pages)
{
auto new_physical_pages = TRY(FixedArray<RefPtr<PhysicalPage>>::create(physical_pages));
auto new_physical_pages = TRY(FixedArray<RefPtr<PhysicalRAMPage>>::create(physical_pages));
return adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(move(new_physical_pages)));
}
@@ -130,7 +130,7 @@ ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_for_
return adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(paddr, move(new_physical_pages)));
}
ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_shared_cow(AnonymousVMObject const& other, NonnullLockRefPtr<SharedCommittedCowPages> shared_committed_cow_pages, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_shared_cow(AnonymousVMObject const& other, NonnullLockRefPtr<SharedCommittedCowPages> shared_committed_cow_pages, FixedArray<RefPtr<PhysicalRAMPage>>&& new_physical_pages)
{
auto weak_parent = TRY(other.try_make_weak_ptr<AnonymousVMObject>());
auto vmobject = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(move(weak_parent), move(shared_committed_cow_pages), move(new_physical_pages))));
@@ -140,7 +140,7 @@ ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with
return vmobject;
}
AnonymousVMObject::AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, AllocationStrategy strategy, Optional<CommittedPhysicalPageSet> committed_pages)
AnonymousVMObject::AnonymousVMObject(FixedArray<RefPtr<PhysicalRAMPage>>&& new_physical_pages, AllocationStrategy strategy, Optional<CommittedPhysicalPageSet> committed_pages)
: VMObject(move(new_physical_pages))
, m_unused_committed_pages(move(committed_pages))
{
@@ -155,20 +155,20 @@ AnonymousVMObject::AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_phys
}
}
AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, FixedArray<RefPtr<PhysicalRAMPage>>&& new_physical_pages)
: VMObject(move(new_physical_pages))
{
VERIFY(paddr.page_base() == paddr);
for (size_t i = 0; i < page_count(); ++i)
physical_pages()[i] = PhysicalPage::create(paddr.offset(i * PAGE_SIZE), MayReturnToFreeList::No);
physical_pages()[i] = PhysicalRAMPage::create(paddr.offset(i * PAGE_SIZE), MayReturnToFreeList::No);
}
AnonymousVMObject::AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
AnonymousVMObject::AnonymousVMObject(FixedArray<RefPtr<PhysicalRAMPage>>&& new_physical_pages)
: VMObject(move(new_physical_pages))
{
}
AnonymousVMObject::AnonymousVMObject(LockWeakPtr<AnonymousVMObject> other, NonnullLockRefPtr<SharedCommittedCowPages> shared_committed_cow_pages, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
AnonymousVMObject::AnonymousVMObject(LockWeakPtr<AnonymousVMObject> other, NonnullLockRefPtr<SharedCommittedCowPages> shared_committed_cow_pages, FixedArray<RefPtr<PhysicalRAMPage>>&& new_physical_pages)
: VMObject(move(new_physical_pages))
, m_cow_parent(move(other))
, m_shared_committed_cow_pages(move(shared_committed_cow_pages))
@@ -271,7 +271,7 @@ ErrorOr<void> AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged
return {};
}
NonnullRefPtr<PhysicalPage> AnonymousVMObject::allocate_committed_page(Badge<Region>)
NonnullRefPtr<PhysicalRAMPage> AnonymousVMObject::allocate_committed_page(Badge<Region>)
{
return m_unused_committed_pages->take_one();
}
@@ -345,7 +345,7 @@ PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, Virtual
return PageFaultResponse::Continue;
}
RefPtr<PhysicalPage> page;
RefPtr<PhysicalRAMPage> page;
if (m_shared_committed_cow_pages) {
dbgln_if(PAGE_FAULT_DEBUG, " >> It's a committed COW page and it's time to COW!");
page = m_shared_committed_cow_pages->take_one();
@@ -388,7 +388,7 @@ AnonymousVMObject::SharedCommittedCowPages::SharedCommittedCowPages(CommittedPhy
AnonymousVMObject::SharedCommittedCowPages::~SharedCommittedCowPages() = default;
NonnullRefPtr<PhysicalPage> AnonymousVMObject::SharedCommittedCowPages::take_one()
NonnullRefPtr<PhysicalRAMPage> AnonymousVMObject::SharedCommittedCowPages::take_one()
{
SpinlockLocker locker(m_lock);
return m_committed_pages.take_one();
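
For context, a condensed sketch of the committed-CoW path being renamed above (reconstructed from the visible lines; the non-committed fallback is elided in this diff):

RefPtr<PhysicalRAMPage> page;
if (m_shared_committed_cow_pages) {
    // Pages were committed when the VMObject was cloned, so take_one() cannot fail here.
    page = m_shared_committed_cow_pages->take_one();
} else {
    // Otherwise a fresh zero-filled page has to be allocated, which may purge volatile memory.
}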

View file

@@ -20,12 +20,12 @@ public:
static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_size(size_t, AllocationStrategy);
static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_for_physical_range(PhysicalAddress paddr, size_t size);
static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>>);
static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalRAMPage>>);
static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_purgeable_with_size(size_t, AllocationStrategy);
static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_physically_contiguous_with_size(size_t);
virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override;
[[nodiscard]] NonnullRefPtr<PhysicalPage> allocate_committed_page(Badge<Region>);
[[nodiscard]] NonnullRefPtr<PhysicalRAMPage> allocate_committed_page(Badge<Region>);
PageFaultResponse handle_cow_fault(size_t, VirtualAddress);
size_t cow_pages() const;
bool should_cow(size_t page_index, bool) const;
@@ -41,12 +41,12 @@ public:
private:
class SharedCommittedCowPages;
static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_shared_cow(AnonymousVMObject const&, NonnullLockRefPtr<SharedCommittedCowPages>, FixedArray<RefPtr<PhysicalPage>>&&);
static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_shared_cow(AnonymousVMObject const&, NonnullLockRefPtr<SharedCommittedCowPages>, FixedArray<RefPtr<PhysicalRAMPage>>&&);
explicit AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&&, AllocationStrategy, Optional<CommittedPhysicalPageSet>);
explicit AnonymousVMObject(PhysicalAddress, FixedArray<RefPtr<PhysicalPage>>&&);
explicit AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&&);
explicit AnonymousVMObject(LockWeakPtr<AnonymousVMObject>, NonnullLockRefPtr<SharedCommittedCowPages>, FixedArray<RefPtr<PhysicalPage>>&&);
explicit AnonymousVMObject(FixedArray<RefPtr<PhysicalRAMPage>>&&, AllocationStrategy, Optional<CommittedPhysicalPageSet>);
explicit AnonymousVMObject(PhysicalAddress, FixedArray<RefPtr<PhysicalRAMPage>>&&);
explicit AnonymousVMObject(FixedArray<RefPtr<PhysicalRAMPage>>&&);
explicit AnonymousVMObject(LockWeakPtr<AnonymousVMObject>, NonnullLockRefPtr<SharedCommittedCowPages>, FixedArray<RefPtr<PhysicalRAMPage>>&&);
virtual StringView class_name() const override { return "AnonymousVMObject"sv; }
@@ -74,7 +74,7 @@ private:
[[nodiscard]] bool is_empty() const { return m_committed_pages.is_empty(); }
[[nodiscard]] NonnullRefPtr<PhysicalPage> take_one();
[[nodiscard]] NonnullRefPtr<PhysicalRAMPage> take_one();
void uncommit_one();
private:

View file

@@ -9,14 +9,14 @@
namespace Kernel::Memory {
InodeVMObject::InodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
InodeVMObject::InodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalRAMPage>>&& new_physical_pages, Bitmap dirty_pages)
: VMObject(move(new_physical_pages))
, m_inode(inode)
, m_dirty_pages(move(dirty_pages))
{
}
InodeVMObject::InodeVMObject(InodeVMObject const& other, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
InodeVMObject::InodeVMObject(InodeVMObject const& other, FixedArray<RefPtr<PhysicalRAMPage>>&& new_physical_pages, Bitmap dirty_pages)
: VMObject(move(new_physical_pages))
, m_inode(other.m_inode)
, m_dirty_pages(move(dirty_pages))

View file

@@ -28,8 +28,8 @@ public:
u32 writable_mappings() const;
protected:
explicit InodeVMObject(Inode&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
explicit InodeVMObject(InodeVMObject const&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
explicit InodeVMObject(Inode&, FixedArray<RefPtr<PhysicalRAMPage>>&&, Bitmap dirty_pages);
explicit InodeVMObject(InodeVMObject const&, FixedArray<RefPtr<PhysicalRAMPage>>&&, Bitmap dirty_pages);
InodeVMObject& operator=(InodeVMObject const&) = delete;
InodeVMObject& operator=(InodeVMObject&&) = delete;

View file

@@ -22,7 +22,7 @@ ErrorOr<NonnullLockRefPtr<MMIOVMObject>> MMIOVMObject::try_create_for_physical_r
return adopt_nonnull_lock_ref_or_enomem(new (nothrow) MMIOVMObject(paddr, move(new_physical_pages)));
}
MMIOVMObject::MMIOVMObject(PhysicalAddress paddr, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
MMIOVMObject::MMIOVMObject(PhysicalAddress paddr, FixedArray<RefPtr<PhysicalRAMPage>>&& new_physical_pages)
: VMObject(move(new_physical_pages))
{
VERIFY(paddr.page_base() == paddr);

View file

@@ -18,7 +18,7 @@ public:
virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override { return ENOTSUP; }
private:
MMIOVMObject(PhysicalAddress, FixedArray<RefPtr<PhysicalPage>>&&);
MMIOVMObject(PhysicalAddress, FixedArray<RefPtr<PhysicalRAMPage>>&&);
virtual StringView class_name() const override { return "MMIOVMObject"sv; }
};

View file

@@ -788,7 +788,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
auto pt_paddr = page_tables_base.offset(pt_index * PAGE_SIZE);
auto physical_page_index = PhysicalAddress::physical_page_index(pt_paddr.get());
auto& physical_page_entry = m_physical_page_entries[physical_page_index];
auto physical_page = adopt_lock_ref(*new (&physical_page_entry.allocated.physical_page) PhysicalPage(MayReturnToFreeList::No));
auto physical_page = adopt_lock_ref(*new (&physical_page_entry.allocated.physical_page) PhysicalRAMPage(MayReturnToFreeList::No));
// NOTE: This leaked ref is matched by the unref in MemoryManager::release_pte()
(void)physical_page.leak_ref();
@@ -827,7 +827,7 @@ PhysicalPageEntry& MemoryManager::get_physical_page_entry(PhysicalAddress physic
return m_physical_page_entries[physical_page_entry_index];
}
PhysicalAddress MemoryManager::get_physical_address(PhysicalPage const& physical_page)
PhysicalAddress MemoryManager::get_physical_address(PhysicalRAMPage const& physical_page)
{
PhysicalPageEntry const& physical_page_entry = *reinterpret_cast<PhysicalPageEntry const*>((u8 const*)&physical_page - __builtin_offsetof(PhysicalPageEntry, allocated.physical_page));
size_t physical_page_entry_index = &physical_page_entry - m_physical_page_entries;
@@ -1065,7 +1065,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_contiguous_kernel_region(
return region;
}
ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access, RefPtr<Memory::PhysicalPage>& dma_buffer_page)
ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access, RefPtr<Memory::PhysicalRAMPage>& dma_buffer_page)
{
auto page = TRY(allocate_physical_page());
dma_buffer_page = page;
@@ -1075,12 +1075,12 @@ ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(S
ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access)
{
RefPtr<Memory::PhysicalPage> dma_buffer_page;
RefPtr<Memory::PhysicalRAMPage> dma_buffer_page;
return allocate_dma_buffer_page(name, access, dma_buffer_page);
}
ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, Vector<NonnullRefPtr<Memory::PhysicalPage>>& dma_buffer_pages)
ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, Vector<NonnullRefPtr<Memory::PhysicalRAMPage>>& dma_buffer_pages)
{
VERIFY(!(size % PAGE_SIZE));
dma_buffer_pages = TRY(allocate_contiguous_physical_pages(size));
@@ -1091,7 +1091,7 @@ ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(
ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access)
{
VERIFY(!(size % PAGE_SIZE));
Vector<NonnullRefPtr<Memory::PhysicalPage>> dma_buffer_pages;
Vector<NonnullRefPtr<Memory::PhysicalRAMPage>> dma_buffer_pages;
return allocate_dma_buffer_pages(size, name, access, dma_buffer_pages);
}
@@ -1109,7 +1109,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(size_t size
return region;
}
ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>> pages, StringView name, Region::Access access, Region::Cacheable cacheable)
ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_physical_pages(Span<NonnullRefPtr<PhysicalRAMPage>> pages, StringView name, Region::Access access, Region::Cacheable cacheable)
{
auto vmobject = TRY(AnonymousVMObject::try_create_with_physical_pages(pages));
OwnPtr<KString> name_kstring;
@@ -1218,9 +1218,9 @@ void MemoryManager::deallocate_physical_page(PhysicalAddress paddr)
});
}
RefPtr<PhysicalPage> MemoryManager::find_free_physical_page(bool committed)
RefPtr<PhysicalRAMPage> MemoryManager::find_free_physical_page(bool committed)
{
RefPtr<PhysicalPage> page;
RefPtr<PhysicalRAMPage> page;
m_global_data.with([&](auto& global_data) {
if (committed) {
// Draw from the committed pages pool. We should always have these pages available
@@ -1247,7 +1247,7 @@ RefPtr<PhysicalPage> MemoryManager::find_free_physical_page(bool committed)
return page;
}
NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill should_zero_fill)
NonnullRefPtr<PhysicalRAMPage> MemoryManager::allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill should_zero_fill)
{
auto page = find_free_physical_page(true);
VERIFY(page);
@@ -1260,9 +1260,9 @@ NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_physical_page(Badg
return page.release_nonnull();
}
ErrorOr<NonnullRefPtr<PhysicalPage>> MemoryManager::allocate_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
ErrorOr<NonnullRefPtr<PhysicalRAMPage>> MemoryManager::allocate_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
{
return m_global_data.with([&](auto&) -> ErrorOr<NonnullRefPtr<PhysicalPage>> {
return m_global_data.with([&](auto&) -> ErrorOr<NonnullRefPtr<PhysicalRAMPage>> {
auto page = find_free_physical_page(false);
bool purged_pages = false;
@@ -1317,12 +1317,12 @@ ErrorOr<NonnullRefPtr<PhysicalPage>> MemoryManager::allocate_physical_page(Shoul
});
}
ErrorOr<Vector<NonnullRefPtr<PhysicalPage>>> MemoryManager::allocate_contiguous_physical_pages(size_t size)
ErrorOr<Vector<NonnullRefPtr<PhysicalRAMPage>>> MemoryManager::allocate_contiguous_physical_pages(size_t size)
{
VERIFY(!(size % PAGE_SIZE));
size_t page_count = ceil_div(size, static_cast<size_t>(PAGE_SIZE));
auto physical_pages = TRY(m_global_data.with([&](auto& global_data) -> ErrorOr<Vector<NonnullRefPtr<PhysicalPage>>> {
auto physical_pages = TRY(m_global_data.with([&](auto& global_data) -> ErrorOr<Vector<NonnullRefPtr<PhysicalRAMPage>>> {
// We need to make sure we don't touch pages that we have committed to
if (global_data.system_memory_info.physical_pages_uncommitted < page_count)
return ENOMEM;
@@ -1495,7 +1495,7 @@ CommittedPhysicalPageSet::~CommittedPhysicalPageSet()
MM.uncommit_physical_pages({}, m_page_count);
}
NonnullRefPtr<PhysicalPage> CommittedPhysicalPageSet::take_one()
NonnullRefPtr<PhysicalRAMPage> CommittedPhysicalPageSet::take_one()
{
VERIFY(m_page_count > 0);
--m_page_count;
@@ -1509,7 +1509,7 @@ void CommittedPhysicalPageSet::uncommit_one()
MM.uncommit_physical_pages({}, 1);
}
void MemoryManager::copy_physical_page(PhysicalPage& physical_page, u8 page_buffer[PAGE_SIZE])
void MemoryManager::copy_physical_page(PhysicalRAMPage& physical_page, u8 page_buffer[PAGE_SIZE])
{
auto* quickmapped_page = quickmap_page(physical_page);
memcpy(page_buffer, quickmapped_page, PAGE_SIZE);
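
A short usage sketch (hypothetical driver code, not from this commit) for the DMA helpers whose signatures change above; it assumes a caller returning ErrorOr:

RefPtr<Memory::PhysicalRAMPage> dma_page;
auto region = TRY(MM.allocate_dma_buffer_page("ExampleDevice DMA"sv, Memory::Region::Access::ReadWrite, dma_page));
// The region is the kernel-virtual mapping; the device is programmed with the physical address.
dbgln("DMA buffer mapped at {}, physical page at {}", region->vaddr(), dma_page->paddr());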

View file

@@ -14,7 +14,7 @@
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/AllocationStrategy.h>
#include <Kernel/Memory/MemorySections.h>
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/Memory/PhysicalRAMPage.h>
#include <Kernel/Memory/PhysicalRegion.h>
#include <Kernel/Memory/Region.h>
#include <Kernel/Memory/RegionTree.h>
@@ -113,7 +113,7 @@ public:
bool is_empty() const { return m_page_count == 0; }
size_t page_count() const { return m_page_count; }
[[nodiscard]] NonnullRefPtr<PhysicalPage> take_one();
[[nodiscard]] NonnullRefPtr<PhysicalRAMPage> take_one();
void uncommit_one();
void operator=(CommittedPhysicalPageSet&&) = delete;
@@ -163,19 +163,19 @@ public:
ErrorOr<CommittedPhysicalPageSet> commit_physical_pages(size_t page_count);
void uncommit_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count);
NonnullRefPtr<PhysicalPage> allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
ErrorOr<NonnullRefPtr<PhysicalPage>> allocate_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
ErrorOr<Vector<NonnullRefPtr<PhysicalPage>>> allocate_contiguous_physical_pages(size_t size);
NonnullRefPtr<PhysicalRAMPage> allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
ErrorOr<NonnullRefPtr<PhysicalRAMPage>> allocate_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
ErrorOr<Vector<NonnullRefPtr<PhysicalRAMPage>>> allocate_contiguous_physical_pages(size_t size);
void deallocate_physical_page(PhysicalAddress);
ErrorOr<NonnullOwnPtr<Region>> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access, RefPtr<Memory::PhysicalPage>& dma_buffer_page);
ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access, RefPtr<Memory::PhysicalRAMPage>& dma_buffer_page);
ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access);
ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, Vector<NonnullRefPtr<Memory::PhysicalPage>>& dma_buffer_pages);
ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, Vector<NonnullRefPtr<Memory::PhysicalRAMPage>>& dma_buffer_pages);
ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access);
ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
ErrorOr<NonnullOwnPtr<Region>> allocate_mmio_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>>, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region_with_physical_pages(Span<NonnullRefPtr<PhysicalRAMPage>>, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
ErrorOr<NonnullOwnPtr<Region>> allocate_unbacked_region_anywhere(size_t size, size_t alignment);
ErrorOr<NonnullOwnPtr<Region>> create_identity_mapped_region(PhysicalAddress, size_t);
@@ -215,8 +215,8 @@ public:
void dump_kernel_regions();
PhysicalPage& shared_zero_page() { return *m_shared_zero_page; }
PhysicalPage& lazy_committed_page() { return *m_lazy_committed_page; }
PhysicalRAMPage& shared_zero_page() { return *m_shared_zero_page; }
PhysicalRAMPage& lazy_committed_page() { return *m_lazy_committed_page; }
PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }
@@ -231,9 +231,9 @@ public:
bool is_allowed_to_read_physical_memory_for_userspace(PhysicalAddress, size_t read_length) const;
PhysicalPageEntry& get_physical_page_entry(PhysicalAddress);
PhysicalAddress get_physical_address(PhysicalPage const&);
PhysicalAddress get_physical_address(PhysicalRAMPage const&);
void copy_physical_page(PhysicalPage&, u8 page_buffer[PAGE_SIZE]);
void copy_physical_page(PhysicalRAMPage&, u8 page_buffer[PAGE_SIZE]);
IterationDecision for_each_physical_memory_range(Function<IterationDecision(PhysicalMemoryRange const&)>);
@@ -272,9 +272,9 @@ private:
static void flush_tlb_local(VirtualAddress, size_t page_count = 1);
static void flush_tlb(PageDirectory const*, VirtualAddress, size_t page_count = 1);
RefPtr<PhysicalPage> find_free_physical_page(bool);
RefPtr<PhysicalRAMPage> find_free_physical_page(bool);
ALWAYS_INLINE u8* quickmap_page(PhysicalPage& page)
ALWAYS_INLINE u8* quickmap_page(PhysicalRAMPage& page)
{
return quickmap_page(page.paddr());
}
@@ -296,8 +296,8 @@ private:
// and then never change. Atomic ref-counting covers that case without
// the need for additional synchronization.
LockRefPtr<PageDirectory> m_kernel_page_directory;
RefPtr<PhysicalPage> m_shared_zero_page;
RefPtr<PhysicalPage> m_lazy_committed_page;
RefPtr<PhysicalRAMPage> m_shared_zero_page;
RefPtr<PhysicalRAMPage> m_lazy_committed_page;
// NOTE: These are outside of GlobalData as they are initialized on startup,
// and then never change.
@@ -307,12 +307,12 @@ private:
SpinlockProtected<GlobalData, LockRank::None> m_global_data;
};
inline bool PhysicalPage::is_shared_zero_page() const
inline bool PhysicalRAMPage::is_shared_zero_page() const
{
return this == &MM.shared_zero_page();
}
inline bool PhysicalPage::is_lazy_committed_page() const
inline bool PhysicalRAMPage::is_lazy_committed_page() const
{
return this == &MM.lazy_committed_page();
}

View file

@@ -6,37 +6,37 @@
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/Memory/PhysicalRAMPage.h>
namespace Kernel::Memory {
NonnullRefPtr<PhysicalPage> PhysicalPage::create(PhysicalAddress paddr, MayReturnToFreeList may_return_to_freelist)
NonnullRefPtr<PhysicalRAMPage> PhysicalRAMPage::create(PhysicalAddress paddr, MayReturnToFreeList may_return_to_freelist)
{
auto& physical_page_entry = MM.get_physical_page_entry(paddr);
return adopt_ref(*new (&physical_page_entry.allocated.physical_page) PhysicalPage(may_return_to_freelist));
return adopt_ref(*new (&physical_page_entry.allocated.physical_page) PhysicalRAMPage(may_return_to_freelist));
}
PhysicalPage::PhysicalPage(MayReturnToFreeList may_return_to_freelist)
PhysicalRAMPage::PhysicalRAMPage(MayReturnToFreeList may_return_to_freelist)
: m_may_return_to_freelist(may_return_to_freelist)
{
}
PhysicalAddress PhysicalPage::paddr() const
PhysicalAddress PhysicalRAMPage::paddr() const
{
return MM.get_physical_address(*this);
}
void PhysicalPage::free_this() const
void PhysicalRAMPage::free_this() const
{
auto paddr = MM.get_physical_address(*this);
if (m_may_return_to_freelist == MayReturnToFreeList::Yes) {
auto& this_as_freelist_entry = MM.get_physical_page_entry(paddr).freelist;
this->~PhysicalPage(); // delete in place
this->~PhysicalRAMPage(); // delete in place
this_as_freelist_entry.next_index = -1;
this_as_freelist_entry.prev_index = -1;
MM.deallocate_physical_page(paddr);
} else {
this->~PhysicalPage(); // delete in place
this->~PhysicalRAMPage(); // delete in place
}
}

View file

@@ -16,9 +16,9 @@ enum class MayReturnToFreeList : bool {
Yes
};
class PhysicalPage {
AK_MAKE_NONCOPYABLE(PhysicalPage);
AK_MAKE_NONMOVABLE(PhysicalPage);
class PhysicalRAMPage {
AK_MAKE_NONCOPYABLE(PhysicalRAMPage);
AK_MAKE_NONMOVABLE(PhysicalRAMPage);
friend class MemoryManager;
@@ -36,7 +36,7 @@ public:
free_this();
}
static NonnullRefPtr<PhysicalPage> create(PhysicalAddress, MayReturnToFreeList may_return_to_freelist = MayReturnToFreeList::Yes);
static NonnullRefPtr<PhysicalRAMPage> create(PhysicalAddress, MayReturnToFreeList may_return_to_freelist = MayReturnToFreeList::Yes);
u32 ref_count() const { return m_ref_count.load(AK::memory_order_consume); }
@@ -44,8 +44,8 @@ public:
bool is_lazy_committed_page() const;
private:
explicit PhysicalPage(MayReturnToFreeList may_return_to_freelist);
~PhysicalPage() = default;
explicit PhysicalRAMPage(MayReturnToFreeList may_return_to_freelist);
~PhysicalRAMPage() = default;
void free_this() const;
@@ -57,7 +57,7 @@ struct PhysicalPageEntry {
union {
// If it's a live PhysicalPage object:
struct {
PhysicalPage physical_page;
PhysicalRAMPage physical_page;
} allocated;
// If it's an entry in a PhysicalZone::Bucket's freelist.
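
For context, a rough sketch of the in-place lifecycle this header implies (inferred from create() and free_this() in the .cpp hunk above; paddr is hypothetical):

// PhysicalRAMPage objects are never heap-allocated: create() placement-news one inside
// the PhysicalPageEntry for that address, and the final unref() calls free_this(),
// which destroys it in place and may hand the page back to the freelist.
auto page = PhysicalRAMPage::create(paddr); // ref count 1
auto extra = page;                          // ref count 2
extra = nullptr;                            // ref count 1
page = nullptr;                             // ref count 0: destroyed in place, page freed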

View file

@@ -74,7 +74,7 @@ OwnPtr<PhysicalRegion> PhysicalRegion::try_take_pages_from_beginning(size_t page
return try_create(taken_lower, taken_upper);
}
Vector<NonnullRefPtr<PhysicalPage>> PhysicalRegion::take_contiguous_free_pages(size_t count)
Vector<NonnullRefPtr<PhysicalRAMPage>> PhysicalRegion::take_contiguous_free_pages(size_t count)
{
auto rounded_page_count = next_power_of_two(count);
auto order = count_trailing_zeroes(rounded_page_count);
@@ -94,15 +94,15 @@ Vector<NonnullRefPtr<PhysicalPage>> PhysicalRegion::take_contiguous_free_pages(s
if (!page_base.has_value())
return {};
Vector<NonnullRefPtr<PhysicalPage>> physical_pages;
Vector<NonnullRefPtr<PhysicalRAMPage>> physical_pages;
physical_pages.ensure_capacity(count);
for (size_t i = 0; i < count; ++i)
physical_pages.append(PhysicalPage::create(page_base.value().offset(i * PAGE_SIZE)));
physical_pages.append(PhysicalRAMPage::create(page_base.value().offset(i * PAGE_SIZE)));
return physical_pages;
}
RefPtr<PhysicalPage> PhysicalRegion::take_free_page()
RefPtr<PhysicalRAMPage> PhysicalRegion::take_free_page()
{
if (m_usable_zones.is_empty())
return nullptr;
@@ -116,7 +116,7 @@ RefPtr<PhysicalPage> PhysicalRegion::take_free_page()
m_full_zones.append(zone);
}
return PhysicalPage::create(page.value());
return PhysicalRAMPage::create(page.value());
}
void PhysicalRegion::return_page(PhysicalAddress paddr)

View file

@@ -8,7 +8,7 @@
#include <AK/OwnPtr.h>
#include <AK/Vector.h>
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/Memory/PhysicalRAMPage.h>
#include <Kernel/Memory/PhysicalZone.h>
namespace Kernel::Memory {
@@ -34,8 +34,8 @@ public:
OwnPtr<PhysicalRegion> try_take_pages_from_beginning(size_t);
RefPtr<PhysicalPage> take_free_page();
Vector<NonnullRefPtr<PhysicalPage>> take_contiguous_free_pages(size_t count);
RefPtr<PhysicalRAMPage> take_free_page();
Vector<NonnullRefPtr<PhysicalRAMPage>> take_contiguous_free_pages(size_t count);
void return_page(PhysicalAddress);
private:

View file

@@ -7,7 +7,7 @@
#include <AK/BuiltinWrappers.h>
#include <AK/Format.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/Memory/PhysicalRAMPage.h>
#include <Kernel/Memory/PhysicalZone.h>
namespace Kernel::Memory {

View file

@@ -34,12 +34,12 @@ ErrorOr<NonnullLockRefPtr<VMObject>> PrivateInodeVMObject::try_clone()
return adopt_nonnull_lock_ref_or_enomem<VMObject>(new (nothrow) PrivateInodeVMObject(*this, move(new_physical_pages), move(dirty_pages)));
}
PrivateInodeVMObject::PrivateInodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
PrivateInodeVMObject::PrivateInodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalRAMPage>>&& new_physical_pages, Bitmap dirty_pages)
: InodeVMObject(inode, move(new_physical_pages), move(dirty_pages))
{
}
PrivateInodeVMObject::PrivateInodeVMObject(PrivateInodeVMObject const& other, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
PrivateInodeVMObject::PrivateInodeVMObject(PrivateInodeVMObject const& other, FixedArray<RefPtr<PhysicalRAMPage>>&& new_physical_pages, Bitmap dirty_pages)
: InodeVMObject(other, move(new_physical_pages), move(dirty_pages))
{
}

View file

@@ -24,8 +24,8 @@ public:
private:
virtual bool is_private_inode() const override { return true; }
explicit PrivateInodeVMObject(Inode&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
explicit PrivateInodeVMObject(PrivateInodeVMObject const&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
explicit PrivateInodeVMObject(Inode&, FixedArray<RefPtr<PhysicalRAMPage>>&&, Bitmap dirty_pages);
explicit PrivateInodeVMObject(PrivateInodeVMObject const&, FixedArray<RefPtr<PhysicalRAMPage>>&&, Bitmap dirty_pages);
virtual StringView class_name() const override { return "PrivateInodeVMObject"sv; }

View file

@@ -210,7 +210,7 @@ ErrorOr<void> Region::set_should_cow(size_t page_index, bool cow)
return {};
}
bool Region::map_individual_page_impl(size_t page_index, RefPtr<PhysicalPage> page)
bool Region::map_individual_page_impl(size_t page_index, RefPtr<PhysicalRAMPage> page)
{
if (!page)
return map_individual_page_impl(page_index, {}, false, false);
@@ -257,7 +257,7 @@ bool Region::map_individual_page_impl(size_t page_index, PhysicalAddress paddr,
bool Region::map_individual_page_impl(size_t page_index)
{
RefPtr<PhysicalPage> page;
RefPtr<PhysicalRAMPage> page;
{
SpinlockLocker vmobject_locker(vmobject().m_lock);
page = physical_page(page_index);
@@ -266,7 +266,7 @@ bool Region::map_individual_page_impl(size_t page_index)
return map_individual_page_impl(page_index, page);
}
bool Region::remap_vmobject_page(size_t page_index, NonnullRefPtr<PhysicalPage> physical_page)
bool Region::remap_vmobject_page(size_t page_index, NonnullRefPtr<PhysicalRAMPage> physical_page)
{
SpinlockLocker page_lock(m_page_directory->get_lock());
@@ -487,7 +487,7 @@ PageFaultResponse Region::handle_fault(PageFault const& fault)
#endif
}
PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region, PhysicalPage& page_in_slot_at_time_of_fault)
PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region, PhysicalRAMPage& page_in_slot_at_time_of_fault)
{
VERIFY(vmobject().is_anonymous());
@@ -497,7 +497,7 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region, Physica
if (current_thread != nullptr)
current_thread->did_zero_fault();
RefPtr<PhysicalPage> new_physical_page;
RefPtr<PhysicalRAMPage> new_physical_page;
if (page_in_slot_at_time_of_fault.is_lazy_committed_page()) {
VERIFY(m_vmobject->is_anonymous());
@@ -636,14 +636,14 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
return PageFaultResponse::Continue;
}
RefPtr<PhysicalPage> Region::physical_page(size_t index) const
RefPtr<PhysicalRAMPage> Region::physical_page(size_t index) const
{
SpinlockLocker vmobject_locker(vmobject().m_lock);
VERIFY(index < page_count());
return vmobject().physical_pages()[first_page_index() + index];
}
RefPtr<PhysicalPage>& Region::physical_page_slot(size_t index)
RefPtr<PhysicalRAMPage>& Region::physical_page_slot(size_t index)
{
VERIFY(vmobject().m_lock.is_locked_by_current_processor());
VERIFY(index < page_count());

View file

@@ -163,8 +163,8 @@ public:
return size() / PAGE_SIZE;
}
RefPtr<PhysicalPage> physical_page(size_t index) const;
RefPtr<PhysicalPage>& physical_page_slot(size_t index);
RefPtr<PhysicalRAMPage> physical_page(size_t index) const;
RefPtr<PhysicalRAMPage>& physical_page_slot(size_t index);
[[nodiscard]] size_t offset_in_vmobject() const
{
@@ -232,7 +232,7 @@ private:
Region(NonnullLockRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
Region(VirtualRange const&, NonnullLockRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
[[nodiscard]] bool remap_vmobject_page(size_t page_index, NonnullRefPtr<PhysicalPage>);
[[nodiscard]] bool remap_vmobject_page(size_t page_index, NonnullRefPtr<PhysicalRAMPage>);
void set_access_bit(Access access, bool b)
{
@@ -244,10 +244,10 @@ private:
[[nodiscard]] PageFaultResponse handle_cow_fault(size_t page_index);
[[nodiscard]] PageFaultResponse handle_inode_fault(size_t page_index);
[[nodiscard]] PageFaultResponse handle_zero_fault(size_t page_index, PhysicalPage& page_in_slot_at_time_of_fault);
[[nodiscard]] PageFaultResponse handle_zero_fault(size_t page_index, PhysicalRAMPage& page_in_slot_at_time_of_fault);
[[nodiscard]] bool map_individual_page_impl(size_t page_index);
[[nodiscard]] bool map_individual_page_impl(size_t page_index, RefPtr<PhysicalPage>);
[[nodiscard]] bool map_individual_page_impl(size_t page_index, RefPtr<PhysicalRAMPage>);
[[nodiscard]] bool map_individual_page_impl(size_t page_index, PhysicalAddress);
[[nodiscard]] bool map_individual_page_impl(size_t page_index, PhysicalAddress, bool readable, bool writeable);

View file

@@ -8,7 +8,7 @@
namespace Kernel::Memory {
ErrorOr<LockRefPtr<ScatterGatherList>> ScatterGatherList::try_create(AsyncBlockDeviceRequest& request, Span<NonnullRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size, StringView region_name)
ErrorOr<LockRefPtr<ScatterGatherList>> ScatterGatherList::try_create(AsyncBlockDeviceRequest& request, Span<NonnullRefPtr<PhysicalRAMPage>> allocated_pages, size_t device_block_size, StringView region_name)
{
auto vm_object = TRY(AnonymousVMObject::try_create_with_physical_pages(allocated_pages));
auto size = TRY(page_round_up((request.block_count() * device_block_size)));

View file

@@ -19,7 +19,7 @@ namespace Kernel::Memory {
class ScatterGatherList final : public AtomicRefCounted<ScatterGatherList> {
public:
static ErrorOr<LockRefPtr<ScatterGatherList>> try_create(AsyncBlockDeviceRequest&, Span<NonnullRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size, StringView region_name);
static ErrorOr<LockRefPtr<ScatterGatherList>> try_create(AsyncBlockDeviceRequest&, Span<NonnullRefPtr<PhysicalRAMPage>> allocated_pages, size_t device_block_size, StringView region_name);
VMObject const& vmobject() const { return m_vm_object; }
VirtualAddress dma_region() const { return m_dma_region->vaddr(); }
size_t scatters_count() const { return m_vm_object->physical_pages().size(); }

View file

@@ -56,21 +56,21 @@ ErrorOr<void> SharedFramebufferVMObject::create_real_writes_framebuffer_vm_objec
return {};
}
Span<RefPtr<PhysicalPage>> SharedFramebufferVMObject::real_framebuffer_physical_pages()
Span<RefPtr<PhysicalRAMPage>> SharedFramebufferVMObject::real_framebuffer_physical_pages()
{
return m_real_framebuffer_vmobject->physical_pages();
}
ReadonlySpan<RefPtr<PhysicalPage>> SharedFramebufferVMObject::real_framebuffer_physical_pages() const
ReadonlySpan<RefPtr<PhysicalRAMPage>> SharedFramebufferVMObject::real_framebuffer_physical_pages() const
{
return m_real_framebuffer_vmobject->physical_pages();
}
Span<RefPtr<PhysicalPage>> SharedFramebufferVMObject::fake_sink_framebuffer_physical_pages()
Span<RefPtr<PhysicalRAMPage>> SharedFramebufferVMObject::fake_sink_framebuffer_physical_pages()
{
return m_physical_pages.span();
}
ReadonlySpan<RefPtr<PhysicalPage>> SharedFramebufferVMObject::fake_sink_framebuffer_physical_pages() const
ReadonlySpan<RefPtr<PhysicalRAMPage>> SharedFramebufferVMObject::fake_sink_framebuffer_physical_pages() const
{
return m_physical_pages.span();
}
@@ -92,14 +92,14 @@ void SharedFramebufferVMObject::switch_to_real_framebuffer_writes(Badge<Kernel::
});
}
ReadonlySpan<RefPtr<PhysicalPage>> SharedFramebufferVMObject::physical_pages() const
ReadonlySpan<RefPtr<PhysicalRAMPage>> SharedFramebufferVMObject::physical_pages() const
{
SpinlockLocker locker(m_writes_state_lock);
if (m_writes_are_faked)
return VMObject::physical_pages();
return m_real_framebuffer_vmobject->physical_pages();
}
Span<RefPtr<PhysicalPage>> SharedFramebufferVMObject::physical_pages()
Span<RefPtr<PhysicalRAMPage>> SharedFramebufferVMObject::physical_pages()
{
SpinlockLocker locker(m_writes_state_lock);
if (m_writes_are_faked)
@@ -107,7 +107,7 @@ Span<RefPtr<PhysicalPage>> SharedFramebufferVMObject::physical_pages()
return m_real_framebuffer_vmobject->physical_pages();
}
SharedFramebufferVMObject::SharedFramebufferVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, CommittedPhysicalPageSet committed_pages, AnonymousVMObject& real_framebuffer_vmobject)
SharedFramebufferVMObject::SharedFramebufferVMObject(FixedArray<RefPtr<PhysicalRAMPage>>&& new_physical_pages, CommittedPhysicalPageSet committed_pages, AnonymousVMObject& real_framebuffer_vmobject)
: VMObject(move(new_physical_pages))
, m_real_framebuffer_vmobject(real_framebuffer_vmobject)
, m_committed_pages(move(committed_pages))

View file

@@ -22,15 +22,15 @@ public:
static ErrorOr<NonnullLockRefPtr<FakeWritesFramebufferVMObject>> try_create(Badge<SharedFramebufferVMObject>, SharedFramebufferVMObject const& parent_object);
private:
FakeWritesFramebufferVMObject(SharedFramebufferVMObject const& parent_object, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
FakeWritesFramebufferVMObject(SharedFramebufferVMObject const& parent_object, FixedArray<RefPtr<PhysicalRAMPage>>&& new_physical_pages)
: VMObject(move(new_physical_pages))
, m_parent_object(parent_object)
{
}
virtual StringView class_name() const override { return "FakeWritesFramebufferVMObject"sv; }
virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override { return Error::from_errno(ENOTIMPL); }
virtual ReadonlySpan<RefPtr<PhysicalPage>> physical_pages() const override { return m_parent_object->fake_sink_framebuffer_physical_pages(); }
virtual Span<RefPtr<PhysicalPage>> physical_pages() override { return m_parent_object->fake_sink_framebuffer_physical_pages(); }
virtual ReadonlySpan<RefPtr<PhysicalRAMPage>> physical_pages() const override { return m_parent_object->fake_sink_framebuffer_physical_pages(); }
virtual Span<RefPtr<PhysicalRAMPage>> physical_pages() override { return m_parent_object->fake_sink_framebuffer_physical_pages(); }
NonnullLockRefPtr<SharedFramebufferVMObject> m_parent_object;
};
@@ -39,15 +39,15 @@ public:
static ErrorOr<NonnullLockRefPtr<RealWritesFramebufferVMObject>> try_create(Badge<SharedFramebufferVMObject>, SharedFramebufferVMObject const& parent_object);
private:
RealWritesFramebufferVMObject(SharedFramebufferVMObject const& parent_object, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
RealWritesFramebufferVMObject(SharedFramebufferVMObject const& parent_object, FixedArray<RefPtr<PhysicalRAMPage>>&& new_physical_pages)
: VMObject(move(new_physical_pages))
, m_parent_object(parent_object)
{
}
virtual StringView class_name() const override { return "RealWritesFramebufferVMObject"sv; }
virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override { return Error::from_errno(ENOTIMPL); }
virtual ReadonlySpan<RefPtr<PhysicalPage>> physical_pages() const override { return m_parent_object->real_framebuffer_physical_pages(); }
virtual Span<RefPtr<PhysicalPage>> physical_pages() override { return m_parent_object->real_framebuffer_physical_pages(); }
virtual ReadonlySpan<RefPtr<PhysicalRAMPage>> physical_pages() const override { return m_parent_object->real_framebuffer_physical_pages(); }
virtual Span<RefPtr<PhysicalRAMPage>> physical_pages() override { return m_parent_object->real_framebuffer_physical_pages(); }
NonnullLockRefPtr<SharedFramebufferVMObject> m_parent_object;
};
@@ -60,14 +60,14 @@ public:
void switch_to_fake_sink_framebuffer_writes(Badge<Kernel::DisplayConnector>);
void switch_to_real_framebuffer_writes(Badge<Kernel::DisplayConnector>);
virtual ReadonlySpan<RefPtr<PhysicalPage>> physical_pages() const override;
virtual Span<RefPtr<PhysicalPage>> physical_pages() override;
virtual ReadonlySpan<RefPtr<PhysicalRAMPage>> physical_pages() const override;
virtual Span<RefPtr<PhysicalRAMPage>> physical_pages() override;
Span<RefPtr<PhysicalPage>> fake_sink_framebuffer_physical_pages();
ReadonlySpan<RefPtr<PhysicalPage>> fake_sink_framebuffer_physical_pages() const;
Span<RefPtr<PhysicalRAMPage>> fake_sink_framebuffer_physical_pages();
ReadonlySpan<RefPtr<PhysicalRAMPage>> fake_sink_framebuffer_physical_pages() const;
Span<RefPtr<PhysicalPage>> real_framebuffer_physical_pages();
ReadonlySpan<RefPtr<PhysicalPage>> real_framebuffer_physical_pages() const;
Span<RefPtr<PhysicalRAMPage>> real_framebuffer_physical_pages();
ReadonlySpan<RefPtr<PhysicalRAMPage>> real_framebuffer_physical_pages() const;
FakeWritesFramebufferVMObject const& fake_writes_framebuffer_vmobject() const { return *m_fake_writes_framebuffer_vmobject; }
FakeWritesFramebufferVMObject& fake_writes_framebuffer_vmobject() { return *m_fake_writes_framebuffer_vmobject; }
@@ -76,7 +76,7 @@ public:
RealWritesFramebufferVMObject& real_writes_framebuffer_vmobject() { return *m_real_writes_framebuffer_vmobject; }
private:
SharedFramebufferVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, CommittedPhysicalPageSet, AnonymousVMObject& real_framebuffer_vmobject);
SharedFramebufferVMObject(FixedArray<RefPtr<PhysicalRAMPage>>&& new_physical_pages, CommittedPhysicalPageSet, AnonymousVMObject& real_framebuffer_vmobject);
virtual StringView class_name() const override { return "SharedFramebufferVMObject"sv; }

View file

@@ -39,12 +39,12 @@ ErrorOr<NonnullLockRefPtr<VMObject>> SharedInodeVMObject::try_clone()
return adopt_nonnull_lock_ref_or_enomem<VMObject>(new (nothrow) SharedInodeVMObject(*this, move(new_physical_pages), move(dirty_pages)));
}
SharedInodeVMObject::SharedInodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
SharedInodeVMObject::SharedInodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalRAMPage>>&& new_physical_pages, Bitmap dirty_pages)
: InodeVMObject(inode, move(new_physical_pages), move(dirty_pages))
{
}
SharedInodeVMObject::SharedInodeVMObject(SharedInodeVMObject const& other, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
SharedInodeVMObject::SharedInodeVMObject(SharedInodeVMObject const& other, FixedArray<RefPtr<PhysicalRAMPage>>&& new_physical_pages, Bitmap dirty_pages)
: InodeVMObject(other, move(new_physical_pages), move(dirty_pages))
{
}

View file

@@ -24,8 +24,8 @@ public:
private:
virtual bool is_shared_inode() const override { return true; }
explicit SharedInodeVMObject(Inode&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
explicit SharedInodeVMObject(SharedInodeVMObject const&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
explicit SharedInodeVMObject(Inode&, FixedArray<RefPtr<PhysicalRAMPage>>&&, Bitmap dirty_pages);
explicit SharedInodeVMObject(SharedInodeVMObject const&, FixedArray<RefPtr<PhysicalRAMPage>>&&, Bitmap dirty_pages);
virtual StringView class_name() const override { return "SharedInodeVMObject"sv; }

View file

@@ -17,17 +17,17 @@ SpinlockProtected<VMObject::AllInstancesList, LockRank::None>& VMObject::all_ins
return s_all_instances;
}
ErrorOr<FixedArray<RefPtr<PhysicalPage>>> VMObject::try_clone_physical_pages() const
ErrorOr<FixedArray<RefPtr<PhysicalRAMPage>>> VMObject::try_clone_physical_pages() const
{
return m_physical_pages.clone();
}
ErrorOr<FixedArray<RefPtr<PhysicalPage>>> VMObject::try_create_physical_pages(size_t size)
ErrorOr<FixedArray<RefPtr<PhysicalRAMPage>>> VMObject::try_create_physical_pages(size_t size)
{
return FixedArray<RefPtr<PhysicalPage>>::create(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
return FixedArray<RefPtr<PhysicalRAMPage>>::create(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
}
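Note: try_create_physical_pages only sizes the array; every slot starts as a null RefPtr<PhysicalRAMPage> to be filled lazily. A quick worked example of the rounding, assuming PAGE_SIZE == 4096:

// ceil_div rounds the byte count up to whole pages:
//   try_create_physical_pages(4096)  -> FixedArray with 1 null slot
//   try_create_physical_pages(4097)  -> 2 slots
//   try_create_physical_pages(10000) -> 3 slots, since ceil_div(10000, 4096) == 3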
VMObject::VMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
VMObject::VMObject(FixedArray<RefPtr<PhysicalRAMPage>>&& new_physical_pages)
: m_physical_pages(move(new_physical_pages))
{
all_instances().with([&](auto& list) { list.append(*this); });

View file

@@ -35,8 +35,8 @@ public:
size_t page_count() const { return m_physical_pages.size(); }
virtual ReadonlySpan<RefPtr<PhysicalPage>> physical_pages() const { return m_physical_pages.span(); }
virtual Span<RefPtr<PhysicalPage>> physical_pages() { return m_physical_pages.span(); }
virtual ReadonlySpan<RefPtr<PhysicalRAMPage>> physical_pages() const { return m_physical_pages.span(); }
virtual Span<RefPtr<PhysicalRAMPage>> physical_pages() { return m_physical_pages.span(); }
size_t size() const { return m_physical_pages.size() * PAGE_SIZE; }
@@ -55,15 +55,15 @@ public:
}
protected:
static ErrorOr<FixedArray<RefPtr<PhysicalPage>>> try_create_physical_pages(size_t);
ErrorOr<FixedArray<RefPtr<PhysicalPage>>> try_clone_physical_pages() const;
explicit VMObject(FixedArray<RefPtr<PhysicalPage>>&&);
static ErrorOr<FixedArray<RefPtr<PhysicalRAMPage>>> try_create_physical_pages(size_t);
ErrorOr<FixedArray<RefPtr<PhysicalRAMPage>>> try_clone_physical_pages() const;
explicit VMObject(FixedArray<RefPtr<PhysicalRAMPage>>&&);
template<typename Callback>
void for_each_region(Callback);
IntrusiveListNode<VMObject> m_list_node;
FixedArray<RefPtr<PhysicalPage>> m_physical_pages;
FixedArray<RefPtr<PhysicalRAMPage>> m_physical_pages;
mutable RecursiveSpinlock<LockRank::None> m_lock {};
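Note: to see the protected pieces above together, a minimal hypothetical subclass; MyVMObject is invented for illustration and follows the error-handling patterns visible elsewhere in this diff.

// Hedged sketch of a VMObject subclass after the rename; not from the patch.
class MyVMObject final : public VMObject {
public:
    static ErrorOr<NonnullLockRefPtr<MyVMObject>> try_create(size_t size)
    {
        // Sized for ceil_div(size, PAGE_SIZE) pages, all initially null.
        auto pages = TRY(try_create_physical_pages(size));
        return adopt_nonnull_lock_ref_or_enomem(new (nothrow) MyVMObject(move(pages)));
    }

private:
    explicit MyVMObject(FixedArray<RefPtr<PhysicalRAMPage>>&& pages)
        : VMObject(move(pages))
    {
    }
    virtual StringView class_name() const override { return "MyVMObject"sv; }
    virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override { return Error::from_errno(ENOTIMPL); }
};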