Kernel: Add UNMAP_AFTER_INIT to NVMe member functions

NVMeController, NVMeQueue and NVMeNameSpace have functions that are not
used after init, so add them to the UNMAP_AFTER_INIT section.
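For context, UNMAP_AFTER_INIT marks functions whose code is only needed while the kernel is booting; once initialization is done the kernel unmaps the section that holds them, so the memory can be reclaimed and any stray late call faults instead of silently running. The following is only a minimal sketch of the idea, assuming the macro boils down to a GCC/Clang section attribute; the names ExampleDevice, probe and handle_irq are hypothetical, and the real macro definition lives in the kernel headers, not here.

// Sketch only: place a function's code into a dedicated linker section that
// the kernel can unmap after early boot. This approximates the mechanism and
// is not the actual SerenityOS definition.
#define UNMAP_AFTER_INIT __attribute__((section(".unmap_after_init")))

struct ExampleDevice {
    void probe();      // only called during boot-time device discovery
    void handle_irq(); // hot path, must stay mapped for the kernel's lifetime
};

// The attribute goes on the out-of-line definition, just as the diff below
// does for the NVMe member functions.
UNMAP_AFTER_INIT void ExampleDevice::probe()
{
    // one-time hardware bring-up; after init this code is no longer mapped
}

void ExampleDevice::handle_irq()
{
    // regular runtime work, emitted into the ordinary .text section
}

Marking the init-only NVMe member functions this way costs nothing at runtime and makes accidental post-init use fail loudly.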
Pankaj Raghav 2022-01-12 16:09:50 +05:30 committed by Idan Horowitz
parent 487377d1d7
commit 31c4c9724b
3 changed files with 12 additions and 12 deletions


@@ -18,7 +18,7 @@
namespace Kernel {
Atomic<u8> NVMeController::controller_id {};
-ErrorOr<NonnullRefPtr<NVMeController>> NVMeController::try_initialize(const Kernel::PCI::DeviceIdentifier& device_identifier)
+UNMAP_AFTER_INIT ErrorOr<NonnullRefPtr<NVMeController>> NVMeController::try_initialize(const Kernel::PCI::DeviceIdentifier& device_identifier)
{
auto controller = TRY(adopt_nonnull_ref_or_enomem(new NVMeController(device_identifier)));
TRY(controller->initialize());
@@ -26,13 +26,13 @@ ErrorOr<NonnullRefPtr<NVMeController>> NVMeController::try_initialize(const Kern
return controller;
}
-NVMeController::NVMeController(const PCI::DeviceIdentifier& device_identifier)
+UNMAP_AFTER_INIT NVMeController::NVMeController(const PCI::DeviceIdentifier& device_identifier)
: PCI::Device(device_identifier.address())
, m_pci_device_id(device_identifier)
{
}
-ErrorOr<void> NVMeController::initialize()
+UNMAP_AFTER_INIT ErrorOr<void> NVMeController::initialize()
{
// Nr of queues = one queue per core
auto nr_of_queues = Processor::count();
@@ -138,7 +138,7 @@ bool NVMeController::start_controller()
return true;
}
-u32 NVMeController::get_admin_q_dept()
+UNMAP_AFTER_INIT u32 NVMeController::get_admin_q_dept()
{
u32 aqa = m_controller_regs->aqa;
// Queue depth is 0 based
@@ -147,7 +147,7 @@ u32 NVMeController::get_admin_q_dept()
return q_depth;
}
-ErrorOr<void> NVMeController::identify_and_init_namespaces()
+UNMAP_AFTER_INIT ErrorOr<void> NVMeController::identify_and_init_namespaces()
{
RefPtr<Memory::PhysicalPage> prp_dma_buffer;
@@ -213,7 +213,7 @@ ErrorOr<void> NVMeController::identify_and_init_namespaces()
return {};
}
-Tuple<u64, u8> NVMeController::get_ns_features(IdentifyNamespace& identify_data_struct)
+UNMAP_AFTER_INIT Tuple<u64, u8> NVMeController::get_ns_features(IdentifyNamespace& identify_data_struct)
{
auto flbas = identify_data_struct.flbas & FLBA_SIZE_MASK;
auto namespace_size = identify_data_struct.nsze;
@@ -253,7 +253,7 @@ void NVMeController::complete_current_request([[maybe_unused]] AsyncDeviceReques
VERIFY_NOT_REACHED();
}
-ErrorOr<void> NVMeController::create_admin_queue(u8 irq)
+UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_admin_queue(u8 irq)
{
auto qdepth = get_admin_q_dept();
OwnPtr<Memory::Region> cq_dma_region;
@@ -296,7 +296,7 @@ ErrorOr<void> NVMeController::create_admin_queue(u8 irq)
return {};
}
-ErrorOr<void> NVMeController::create_io_queue(u8 irq, u8 qid)
+UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_io_queue(u8 irq, u8 qid)
{
NVMeSubmission sub {};
OwnPtr<Memory::Region> cq_dma_region;


@@ -11,7 +11,7 @@
namespace Kernel {
-ErrorOr<NonnullRefPtr<NVMeNameSpace>> NVMeNameSpace::try_create(NonnullRefPtrVector<NVMeQueue> queues, u8 controller_id, u16 nsid, size_t storage_size, size_t lba_size)
+UNMAP_AFTER_INIT ErrorOr<NonnullRefPtr<NVMeNameSpace>> NVMeNameSpace::try_create(NonnullRefPtrVector<NVMeQueue> queues, u8 controller_id, u16 nsid, size_t storage_size, size_t lba_size)
{
auto minor_number = StorageManagement::generate_storage_minor_number();
auto major_number = StorageManagement::storage_type_major_number();
@@ -20,7 +20,7 @@ ErrorOr<NonnullRefPtr<NVMeNameSpace>> NVMeNameSpace::try_create(NonnullRefPtrVec
return device;
}
-NVMeNameSpace::NVMeNameSpace(NonnullRefPtrVector<NVMeQueue> queues, size_t max_addresable_block, size_t lba_size, size_t major_number, size_t minor_number, u16 nsid, NonnullOwnPtr<KString> dev_name)
+UNMAP_AFTER_INIT NVMeNameSpace::NVMeNameSpace(NonnullRefPtrVector<NVMeQueue> queues, size_t max_addresable_block, size_t lba_size, size_t major_number, size_t minor_number, u16 nsid, NonnullOwnPtr<KString> dev_name)
: StorageDevice(major_number, minor_number, lba_size, max_addresable_block, move(dev_name))
, m_nsid(nsid)
, m_queues(move(queues))


@@ -20,7 +20,7 @@ ErrorOr<NonnullRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, u8 irq, u32 q_d
return queue;
}
-NVMeQueue::NVMeQueue(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
+UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
: IRQHandler(irq)
, m_qid(qid)
, m_admin_queue(qid == 0)
@@ -38,7 +38,7 @@ NVMeQueue::NVMeQueue(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma
m_cqe_array = { reinterpret_cast<NVMeCompletion*>(m_cq_dma_region->vaddr().as_ptr()), m_qdepth };
}
-ErrorOr<void> NVMeQueue::create()
+UNMAP_AFTER_INIT ErrorOr<void> NVMeQueue::create()
{
// DMA region for RW operation. For now the requests don't exceed 4096 bytes (the storage device takes care of it)
auto buffer = TRY(MM.allocate_dma_buffer_page("Admin CQ queue", Memory::Region::Access::ReadWrite, m_rw_dma_page));