diff --git a/Kernel/Devices/Storage/NVMe/NVMeInterruptQueue.cpp b/Kernel/Devices/Storage/NVMe/NVMeInterruptQueue.cpp
index 47401278b5c..0981ede6716 100644
--- a/Kernel/Devices/Storage/NVMe/NVMeInterruptQueue.cpp
+++ b/Kernel/Devices/Storage/NVMe/NVMeInterruptQueue.cpp
@@ -31,7 +31,6 @@ void NVMeInterruptQueue::initialize_interrupt_queue()
 
 bool NVMeInterruptQueue::handle_irq(RegisterState const&)
 {
-    SpinlockLocker lock(m_request_lock);
     return process_cq() ? true : false;
 }
 
@@ -47,14 +46,15 @@ void NVMeInterruptQueue::complete_current_request(u16 cmdid, u16 status)
     });
 
     if (work_item_creation_result.is_error()) {
-        SpinlockLocker lock(m_request_lock);
-        auto& request_pdu = m_requests.get(cmdid).release_value();
-        auto current_request = request_pdu.request;
+        m_requests.with([cmdid, status](auto& requests) {
+            auto& request_pdu = requests.get(cmdid).release_value();
+            auto current_request = request_pdu.request;
 
-        current_request->complete(AsyncDeviceRequest::OutOfMemory);
-        if (request_pdu.end_io_handler)
-            request_pdu.end_io_handler(status);
-        request_pdu.clear();
+            current_request->complete(AsyncDeviceRequest::OutOfMemory);
+            if (request_pdu.end_io_handler)
+                request_pdu.end_io_handler(status);
+            request_pdu.clear();
+        });
     }
 }
 }
diff --git a/Kernel/Devices/Storage/NVMe/NVMeQueue.cpp b/Kernel/Devices/Storage/NVMe/NVMeQueue.cpp
index 7c6e2d39f87..8e2787e909f 100644
--- a/Kernel/Devices/Storage/NVMe/NVMeQueue.cpp
+++ b/Kernel/Devices/Storage/NVMe/NVMeQueue.cpp
@@ -41,7 +41,9 @@ UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_regio
     , m_rw_dma_page(rw_dma_page)
 {
-    m_requests.try_ensure_capacity(q_depth).release_value_but_fixme_should_propagate_errors();
+    m_requests.with([q_depth](auto& requests) {
+        requests.try_ensure_capacity(q_depth).release_value_but_fixme_should_propagate_errors();
+    });
     m_sqe_array = { reinterpret_cast<NVMeSubmission*>(m_sq_dma_region->vaddr().as_ptr()), m_qdepth };
     m_cqe_array = { reinterpret_cast<NVMeCompletion*>(m_cq_dma_region->vaddr().as_ptr()), m_qdepth };
 }
 
@@ -66,21 +68,23 @@ void NVMeQueue::update_cqe_head()
 u32 NVMeQueue::process_cq()
 {
     u32 nr_of_processed_cqes = 0;
-    while (cqe_available()) {
-        u16 status;
-        u16 cmdid;
-        ++nr_of_processed_cqes;
-        status = CQ_STATUS_FIELD(m_cqe_array[m_cq_head].status);
-        cmdid = m_cqe_array[m_cq_head].command_id;
-        dbgln_if(NVME_DEBUG, "NVMe: Completion with status {:x} and command identifier {}. CQ_HEAD: {}", status, cmdid, m_cq_head);
+    m_requests.with([this, &nr_of_processed_cqes](auto& requests) {
+        while (cqe_available()) {
+            u16 status;
+            u16 cmdid;
+            ++nr_of_processed_cqes;
+            status = CQ_STATUS_FIELD(m_cqe_array[m_cq_head].status);
+            cmdid = m_cqe_array[m_cq_head].command_id;
+            dbgln_if(NVME_DEBUG, "NVMe: Completion with status {:x} and command identifier {}. CQ_HEAD: {}", status, cmdid, m_cq_head);
 
-        if (!m_requests.contains(cmdid)) {
-            dmesgln("Bogus cmd id: {}", cmdid);
-            VERIFY_NOT_REACHED();
+            if (!requests.contains(cmdid)) {
+                dmesgln("Bogus cmd id: {}", cmdid);
+                VERIFY_NOT_REACHED();
+            }
+            complete_current_request(cmdid, status);
+            update_cqe_head();
         }
-        complete_current_request(cmdid, status);
-        update_cqe_head();
-    }
+    });
     if (nr_of_processed_cqes) {
         update_cq_doorbell();
     }
@@ -106,35 +110,36 @@ void NVMeQueue::submit_sqe(NVMeSubmission& sub)
 
 void NVMeQueue::complete_current_request(u16 cmdid, u16 status)
 {
-    SpinlockLocker lock(m_request_lock);
-    auto& request_pdu = m_requests.get(cmdid).release_value();
-    auto current_request = request_pdu.request;
-    AsyncDeviceRequest::RequestResult req_result = AsyncDeviceRequest::Success;
+    m_requests.with([this, cmdid, status](auto& requests) {
+        auto& request_pdu = requests.get(cmdid).release_value();
+        auto current_request = request_pdu.request;
+        AsyncDeviceRequest::RequestResult req_result = AsyncDeviceRequest::Success;
 
-    ScopeGuard guard = [&req_result, status, &request_pdu] {
-        if (request_pdu.request)
-            request_pdu.request->complete(req_result);
-        if (request_pdu.end_io_handler)
-            request_pdu.end_io_handler(status);
-        request_pdu.clear();
-    };
+        ScopeGuard guard = [&req_result, status, &request_pdu] {
+            if (request_pdu.request)
+                request_pdu.request->complete(req_result);
+            if (request_pdu.end_io_handler)
+                request_pdu.end_io_handler(status);
+            request_pdu.clear();
+        };
 
-    // There can be submission without any request associated with it such as with
-    // admin queue commands during init. If there is no request, we are done
-    if (!current_request)
-        return;
+        // There can be submission without any request associated with it such as with
+        // admin queue commands during init. If there is no request, we are done
+        if (!current_request)
+            return;
 
-    if (status) {
-        req_result = AsyncBlockDeviceRequest::Failure;
-        return;
-    }
-
-    if (current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Read) {
-        if (auto result = current_request->write_to_buffer(current_request->buffer(), m_rw_dma_region->vaddr().as_ptr(), current_request->buffer_size()); result.is_error()) {
-            req_result = AsyncBlockDeviceRequest::MemoryFault;
+        if (status) {
+            req_result = AsyncBlockDeviceRequest::Failure;
             return;
         }
-    }
+
+        if (current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Read) {
+            if (auto result = current_request->write_to_buffer(current_request->buffer(), m_rw_dma_region->vaddr().as_ptr(), current_request->buffer_size()); result.is_error()) {
+                req_result = AsyncBlockDeviceRequest::MemoryFault;
+                return;
+            }
+        }
+    });
 }
 
 u16 NVMeQueue::submit_sync_sqe(NVMeSubmission& sub)
@@ -144,10 +149,9 @@ u16 NVMeQueue::submit_sync_sqe(NVMeSubmission& sub)
     u16 cid = get_request_cid();
     sub.cmdid = cid;
 
-    {
-        SpinlockLocker req_lock(m_request_lock);
-        m_requests.set(sub.cmdid, { nullptr, [this, &cmd_status](u16 status) mutable { cmd_status = status; m_sync_wait_queue.wake_all(); } });
-    }
+    m_requests.with([this, &sub, &cmd_status](auto& requests) {
+        requests.set(sub.cmdid, { nullptr, [this, &cmd_status](u16 status) mutable { cmd_status = status; m_sync_wait_queue.wake_all(); } });
+    });
     submit_sqe(sub);
 
     // FIXME: Only sync submissions (usually used for admin commands) use a WaitQueue based IO. Eventually we need to
@@ -167,10 +171,9 @@ void NVMeQueue::read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32 count)
     sub.rw.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(m_rw_dma_page->paddr().as_ptr()));
 
     sub.cmdid = get_request_cid();
-    {
-        SpinlockLocker req_lock(m_request_lock);
-        m_requests.set(sub.cmdid, { request, nullptr });
-    }
+    m_requests.with([&sub, &request](auto& requests) {
+        requests.set(sub.cmdid, { request, nullptr });
+    });
 
     full_memory_barrier();
     submit_sqe(sub);
@@ -188,10 +191,9 @@ void NVMeQueue::write(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32 count)
     sub.rw.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(m_rw_dma_page->paddr().as_ptr()));
 
     sub.cmdid = get_request_cid();
-    {
-        SpinlockLocker req_lock(m_request_lock);
-        m_requests.set(sub.cmdid, { request, nullptr });
-    }
+    m_requests.with([&sub, &request](auto& requests) {
+        requests.set(sub.cmdid, { request, nullptr });
+    });
 
     if (auto result = request.read_from_buffer(request.buffer(), m_rw_dma_region->vaddr().as_ptr(), request.buffer_size()); result.is_error()) {
         complete_current_request(sub.cmdid, AsyncDeviceRequest::MemoryFault);
diff --git a/Kernel/Devices/Storage/NVMe/NVMeQueue.h b/Kernel/Devices/Storage/NVMe/NVMeQueue.h
index 6ad5386283c..3bc15db5973 100644
--- a/Kernel/Devices/Storage/NVMe/NVMeQueue.h
+++ b/Kernel/Devices/Storage/NVMe/NVMeQueue.h
@@ -113,9 +113,8 @@ private:
 
 protected:
     Spinlock<LockRank::None> m_cq_lock {};
-    HashMap<u16, NVMeIO> m_requests;
+    SpinlockProtected<HashMap<u16, NVMeIO>, LockRank::None> m_requests;
     NonnullOwnPtr<Memory::Region> m_rw_dma_region;
-    RecursiveSpinlock<LockRank::None> m_request_lock {};
 
 private:
     u16 m_qid {};
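
For reference, the idiom this patch adopts: `SpinlockProtected<T, LockRank>` couples a value with the lock that guards it, so the value is only reachable through `with()`, which acquires the lock, hands the value to a callback, and releases the lock when the callback returns. Below is a minimal userspace sketch of the same shape, using `std::mutex` in place of the kernel's `Spinlock`; the name `LockProtected` and the toy request map are illustrative only, not from AK or this patch.

```cpp
#include <cstdio>
#include <functional>
#include <mutex>
#include <unordered_map>

// Minimal stand-in for AK::SpinlockProtected<T, LockRank>: the wrapped value
// is private and only reachable through with(), so every access happens with
// the lock held. The kernel version uses a Spinlock and a LockRank instead
// of std::mutex.
template<typename T>
class LockProtected {
public:
    template<typename Callback>
    decltype(auto) with(Callback callback)
    {
        std::lock_guard guard(m_lock); // held for the whole callback
        return callback(m_value);
    }

private:
    std::mutex m_lock;
    T m_value {};
};

int main()
{
    // Toy analogue of m_requests: command id -> end-of-IO handler.
    LockProtected<std::unordered_map<int, std::function<void(int)>>> requests;

    // Mirrors the patch's requests.set(sub.cmdid, ...) inside with().
    requests.with([](auto& map) {
        map[42] = [](int status) { std::printf("cmd 42 completed, status %d\n", status); };
    });

    // Mirrors the completion path: look up the cmdid, run the handler, erase.
    requests.with([](auto& map) {
        if (auto it = map.find(42); it != map.end()) {
            it->second(/* status */ 0);
            map.erase(it);
        }
    });
}
```

Compared with the old arrangement (a bare `HashMap` next to a `RecursiveSpinlock` that every caller had to remember to take), this makes unlocked access to the map unrepresentable, which is the failure mode the removed `SpinlockLocker lock(m_request_lock);` lines were only guarding against by convention.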