Kernel: Switch to SpinlockProtected to protect NVMeQueue's requests map

This helps ensure no one accidentally accesses m_requests without first
locking its spinlock. In fact, this change fixed such a case: process_cq()
implicitly assumed that the caller had locked the lock, which was not the
case for NVMePollQueue::submit_sqe().
Author: Idan Horowitz (2024-02-09 17:23:37 +02:00), committed by Andreas Kling
parent 263127f21a
commit 45aee20ea9
3 changed files with 61 additions and 60 deletions
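
For context on the pattern the diff below adopts: SpinlockProtected wraps the value so that it is only reachable through a with() callback, which acquires the lock for the duration of the callback. The following user-space sketch is an illustrative analogue only (std::mutex standing in for the kernel spinlock; LockedValue and the example map contents are made up for illustration), not SerenityOS code:

#include <cstdio>
#include <mutex>
#include <unordered_map>
#include <utility>

// Illustrative analogue of SpinlockProtected: the wrapped value has no public
// accessor, so every caller must go through with(), which holds the lock for
// the duration of the callback.
template<typename T>
class LockedValue {
public:
    template<typename... Args>
    explicit LockedValue(Args&&... args)
        : m_value(std::forward<Args>(args)...)
    {
    }

    template<typename Callback>
    decltype(auto) with(Callback callback)
    {
        std::scoped_lock locker(m_lock);
        return callback(m_value);
    }

private:
    std::mutex m_lock;
    T m_value; // never handed out by reference outside with()
};

int main()
{
    // Analogue of the NVMeQueue requests map keyed by command id.
    LockedValue<std::unordered_map<unsigned short, const char*>> requests;

    requests.with([](auto& map) {
        map.emplace(1, "read request");
    });

    requests.with([](auto& map) {
        if (auto it = map.find(1); it != map.end())
            std::printf("cmdid 1 -> %s\n", it->second);
    });

    return 0;
}

With that shape, the bug class described above cannot recur: after this change, process_cq() can only touch the requests map from inside m_requests.with(), so callers such as NVMePollQueue::submit_sqe() no longer have to remember to take the lock themselves.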


@@ -31,7 +31,6 @@ void NVMeInterruptQueue::initialize_interrupt_queue()
 bool NVMeInterruptQueue::handle_irq(RegisterState const&)
 {
-    SpinlockLocker lock(m_request_lock);
     return process_cq() ? true : false;
 }
@@ -47,14 +46,15 @@ void NVMeInterruptQueue::complete_current_request(u16 cmdid, u16 status)
     });
     if (work_item_creation_result.is_error()) {
-        SpinlockLocker lock(m_request_lock);
-        auto& request_pdu = m_requests.get(cmdid).release_value();
-        auto current_request = request_pdu.request;
+        m_requests.with([cmdid, status](auto& requests) {
+            auto& request_pdu = requests.get(cmdid).release_value();
+            auto current_request = request_pdu.request;
 
-        current_request->complete(AsyncDeviceRequest::OutOfMemory);
-        if (request_pdu.end_io_handler)
-            request_pdu.end_io_handler(status);
-        request_pdu.clear();
+            current_request->complete(AsyncDeviceRequest::OutOfMemory);
+            if (request_pdu.end_io_handler)
+                request_pdu.end_io_handler(status);
+            request_pdu.clear();
+        });
     }
 }
 }


@@ -41,7 +41,9 @@ UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_regio
     , m_rw_dma_page(rw_dma_page)
 {
-    m_requests.try_ensure_capacity(q_depth).release_value_but_fixme_should_propagate_errors();
+    m_requests.with([q_depth](auto& requests) {
+        requests.try_ensure_capacity(q_depth).release_value_but_fixme_should_propagate_errors();
+    });
     m_sqe_array = { reinterpret_cast<NVMeSubmission*>(m_sq_dma_region->vaddr().as_ptr()), m_qdepth };
     m_cqe_array = { reinterpret_cast<NVMeCompletion*>(m_cq_dma_region->vaddr().as_ptr()), m_qdepth };
 }
@@ -66,21 +68,23 @@ void NVMeQueue::update_cqe_head()
 u32 NVMeQueue::process_cq()
 {
     u32 nr_of_processed_cqes = 0;
-    while (cqe_available()) {
-        u16 status;
-        u16 cmdid;
-        ++nr_of_processed_cqes;
-        status = CQ_STATUS_FIELD(m_cqe_array[m_cq_head].status);
-        cmdid = m_cqe_array[m_cq_head].command_id;
-        dbgln_if(NVME_DEBUG, "NVMe: Completion with status {:x} and command identifier {}. CQ_HEAD: {}", status, cmdid, m_cq_head);
-        if (!m_requests.contains(cmdid)) {
-            dmesgln("Bogus cmd id: {}", cmdid);
-            VERIFY_NOT_REACHED();
-        }
-        complete_current_request(cmdid, status);
-        update_cqe_head();
-    }
+    m_requests.with([this, &nr_of_processed_cqes](auto& requests) {
+        while (cqe_available()) {
+            u16 status;
+            u16 cmdid;
+            ++nr_of_processed_cqes;
+            status = CQ_STATUS_FIELD(m_cqe_array[m_cq_head].status);
+            cmdid = m_cqe_array[m_cq_head].command_id;
+            dbgln_if(NVME_DEBUG, "NVMe: Completion with status {:x} and command identifier {}. CQ_HEAD: {}", status, cmdid, m_cq_head);
+            if (!requests.contains(cmdid)) {
+                dmesgln("Bogus cmd id: {}", cmdid);
+                VERIFY_NOT_REACHED();
+            }
+            complete_current_request(cmdid, status);
+            update_cqe_head();
+        }
+    });
 
     if (nr_of_processed_cqes) {
         update_cq_doorbell();
     }
@@ -106,35 +110,36 @@ void NVMeQueue::submit_sqe(NVMeSubmission& sub)
 
 void NVMeQueue::complete_current_request(u16 cmdid, u16 status)
 {
-    SpinlockLocker lock(m_request_lock);
-    auto& request_pdu = m_requests.get(cmdid).release_value();
-    auto current_request = request_pdu.request;
-    AsyncDeviceRequest::RequestResult req_result = AsyncDeviceRequest::Success;
+    m_requests.with([this, cmdid, status](auto& requests) {
+        auto& request_pdu = requests.get(cmdid).release_value();
+        auto current_request = request_pdu.request;
+        AsyncDeviceRequest::RequestResult req_result = AsyncDeviceRequest::Success;
 
-    ScopeGuard guard = [&req_result, status, &request_pdu] {
-        if (request_pdu.request)
-            request_pdu.request->complete(req_result);
-        if (request_pdu.end_io_handler)
-            request_pdu.end_io_handler(status);
-        request_pdu.clear();
-    };
+        ScopeGuard guard = [&req_result, status, &request_pdu] {
+            if (request_pdu.request)
+                request_pdu.request->complete(req_result);
+            if (request_pdu.end_io_handler)
+                request_pdu.end_io_handler(status);
+            request_pdu.clear();
+        };
 
-    // There can be submission without any request associated with it such as with
-    // admin queue commands during init. If there is no request, we are done
-    if (!current_request)
-        return;
+        // There can be submission without any request associated with it such as with
+        // admin queue commands during init. If there is no request, we are done
+        if (!current_request)
+            return;
 
-    if (status) {
-        req_result = AsyncBlockDeviceRequest::Failure;
-        return;
-    }
+        if (status) {
+            req_result = AsyncBlockDeviceRequest::Failure;
+            return;
+        }
 
-    if (current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Read) {
-        if (auto result = current_request->write_to_buffer(current_request->buffer(), m_rw_dma_region->vaddr().as_ptr(), current_request->buffer_size()); result.is_error()) {
-            req_result = AsyncBlockDeviceRequest::MemoryFault;
-            return;
-        }
-    }
+        if (current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Read) {
+            if (auto result = current_request->write_to_buffer(current_request->buffer(), m_rw_dma_region->vaddr().as_ptr(), current_request->buffer_size()); result.is_error()) {
+                req_result = AsyncBlockDeviceRequest::MemoryFault;
+                return;
+            }
+        }
+    });
 }
 
 u16 NVMeQueue::submit_sync_sqe(NVMeSubmission& sub)
@@ -144,10 +149,9 @@ u16 NVMeQueue::submit_sync_sqe(NVMeSubmission& sub)
     u16 cid = get_request_cid();
     sub.cmdid = cid;
 
-    {
-        SpinlockLocker req_lock(m_request_lock);
-        m_requests.set(sub.cmdid, { nullptr, [this, &cmd_status](u16 status) mutable { cmd_status = status; m_sync_wait_queue.wake_all(); } });
-    }
+    m_requests.with([this, &sub, &cmd_status](auto& requests) {
+        requests.set(sub.cmdid, { nullptr, [this, &cmd_status](u16 status) mutable { cmd_status = status; m_sync_wait_queue.wake_all(); } });
+    });
     submit_sqe(sub);
 
     // FIXME: Only sync submissions (usually used for admin commands) use a WaitQueue based IO. Eventually we need to
@@ -167,10 +171,9 @@ void NVMeQueue::read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32
     sub.rw.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(m_rw_dma_page->paddr().as_ptr()));
     sub.cmdid = get_request_cid();
 
-    {
-        SpinlockLocker req_lock(m_request_lock);
-        m_requests.set(sub.cmdid, { request, nullptr });
-    }
+    m_requests.with([&sub, &request](auto& requests) {
+        requests.set(sub.cmdid, { request, nullptr });
+    });
 
     full_memory_barrier();
     submit_sqe(sub);
@@ -188,10 +191,9 @@ void NVMeQueue::write(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32
     sub.rw.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(m_rw_dma_page->paddr().as_ptr()));
     sub.cmdid = get_request_cid();
 
-    {
-        SpinlockLocker req_lock(m_request_lock);
-        m_requests.set(sub.cmdid, { request, nullptr });
-    }
+    m_requests.with([&sub, &request](auto& requests) {
+        requests.set(sub.cmdid, { request, nullptr });
+    });
 
     if (auto result = request.read_from_buffer(request.buffer(), m_rw_dma_region->vaddr().as_ptr(), request.buffer_size()); result.is_error()) {
         complete_current_request(sub.cmdid, AsyncDeviceRequest::MemoryFault);


@@ -113,9 +113,8 @@ private:
 
 protected:
     Spinlock<LockRank::Interrupts> m_cq_lock {};
-    HashMap<u16, NVMeIO> m_requests;
+    SpinlockProtected<HashMap<u16, NVMeIO>, LockRank::None> m_requests;
     NonnullOwnPtr<Memory::Region> m_rw_dma_region;
-    RecursiveSpinlock<LockRank::None> m_request_lock {};
 
 private:
     u16 m_qid {};