
Kernel: Fix NVMe register access

We need to use the volatile keyword when mapping the device registers,
or the compiler may narrow or elide the accesses, which leads to this
QEMU error:

pci_nvme_ub_mmiord_toosmall in nvme_mmio_read: MMIO read smaller than
32-bits, offset=0x0
Tom 3 years ago
commit d1e7b69004
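
For background, here is a minimal sketch of the failure mode, in generic C++ rather than this codebase's API; the struct and field names are hypothetical:

    #include <cstdint>

    // Hypothetical register block; only the access width matters here.
    struct ControllerRegister {
        uint32_t status; // e.g. a status register; bit 0 = ready
    };

    // Without volatile, the as-if rule lets the compiler load only the
    // byte that contains bit 0 -- an 8-bit MMIO read, which is exactly
    // what QEMU's pci_nvme_ub_mmiord_toosmall warning flags.
    bool ready_narrowed(ControllerRegister const* regs)
    {
        return regs->status & 1;
    }

    // With volatile, the access itself is observable behavior, and in
    // practice compilers perform it at the declared 32-bit width.
    bool ready(ControllerRegister const volatile* regs)
    {
        return regs->status & 1;
    }

This is why the mappings below are now instantiated with volatile-qualified register types.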

+ 3 - 3
Kernel/Storage/NVMe/NVMeController.cpp

@@ -45,7 +45,7 @@ ErrorOr<void> NVMeController::initialize()
 
     // Map only until doorbell register for the controller
     // Queues will individually map the doorbell register respectively
-    m_controller_regs = Memory::map_typed_writable<ControllerRegister>(PhysicalAddress(m_bar));
+    m_controller_regs = Memory::map_typed_writable<volatile ControllerRegister>(PhysicalAddress(m_bar));
 
     calculate_doorbell_stride();
     TRY(create_admin_queue(irq));
@@ -250,7 +250,7 @@ ErrorOr<void> NVMeController::create_admin_queue(u8 irq)
         auto buffer = TRY(MM.allocate_dma_buffer_pages(sq_size, "Admin SQ queue", Memory::Region::Access::ReadWrite, sq_dma_pages));
         sq_dma_region = move(buffer);
     }
-    auto doorbell_regs = Memory::map_typed_writable<DoorbellRegister>(PhysicalAddress(m_bar + REG_SQ0TDBL_START));
+    auto doorbell_regs = Memory::map_typed_writable<volatile DoorbellRegister>(PhysicalAddress(m_bar + REG_SQ0TDBL_START));
 
     m_admin_queue = TRY(NVMeQueue::try_create(0, irq, qdepth, move(cq_dma_region), cq_dma_pages, move(sq_dma_region), sq_dma_pages, move(doorbell_regs)));
 
@@ -317,7 +317,7 @@ ErrorOr<void> NVMeController::create_io_queue(u8 irq, u8 qid)
     }
 
     auto queue_doorbell_offset = REG_SQ0TDBL_START + ((2 * qid) * (4 << m_dbl_stride));
-    auto doorbell_regs = Memory::map_typed_writable<DoorbellRegister>(PhysicalAddress(m_bar + queue_doorbell_offset));
+    auto doorbell_regs = Memory::map_typed_writable<volatile DoorbellRegister>(PhysicalAddress(m_bar + queue_doorbell_offset));
 
     m_queues.append(TRY(NVMeQueue::try_create(qid, irq, IO_QUEUE_SIZE, move(cq_dma_region), cq_dma_pages, move(sq_dma_region), sq_dma_pages, move(doorbell_regs))));
     m_queues.last().enable_interrupts();

+ 1 - 1
Kernel/Storage/NVMe/NVMeController.h

@@ -69,7 +69,7 @@ private:
     RefPtr<NVMeQueue> m_admin_queue;
     NonnullRefPtrVector<NVMeQueue> m_queues;
     NonnullRefPtrVector<NVMeNameSpace> m_namespaces;
-    Memory::TypedMapping<ControllerRegister> m_controller_regs;
+    Memory::TypedMapping<volatile ControllerRegister> m_controller_regs;
     bool m_admin_queue_ready { false };
     size_t m_device_count {};
     u32 m_bar;

+ 2 - 2
Kernel/Storage/NVMe/NVMeQueue.cpp

@@ -13,14 +13,14 @@
 
 namespace Kernel {
 
-ErrorOr<NonnullRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister> db_regs)
+ErrorOr<NonnullRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
 {
     auto queue = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) NVMeQueue(qid, irq, q_depth, move(cq_dma_region), cq_dma_page, move(sq_dma_region), sq_dma_page, move(db_regs))));
     TRY(queue->create());
     return queue;
 }
 
-NVMeQueue::NVMeQueue(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister> db_regs)
+NVMeQueue::NVMeQueue(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
     : IRQHandler(irq)
     , m_qid(qid)
     , m_admin_queue(qid == 0)

+ 3 - 3
Kernel/Storage/NVMe/NVMeQueue.h

@@ -29,9 +29,9 @@ class AsyncBlockDeviceRequest;
 class NVMeQueue : public IRQHandler
     , public RefCounted<NVMeQueue> {
 public:
-    static ErrorOr<NonnullRefPtr<NVMeQueue>> try_create(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister> db_regs);
+    static ErrorOr<NonnullRefPtr<NVMeQueue>> try_create(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs);
     ErrorOr<void> create();
-    explicit NVMeQueue(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister> db_regs);
+    explicit NVMeQueue(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs);
     bool is_admin_queue() { return m_admin_queue; };
     bool handle_irq(const RegisterState&) override;
     void submit_sqe(struct NVMeSubmission&);
@@ -73,7 +73,7 @@ private:
     NonnullRefPtrVector<Memory::PhysicalPage> m_sq_dma_page;
     Span<NVMeCompletion> m_cqe_array;
     OwnPtr<Memory::Region> m_rw_dma_region;
-    Memory::TypedMapping<DoorbellRegister> m_db_regs;
+    Memory::TypedMapping<volatile DoorbellRegister> m_db_regs;
     RefPtr<Memory::PhysicalPage> m_rw_dma_page;
     Spinlock m_request_lock;
     RefPtr<AsyncBlockDeviceRequest> m_current_request;
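
The same reasoning applies on the write side for the doorbell mapping; a minimal sketch (the sq_tail/cq_head pair follows the NVMe doorbell layout, but the names here are assumptions, not this codebase's definitions):

    #include <cstdint>

    // Assumed doorbell pair: the NVMe spec places a 32-bit
    // submission-queue tail doorbell next to a 32-bit
    // completion-queue head doorbell for each queue.
    struct DoorbellRegister {
        uint32_t sq_tail;
        uint32_t cq_head;
    };

    // Without volatile, two stores to the same location with no
    // intervening read look dead to the compiler: the first store
    // may be elided, so the device misses a doorbell ring.
    void ring_twice_broken(DoorbellRegister* db, uint32_t a, uint32_t b)
    {
        db->sq_tail = a; // can legally be removed
        db->sq_tail = b;
    }

    // With volatile, every store is emitted, in program order.
    void ring_twice(volatile DoorbellRegister* db, uint32_t a, uint32_t b)
    {
        db->sq_tail = a;
        db->sq_tail = b;
    }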