
Kernel/Storage: Use more locking in the IDE code

This change should make it less likely for race conditions to occur and cause fatal errors when accessing the hardware.
Liav A authored 4 years ago
parent
commit b96e4c1308
3 changed files with 58 additions and 44 deletions
  1. Kernel/Storage/BMIDEChannel.cpp (+23 -19)
  2. Kernel/Storage/IDEChannel.cpp (+32 -23)
  3. Kernel/Storage/IDEChannel.h (+3 -2)
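
The commit combines a blocking Lock (the new m_lock member) that serializes whole requests with the existing SpinLock (m_request_lock) that guards the short sections touching hardware state, and it turns m_current_request into a RefPtr so the request stays alive until it is completed. The following is a minimal standalone sketch of that pattern, not SerenityOS code: std::mutex and an atomic_flag-based spinlock stand in for the kernel's Lock/SpinLock, and names such as IdeChannelSketch, RequestSketch and touch_registers() are invented for illustration.

// Standalone sketch (assumed names, std types standing in for kernel ones).
#include <atomic>
#include <cstdio>
#include <memory>
#include <mutex>
#include <thread>

class SpinLockSketch {
public:
    void lock() { while (m_flag.test_and_set(std::memory_order_acquire)) { } }
    void unlock() { m_flag.clear(std::memory_order_release); }
private:
    std::atomic_flag m_flag = ATOMIC_FLAG_INIT;
};

struct RequestSketch {
    int block_index;
    int block_count;
};

class IdeChannelSketch {
public:
    void start_request(RequestSketch request)
    {
        // Coarse, blocking lock held for the whole request, like the
        // diff's LOCKER(m_lock) in IDEChannel::start_request().
        std::lock_guard<std::mutex> locker(m_lock);
        m_current_request = std::make_shared<RequestSketch>(request);
        touch_registers();
        complete_current_request();
    }

private:
    void touch_registers()
    {
        // Short critical section around hardware access, guarded by the
        // spinlock, like the diff's ScopedSpinLock over m_request_lock.
        std::lock_guard<SpinLockSketch> lock(m_request_lock);
        std::printf("programming controller for %d block(s) at LBA %d\n",
            m_current_request->block_count, m_current_request->block_index);
    }

    void complete_current_request()
    {
        // Take a strong reference, clear the member, then complete outside
        // the spinlock, mirroring the RefPtr change in the diff.
        std::shared_ptr<RequestSketch> current_request;
        {
            std::lock_guard<SpinLockSketch> lock(m_request_lock);
            current_request = m_current_request;
            m_current_request.reset();
        }
        std::printf("request for %d block(s) completed\n", current_request->block_count);
    }

    std::mutex m_lock;                                // stand-in for Lock m_lock { "IDEChannel" }
    SpinLockSketch m_request_lock;                    // stand-in for SpinLock<u8> m_request_lock
    std::shared_ptr<RequestSketch> m_current_request; // stand-in for RefPtr<AsyncBlockDeviceRequest>
};

int main()
{
    IdeChannelSketch channel;
    std::thread writer([&] { channel.start_request({ 0, 8 }); });
    std::thread reader([&] { channel.start_request({ 8, 4 }); });
    writer.join();
    reader.join();
}

The detail mirrored from the diff is that the request is completed only after the spinlock is dropped, while a strong reference keeps it alive once the member pointer has been cleared.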

Kernel/Storage/BMIDEChannel.cpp (+23 -19)

@@ -119,14 +119,14 @@ void BMIDEChannel::complete_current_request(AsyncDeviceRequest::RequestResult re
         dbgln_if(PATA_DEBUG, "BMIDEChannel::complete_current_request result: {}", (int)result);
         dbgln_if(PATA_DEBUG, "BMIDEChannel::complete_current_request result: {}", (int)result);
         ScopedSpinLock lock(m_request_lock);
         ScopedSpinLock lock(m_request_lock);
         VERIFY(m_current_request);
         VERIFY(m_current_request);
-        auto& request = *m_current_request;
-        m_current_request = nullptr;
+        auto current_request = m_current_request;
+        m_current_request.clear();
 
 
         if (result == AsyncDeviceRequest::Success) {
         if (result == AsyncDeviceRequest::Success) {
-            if (request.request_type() == AsyncBlockDeviceRequest::Read) {
-                if (!request.write_to_buffer(request.buffer(), m_dma_buffer_region->vaddr().as_ptr(), 512 * request.block_count())) {
+            if (current_request->request_type() == AsyncBlockDeviceRequest::Read) {
+                if (!current_request->write_to_buffer(current_request->buffer(), m_dma_buffer_region->vaddr().as_ptr(), 512 * current_request->block_count())) {
                     lock.unlock();
                     lock.unlock();
-                    request.complete(AsyncDeviceRequest::MemoryFault);
+                    current_request->complete(AsyncDeviceRequest::MemoryFault);
                     return;
                     return;
                 }
                 }
             }
             }
@@ -137,21 +137,23 @@ void BMIDEChannel::complete_current_request(AsyncDeviceRequest::RequestResult re
         }

         lock.unlock();
-        request.complete(result);
+        current_request->complete(result);
     });
 }

 void BMIDEChannel::ata_write_sectors(bool slave_request, u16 capabilities)
 {
-    VERIFY(m_request_lock.is_locked());
-    auto& request = *m_current_request;
-    auto lba = request.block_index();
-    dbgln_if(PATA_DEBUG, "BMIDEChannel::ata_write_sectors_with_dma ({} x {})", lba, request.block_count());
+    VERIFY(m_lock.is_locked());
+    VERIFY(!m_current_request.is_null());
+    VERIFY(m_current_request->block_count() <= 256);
+
+    ScopedSpinLock m_lock(m_request_lock);
+    dbgln_if(PATA_DEBUG, "BMIDEChannel::ata_write_sectors_with_dma ({} x {})", m_current_request->block_index(), m_current_request->block_count());

     prdt().offset = m_dma_buffer_page->paddr().get();
-    prdt().size = 512 * request.block_count();
+    prdt().size = 512 * m_current_request->block_count();

-    if (!request.read_from_buffer(request.buffer(), m_dma_buffer_region->vaddr().as_ptr(), 512 * request.block_count())) {
+    if (!m_current_request->read_from_buffer(m_current_request->buffer(), m_dma_buffer_region->vaddr().as_ptr(), 512 * m_current_request->block_count())) {
         complete_current_request(AsyncDeviceRequest::MemoryFault);
         return;
     }
@@ -167,7 +169,7 @@ void BMIDEChannel::ata_write_sectors(bool slave_request, u16 capabilities)
     // Turn on "Interrupt" and "Error" flag. The error flag should be cleared by hardware.
     m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 0x6);

-    ata_access(Direction::Write, slave_request, lba, request.block_count(), capabilities);
+    ata_access(Direction::Write, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);

     // Start bus master
     m_io_group.bus_master_base().value().out<u8>(0x1);
@@ -184,13 +186,15 @@ void BMIDEChannel::send_ata_io_command(LBAMode lba_mode, Direction direction) co

 void BMIDEChannel::ata_read_sectors(bool slave_request, u16 capabilities)
 {
-    VERIFY(m_request_lock.is_locked());
-    auto& request = *m_current_request;
-    auto lba = request.block_index();
-    dbgln_if(PATA_DEBUG, "BMIDEChannel::ata_read_sectors_with_dma ({} x {})", lba, request.block_count());
+    VERIFY(m_lock.is_locked());
+    VERIFY(!m_current_request.is_null());
+    VERIFY(m_current_request->block_count() <= 256);
+
+    ScopedSpinLock m_lock(m_request_lock);
+    dbgln_if(PATA_DEBUG, "BMIDEChannel::ata_read_sectors_with_dma ({} x {})", m_current_request->block_index(), m_current_request->block_count());

     prdt().offset = m_dma_buffer_page->paddr().get();
-    prdt().size = 512 * request.block_count();
+    prdt().size = 512 * m_current_request->block_count();

     VERIFY(prdt().size <= PAGE_SIZE);

@@ -207,7 +211,7 @@ void BMIDEChannel::ata_read_sectors(bool slave_request, u16 capabilities)
     // Set transfer direction
     m_io_group.bus_master_base().value().out<u8>(0x8);

-    ata_access(Direction::Read, slave_request, lba, request.block_count(), capabilities);
+    ata_access(Direction::Read, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);

     // Start bus master
     m_io_group.bus_master_base().value().out<u8>(0x9);

Kernel/Storage/IDEChannel.cpp (+32 -23)

@@ -94,11 +94,12 @@ UNMAP_AFTER_INIT IDEChannel::~IDEChannel()

 void IDEChannel::start_request(AsyncBlockDeviceRequest& request, bool is_slave, u16 capabilities)
 {
-    ScopedSpinLock lock(m_request_lock);
+    LOCKER(m_lock);
+    VERIFY(m_current_request.is_null());

     dbgln_if(PATA_DEBUG, "IDEChannel::start_request");

-    m_current_request = &request;
+    m_current_request = request;
     m_current_request_block_index = 0;
     m_current_request_flushing_cache = false;

@@ -120,13 +121,11 @@ void IDEChannel::complete_current_request(AsyncDeviceRequest::RequestResult resu
     // before Processor::deferred_call_queue returns!
     g_io_work->queue([this, result]() {
         dbgln_if(PATA_DEBUG, "IDEChannel::complete_current_request result: {}", (int)result);
-        ScopedSpinLock lock(m_request_lock);
+        LOCKER(m_lock);
         VERIFY(m_current_request);
-        auto& request = *m_current_request;
-        m_current_request = nullptr;
-
-        lock.unlock();
-        request.complete(result);
+        auto current_request = m_current_request;
+        m_current_request.clear();
+        current_request->complete(result);
     });
 }

@@ -145,6 +144,7 @@ static void print_ide_status(u8 status)

 void IDEChannel::try_disambiguate_error()
 {
+    VERIFY(m_lock.is_locked());
     dbgln("IDEChannel: Error cause:");

     switch (m_device_error) {
@@ -208,10 +208,12 @@ void IDEChannel::handle_irq(const RegisterState&)
     // Now schedule reading/writing the buffer as soon as we leave the irq handler.
     // This is important so that we can safely access the buffers, which could
     // trigger page faults
-    Processor::deferred_call_queue([this]() {
+    g_io_work->queue([this]() {
+        LOCKER(m_lock);
         ScopedSpinLock lock(m_request_lock);
         if (m_current_request->request_type() == AsyncBlockDeviceRequest::Read) {
             dbgln_if(PATA_DEBUG, "IDEChannel: Read block {}/{}", m_current_request_block_index, m_current_request->block_count());
+
             if (ata_do_read_sector()) {
                 if (++m_current_request_block_index >= m_current_request->block_count()) {
                     complete_current_request(AsyncDeviceRequest::Success);
@@ -355,6 +357,8 @@ UNMAP_AFTER_INIT void IDEChannel::detect_disks()

 void IDEChannel::ata_access(Direction direction, bool slave_request, u64 lba, u8 block_count, u16 capabilities)
 {
+    VERIFY(m_lock.is_locked());
+    VERIFY(m_request_lock.is_locked());
     LBAMode lba_mode;
     u8 head = 0;

@@ -403,6 +407,9 @@ void IDEChannel::send_ata_io_command(LBAMode lba_mode, Direction direction) cons

 bool IDEChannel::ata_do_read_sector()
 {
+    VERIFY(m_lock.is_locked());
+    VERIFY(m_request_lock.is_locked());
+    VERIFY(!m_current_request.is_null());
     dbgln_if(PATA_DEBUG, "IDEChannel::ata_do_read_sector");
     auto& request = *m_current_request;
     auto out_buffer = request.buffer().offset(m_current_request_block_index * 512);
@@ -422,18 +429,21 @@ bool IDEChannel::ata_do_read_sector()
 // FIXME: This doesn't quite work and locks up reading LBA 3.
 void IDEChannel::ata_read_sectors(bool slave_request, u16 capabilities)
 {
-    auto& request = *m_current_request;
-    VERIFY(request.block_count() <= 256);
-    dbgln_if(PATA_DEBUG, "IDEChannel::ata_read_sectors");
-
-    auto lba = request.block_index();
-    dbgln_if(PATA_DEBUG, "IDEChannel: Reading {} sector(s) @ LBA {}", request.block_count(), lba);
+    VERIFY(m_lock.is_locked());
+    VERIFY(!m_current_request.is_null());
+    VERIFY(m_current_request->block_count() <= 256);

-    ata_access(Direction::Read, slave_request, lba, request.block_count(), capabilities);
+    ScopedSpinLock m_lock(m_request_lock);
+    dbgln_if(PATA_DEBUG, "IDEChannel::ata_read_sectors");
+    dbgln_if(PATA_DEBUG, "IDEChannel: Reading {} sector(s) @ LBA {}", m_current_request->block_count(), m_current_request->block_index());
+    ata_access(Direction::Read, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);
 }

 void IDEChannel::ata_do_write_sector()
 {
+    VERIFY(m_lock.is_locked());
+    VERIFY(m_request_lock.is_locked());
+    VERIFY(!m_current_request.is_null());
     auto& request = *m_current_request;

     io_delay();
@@ -457,14 +467,13 @@ void IDEChannel::ata_do_write_sector()
 // FIXME: I'm assuming this doesn't work based on the fact PIO read doesn't work.
 void IDEChannel::ata_write_sectors(bool slave_request, u16 capabilities)
 {
-    auto& request = *m_current_request;
-
-    VERIFY(request.block_count() <= 256);
-    auto start_sector = request.block_index();
-    u32 count = request.block_count();
-    dbgln_if(PATA_DEBUG, "IDEChannel: Writing {} sector(s) @ LBA {}", count, start_sector);
+    VERIFY(m_lock.is_locked());
+    VERIFY(!m_current_request.is_null());
+    VERIFY(m_current_request->block_count() <= 256);

-    ata_access(Direction::Write, slave_request, start_sector, request.block_count(), capabilities);
+    ScopedSpinLock m_lock(m_request_lock);
+    dbgln_if(PATA_DEBUG, "IDEChannel: Writing {} sector(s) @ LBA {}", m_current_request->block_count(), m_current_request->block_index());
+    ata_access(Direction::Write, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);
     ata_do_write_sector();
 }
 }

Kernel/Storage/IDEChannel.h (+3 -2)

@@ -163,10 +163,11 @@ protected:
     RefPtr<StorageDevice> m_master;
     RefPtr<StorageDevice> m_slave;

-    AsyncBlockDeviceRequest* m_current_request { nullptr };
-    u32 m_current_request_block_index { 0 };
+    RefPtr<AsyncBlockDeviceRequest> m_current_request;
+    size_t m_current_request_block_index { 0 };
     bool m_current_request_flushing_cache { false };
     SpinLock<u8> m_request_lock;
+    Lock m_lock { "IDEChannel" };

     IOAddressGroup m_io_group;
     NonnullRefPtr<IDEController> m_parent_controller;