BMIDEChannel.cpp 9.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220
  1. /*
  2. * Copyright (c) 2021, Liav A. <liavalb@hotmail.co.il>
  3. * All rights reserved.
  4. *
  5. * Redistribution and use in source and binary forms, with or without
  6. * modification, are permitted provided that the following conditions are met:
  7. *
  8. * 1. Redistributions of source code must retain the above copyright notice, this
  9. * list of conditions and the following disclaimer.
  10. *
  11. * 2. Redistributions in binary form must reproduce the above copyright notice,
  12. * this list of conditions and the following disclaimer in the documentation
  13. * and/or other materials provided with the distribution.
  14. *
  15. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  16. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  17. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  18. * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
  19. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  20. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  21. * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  22. * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  23. * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  24. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  25. */
  26. #include <Kernel/Storage/ATA.h>
  27. #include <Kernel/Storage/BMIDEChannel.h>
  28. #include <Kernel/Storage/IDEController.h>
  29. #include <Kernel/WorkQueue.h>
  30. namespace Kernel {
  31. UNMAP_AFTER_INIT NonnullRefPtr<BMIDEChannel> BMIDEChannel::create(const IDEController& ide_controller, IDEChannel::IOAddressGroup io_group, IDEChannel::ChannelType type)
  32. {
  33. return adopt(*new BMIDEChannel(ide_controller, io_group, type));
  34. }
// Constructor: delegates channel bookkeeping to the IDEChannel base class,
// then sets up the DMA structures (PRDT + bounce buffer) for this channel.
UNMAP_AFTER_INIT BMIDEChannel::BMIDEChannel(const IDEController& controller, IDEChannel::IOAddressGroup io_group, IDEChannel::ChannelType type)
    : IDEChannel(controller, io_group, type)
{
    initialize();
}
  40. UNMAP_AFTER_INIT void BMIDEChannel::initialize()
  41. {
  42. // Let's try to set up DMA transfers.
  43. PCI::enable_bus_mastering(m_parent_controller->pci_address());
  44. m_prdt_page = MM.allocate_supervisor_physical_page();
  45. m_dma_buffer_page = MM.allocate_supervisor_physical_page();
  46. if (m_dma_buffer_page.is_null() || m_prdt_page.is_null())
  47. return;
  48. m_prdt_region = MM.allocate_kernel_region(m_prdt_page->paddr(), PAGE_SIZE, "IDE PRDT", Region::Access::Read | Region::Access::Write);
  49. m_dma_buffer_region = MM.allocate_kernel_region(m_dma_buffer_page->paddr(), PAGE_SIZE, "IDE DMA region", Region::Access::Read | Region::Access::Write);
  50. prdt().end_of_table = 0x8000;
  51. }
  52. static void print_ide_status(u8 status)
  53. {
  54. dbgln("BMIDEChannel: print_ide_status: DRQ={} BSY={}, DRDY={}, DSC={}, DF={}, CORR={}, IDX={}, ERR={}",
  55. (status & ATA_SR_DRQ) != 0,
  56. (status & ATA_SR_BSY) != 0,
  57. (status & ATA_SR_DRDY) != 0,
  58. (status & ATA_SR_DSC) != 0,
  59. (status & ATA_SR_DF) != 0,
  60. (status & ATA_SR_CORR) != 0,
  61. (status & ATA_SR_IDX) != 0,
  62. (status & ATA_SR_ERR) != 0);
  63. }
// IRQ handler for this channel. Reads the device and bus-master status,
// filters out interrupts that did not originate here, and completes the
// pending request with Success or Failure. Runs in interrupt context.
void BMIDEChannel::handle_irq(const RegisterState&)
{
    // Reading the regular status register also services the device's IRQ line.
    u8 status = m_io_group.io_base().offset(ATA_REG_STATUS).in<u8>();
    m_entropy_source.add_random_event(status);
    VERIFY(m_io_group.bus_master_base().has_value());
    // Bus-master status register lives at offset 2; bit 2 (0x4) is the
    // "Interrupt" bit, set when this channel raised the IRQ.
    u8 bstatus = m_io_group.bus_master_base().value().offset(2).in<u8>();
    if (!(bstatus & 0x4)) {
        // interrupt not from this device, ignore
        dbgln_if(PATA_DEBUG, "BMIDEChannel: ignore interrupt");
        return;
    }
    ScopedSpinLock lock(m_request_lock);
    dbgln_if(PATA_DEBUG, "BMIDEChannel: interrupt: DRQ={}, BSY={}, DRDY={}",
        (status & ATA_SR_DRQ) != 0,
        (status & ATA_SR_BSY) != 0,
        (status & ATA_SR_DRDY) != 0);
    // A spurious IRQ with no request in flight: log and bail.
    if (!m_current_request) {
        dbgln("BMIDEChannel: IRQ but no pending request!");
        return;
    }
    if (status & ATA_SR_ERR) {
        // Device reported an error: capture the error register for later
        // inspection, log diagnostics, and fail the request.
        print_ide_status(status);
        m_device_error = m_io_group.io_base().offset(ATA_REG_ERROR).in<u8>();
        dbgln("BMIDEChannel: Error {:#02x}!", (u8)m_device_error);
        try_disambiguate_error();
        complete_current_request(AsyncDeviceRequest::Failure);
        return;
    }
    m_device_error = 0;
    complete_current_request(AsyncDeviceRequest::Success);
}
// Finish the in-flight request by deferring the completion (and, for reads,
// the copy-out to the caller's buffer) onto the I/O work queue. Must be
// called with m_request_lock held; may run from interrupt context, which is
// exactly why the buffer copy — a potential page-faulting operation — is
// pushed out of the IRQ handler.
void BMIDEChannel::complete_current_request(AsyncDeviceRequest::RequestResult result)
{
    // NOTE: this may be called from the interrupt handler!
    VERIFY(m_current_request);
    VERIFY(m_request_lock.is_locked());
    // Now schedule reading back the buffer as soon as we leave the irq handler.
    // This is important so that we can safely write the buffer back,
    // which could cause page faults. Note that this may be called immediately
    // before Processor::deferred_call_queue returns!
    g_io_work->queue([this, result]() {
        dbgln_if(PATA_DEBUG, "BMIDEChannel::complete_current_request result: {}", (int)result);
        ScopedSpinLock lock(m_request_lock);
        VERIFY(m_current_request);
        // Detach the request from the channel before completing it, so a new
        // request can be queued while this one is being finalized.
        auto current_request = m_current_request;
        m_current_request.clear();
        if (result == AsyncDeviceRequest::Success) {
            if (current_request->request_type() == AsyncBlockDeviceRequest::Read) {
                // Copy the DMA bounce buffer out to the requester's buffer;
                // this can fault, hence completion with MemoryFault on failure.
                if (!current_request->write_to_buffer(current_request->buffer(), m_dma_buffer_region->vaddr().as_ptr(), 512 * current_request->block_count())) {
                    // Drop the spinlock before completing — completion may
                    // run arbitrary callbacks.
                    lock.unlock();
                    current_request->complete(AsyncDeviceRequest::MemoryFault);
                    return;
                }
            }
            // I read somewhere that this may trigger a cache flush so let's do it.
            VERIFY(m_io_group.bus_master_base().has_value());
            m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 0x6);
        }
        // As above: complete outside the lock.
        lock.unlock();
        current_request->complete(result);
    });
}
  126. void BMIDEChannel::ata_write_sectors(bool slave_request, u16 capabilities)
  127. {
  128. VERIFY(m_lock.is_locked());
  129. VERIFY(!m_current_request.is_null());
  130. VERIFY(m_current_request->block_count() <= 256);
  131. ScopedSpinLock m_lock(m_request_lock);
  132. dbgln_if(PATA_DEBUG, "BMIDEChannel::ata_write_sectors_with_dma ({} x {})", m_current_request->block_index(), m_current_request->block_count());
  133. prdt().offset = m_dma_buffer_page->paddr().get();
  134. prdt().size = 512 * m_current_request->block_count();
  135. if (!m_current_request->read_from_buffer(m_current_request->buffer(), m_dma_buffer_region->vaddr().as_ptr(), 512 * m_current_request->block_count())) {
  136. complete_current_request(AsyncDeviceRequest::MemoryFault);
  137. return;
  138. }
  139. VERIFY(prdt().size <= PAGE_SIZE);
  140. VERIFY(m_io_group.bus_master_base().has_value());
  141. // Stop bus master
  142. m_io_group.bus_master_base().value().out<u8>(0);
  143. // Write the PRDT location
  144. m_io_group.bus_master_base().value().offset(4).out<u32>(m_prdt_page->paddr().get());
  145. // Turn on "Interrupt" and "Error" flag. The error flag should be cleared by hardware.
  146. m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 0x6);
  147. ata_access(Direction::Write, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);
  148. // Start bus master
  149. m_io_group.bus_master_base().value().out<u8>(0x1);
  150. }
  151. void BMIDEChannel::send_ata_io_command(LBAMode lba_mode, Direction direction) const
  152. {
  153. if (lba_mode != LBAMode::FortyEightBit) {
  154. m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(direction == Direction::Read ? ATA_CMD_READ_DMA : ATA_CMD_WRITE_DMA);
  155. } else {
  156. m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(direction == Direction::Read ? ATA_CMD_READ_DMA_EXT : ATA_CMD_WRITE_DMA_EXT);
  157. }
  158. }
  159. void BMIDEChannel::ata_read_sectors(bool slave_request, u16 capabilities)
  160. {
  161. VERIFY(m_lock.is_locked());
  162. VERIFY(!m_current_request.is_null());
  163. VERIFY(m_current_request->block_count() <= 256);
  164. ScopedSpinLock m_lock(m_request_lock);
  165. dbgln_if(PATA_DEBUG, "BMIDEChannel::ata_read_sectors_with_dma ({} x {})", m_current_request->block_index(), m_current_request->block_count());
  166. prdt().offset = m_dma_buffer_page->paddr().get();
  167. prdt().size = 512 * m_current_request->block_count();
  168. VERIFY(prdt().size <= PAGE_SIZE);
  169. VERIFY(m_io_group.bus_master_base().has_value());
  170. // Stop bus master
  171. m_io_group.bus_master_base().value().out<u8>(0);
  172. // Write the PRDT location
  173. m_io_group.bus_master_base().value().offset(4).out(m_prdt_page->paddr().get());
  174. // Turn on "Interrupt" and "Error" flag. The error flag should be cleared by hardware.
  175. m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 0x6);
  176. // Set transfer direction
  177. m_io_group.bus_master_base().value().out<u8>(0x8);
  178. ata_access(Direction::Read, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);
  179. // Start bus master
  180. m_io_group.bus_master_base().value().out<u8>(0x9);
  181. }
  182. }