/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/ByteBuffer.h>
#include <AK/Singleton.h>
#include <AK/StringView.h>
#include <Kernel/Arch/x86/IO.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Process.h>
#include <Kernel/Sections.h>
#include <Kernel/Storage/ATA/ATA.h>
#include <Kernel/Storage/ATA/ATADiskDevice.h>
#include <Kernel/Storage/ATA/GenericIDE/Channel.h>
#include <Kernel/Storage/ATA/GenericIDE/Controller.h>
#include <Kernel/WorkQueue.h>

namespace Kernel {

#define PATA_PRIMARY_IRQ 14
#define PATA_SECONDARY_IRQ 15

UNMAP_AFTER_INIT NonnullRefPtr<IDEChannel> IDEChannel::create(IDEController const& controller, IOAddressGroup io_group, ChannelType type)
{
    return adopt_ref(*new IDEChannel(controller, io_group, type));
}

UNMAP_AFTER_INIT NonnullRefPtr<IDEChannel> IDEChannel::create(IDEController const& controller, u8 irq, IOAddressGroup io_group, ChannelType type)
{
    return adopt_ref(*new IDEChannel(controller, irq, io_group, type));
}

RefPtr<StorageDevice> IDEChannel::master_device() const
{
    return m_master;
}

RefPtr<StorageDevice> IDEChannel::slave_device() const
{
    return m_slave;
}

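// Note: initialize() performs a software reset of the channel by pulsing the
// SRST bit (bit 2) in the Device Control register, waits for both the master
// and the slave drive to clear their busy flag, and then probes for attached disks.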
UNMAP_AFTER_INIT void IDEChannel::initialize()
{
    disable_irq();
    dbgln_if(PATA_DEBUG, "IDEChannel: {} IO base: {}", channel_type_string(), m_io_group.io_base());
    dbgln_if(PATA_DEBUG, "IDEChannel: {} control base: {}", channel_type_string(), m_io_group.control_base());
    if (m_io_group.bus_master_base().has_value())
        dbgln_if(PATA_DEBUG, "IDEChannel: {} bus master base: {}", channel_type_string(), m_io_group.bus_master_base().value());
    else
        dbgln_if(PATA_DEBUG, "IDEChannel: {} bus master base disabled", channel_type_string());

    // Reset the channel
    u8 device_control = m_io_group.control_base().in<u8>();
    // Wait 30 milliseconds
    IO::delay(30000);
    m_io_group.control_base().out<u8>(device_control | (1 << 2));
    // Wait 30 milliseconds
    IO::delay(30000);
    m_io_group.control_base().out<u8>(device_control);

    // Wait up to 30 seconds before failing
    if (!select_device_and_wait_until_not_busy(DeviceType::Master, 30000)) {
        dbgln("IDEChannel: reset failed, busy flag on master stuck");
        return;
    }
    // Wait up to 30 seconds before failing
    if (!select_device_and_wait_until_not_busy(DeviceType::Slave, 30000)) {
        dbgln("IDEChannel: reset failed, busy flag on slave stuck");
        return;
    }

    detect_disks();

    // Note: calling detect_disks() could generate an interrupt; clear it if that's the case.
    clear_pending_interrupts();
}

UNMAP_AFTER_INIT IDEChannel::IDEChannel(IDEController const& controller, u8 irq, IOAddressGroup io_group, ChannelType type)
    : IRQHandler(irq)
    , m_channel_type(type)
    , m_io_group(io_group)
    , m_parent_controller(controller)
{
    initialize();
}

UNMAP_AFTER_INIT IDEChannel::IDEChannel(IDEController const& controller, IOAddressGroup io_group, ChannelType type)
    : IRQHandler(type == ChannelType::Primary ? PATA_PRIMARY_IRQ : PATA_SECONDARY_IRQ)
    , m_channel_type(type)
    , m_io_group(io_group)
    , m_parent_controller(controller)
{
    initialize();
}

void IDEChannel::clear_pending_interrupts() const
{
    // Reading the status register clears any pending interrupt condition on the device.
    m_io_group.io_base().offset(ATA_REG_STATUS).in<u8>();
}

UNMAP_AFTER_INIT IDEChannel::~IDEChannel() = default;

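// Note: start_request() only selects the device and programs the command
// (and, for writes, pushes the first sector). The rest of the transfer is
// driven by the per-sector interrupts handled in handle_irq().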
void IDEChannel::start_request(AsyncBlockDeviceRequest& request, bool is_slave, u16 capabilities)
{
    MutexLocker locker(m_lock);
    VERIFY(m_current_request.is_null());
    dbgln_if(PATA_DEBUG, "IDEChannel::start_request");

    m_current_request = request;
    m_current_request_block_index = 0;
    m_current_request_flushing_cache = false;

    if (request.request_type() == AsyncBlockDeviceRequest::Read)
        ata_read_sectors(is_slave, capabilities);
    else
        ata_write_sectors(is_slave, capabilities);
}

void IDEChannel::complete_current_request(AsyncDeviceRequest::RequestResult result)
{
    // NOTE: this may be called from the interrupt handler!
    VERIFY(m_current_request);
    VERIFY(m_request_lock.is_locked());

    // Now schedule reading back the buffer as soon as we leave the irq handler.
    // This is important so that we can safely write the buffer back,
    // which could cause page faults. Note that this may be called immediately
    // before Processor::deferred_call_queue returns!
    auto work_item_creation_result = g_io_work->try_queue([this, result]() {
        dbgln_if(PATA_DEBUG, "IDEChannel::complete_current_request result: {}", (int)result);
        MutexLocker locker(m_lock);
        VERIFY(m_current_request);
        auto current_request = m_current_request;
        m_current_request.clear();
        current_request->complete(result);
    });
    if (work_item_creation_result.is_error()) {
        auto current_request = m_current_request;
        m_current_request.clear();
        current_request->complete(AsyncDeviceRequest::OutOfMemory);
    }
}

static void print_ide_status(u8 status)
{
    dbgln("IDEChannel: print_ide_status: DRQ={} BSY={}, DRDY={}, DSC={}, DF={}, CORR={}, IDX={}, ERR={}",
        (status & ATA_SR_DRQ) != 0,
        (status & ATA_SR_BSY) != 0,
        (status & ATA_SR_DRDY) != 0,
        (status & ATA_SR_DSC) != 0,
        (status & ATA_SR_DF) != 0,
        (status & ATA_SR_CORR) != 0,
        (status & ATA_SR_IDX) != 0,
        (status & ATA_SR_ERR) != 0);
}

void IDEChannel::try_disambiguate_error()
{
    VERIFY(m_request_lock.is_locked());
    dbgln("IDEChannel: Error cause:");

    switch (m_device_error) {
    case ATA_ER_BBK:
        dbgln("IDEChannel: - Bad block");
        break;
    case ATA_ER_UNC:
        dbgln("IDEChannel: - Uncorrectable data");
        break;
    case ATA_ER_MC:
        dbgln("IDEChannel: - Media changed");
        break;
    case ATA_ER_IDNF:
        dbgln("IDEChannel: - ID mark not found");
        break;
    case ATA_ER_MCR:
        dbgln("IDEChannel: - Media change request");
        break;
    case ATA_ER_ABRT:
        dbgln("IDEChannel: - Command aborted");
        break;
    case ATA_ER_TK0NF:
        dbgln("IDEChannel: - Track 0 not found");
        break;
    case ATA_ER_AMNF:
        dbgln("IDEChannel: - No address mark");
        break;
    default:
        dbgln("IDEChannel: - No one knows");
        break;
    }
}

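// Note: The IRQ handler only acknowledges the interrupt (reading the status
// register does that) and checks for errors. The actual PIO data transfer is
// deferred to the IO work queue, because copying to or from the request's
// buffer may page fault, and that must not happen in interrupt context.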
bool IDEChannel::handle_irq(RegisterState const&)
{
    u8 status = m_io_group.io_base().offset(ATA_REG_STATUS).in<u8>();
    m_entropy_source.add_random_event(status);

    SpinlockLocker lock(m_request_lock);
    dbgln_if(PATA_DEBUG, "IDEChannel: interrupt: DRQ={}, BSY={}, DRDY={}",
        (status & ATA_SR_DRQ) != 0,
        (status & ATA_SR_BSY) != 0,
        (status & ATA_SR_DRDY) != 0);

    if (!m_current_request) {
        dbgln("IDEChannel: IRQ but no pending request!");
        return false;
    }

    if (status & ATA_SR_ERR) {
        print_ide_status(status);
        m_device_error = m_io_group.io_base().offset(ATA_REG_ERROR).in<u8>();
        dbgln("IDEChannel: Error {:#02x}!", (u8)m_device_error);
        try_disambiguate_error();
        complete_current_request(AsyncDeviceRequest::Failure);
        return true;
    }
    m_device_error = 0;

    // Now schedule reading/writing the buffer as soon as we leave the irq handler.
    // This is important so that we can safely access the buffers, which could
    // trigger page faults.
    auto work_item_creation_result = g_io_work->try_queue([this]() {
        MutexLocker locker(m_lock);
        SpinlockLocker lock(m_request_lock);
        if (m_current_request->request_type() == AsyncBlockDeviceRequest::Read) {
            dbgln_if(PATA_DEBUG, "IDEChannel: Read block {}/{}", m_current_request_block_index, m_current_request->block_count());

            if (ata_do_read_sector()) {
                if (++m_current_request_block_index >= m_current_request->block_count()) {
                    complete_current_request(AsyncDeviceRequest::Success);
                    return;
                }
                // Wait for the next block
                enable_irq();
            }
        } else {
            if (!m_current_request_flushing_cache) {
                dbgln_if(PATA_DEBUG, "IDEChannel: Wrote block {}/{}", m_current_request_block_index, m_current_request->block_count());
                if (++m_current_request_block_index >= m_current_request->block_count()) {
                    // We wrote the last block, so flush the cache.
                    VERIFY(!m_current_request_flushing_cache);
                    m_current_request_flushing_cache = true;
                    m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_CACHE_FLUSH);
                } else {
                    // Write the next block.
                    ata_do_write_sector();
                }
            } else {
                complete_current_request(AsyncDeviceRequest::Success);
            }
        }
    });
    if (work_item_creation_result.is_error()) {
        auto current_request = m_current_request;
        m_current_request.clear();
        current_request->complete(AsyncDeviceRequest::OutOfMemory);
    }
    return true;
}

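// Note: Reading the alternate status port a few times is the traditional way
// of giving the device roughly 400ns to settle after a register write.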
static void io_delay()
{
    for (int i = 0; i < 4; ++i)
        IO::in8(0x3f6);
}

bool IDEChannel::select_device_and_wait_until_not_busy(DeviceType device_type, size_t milliseconds_timeout)
{
    IO::delay(20);
    u8 slave = device_type == DeviceType::Slave;
    m_io_group.io_base().offset(ATA_REG_HDDEVSEL).out<u8>(0xA0 | (slave << 4));
    IO::delay(20);

    size_t time_elapsed = 0;
    while (m_io_group.control_base().in<u8>() & ATA_SR_BSY && time_elapsed <= milliseconds_timeout) {
        IO::delay(1000);
        time_elapsed++;
    }
    return time_elapsed <= milliseconds_timeout;
}

bool IDEChannel::wait_until_not_busy(size_t milliseconds_timeout)
{
    size_t time_elapsed = 0;
    while (m_io_group.control_base().in<u8>() & ATA_SR_BSY && time_elapsed <= milliseconds_timeout) {
        IO::delay(1000);
        time_elapsed++;
    }
    return time_elapsed <= milliseconds_timeout;
}

StringView IDEChannel::channel_type_string() const
{
    if (m_channel_type == ChannelType::Primary)
        return "Primary"sv;
    return "Secondary"sv;
}

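// Note: Detection follows the usual IDENTIFY sequence: select the drive, zero
// the sector count and LBA registers, issue ATA_CMD_IDENTIFY and poll the
// status register. An ATAPI device aborts the command and instead leaves a
// signature in the LBA1/LBA2 registers, which is what the check below looks for.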
UNMAP_AFTER_INIT void IDEChannel::detect_disks()
{
    auto channel_string = [](u8 i) -> StringView {
        if (i == 0)
            return "master"sv;
        return "slave"sv;
    };

    // There are only two possible disks connected to a channel
    for (auto i = 0; i < 2; i++) {
        if (!select_device_and_wait_until_not_busy(i == 0 ? DeviceType::Master : DeviceType::Slave, 32000)) {
            dbgln("IDEChannel: Timeout waiting for busy flag to clear during {} {} detection", channel_type_string(), channel_string(i));
            continue;
        }

        auto status = m_io_group.control_base().in<u8>();
        if (status == 0x0) {
            dbgln_if(PATA_DEBUG, "IDEChannel: No {} {} disk detected!", channel_type_string(), channel_string(i));
            continue;
        }

        m_io_group.io_base().offset(ATA_REG_SECCOUNT0).out<u8>(0);
        m_io_group.io_base().offset(ATA_REG_LBA0).out<u8>(0);
        m_io_group.io_base().offset(ATA_REG_LBA1).out<u8>(0);
        m_io_group.io_base().offset(ATA_REG_LBA2).out<u8>(0);
        m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_IDENTIFY); // Send the ATA_IDENTIFY command

        // Wait up to 2 seconds for the BSY flag to clear
        if (!wait_until_not_busy(2000)) {
            dbgln_if(PATA_DEBUG, "IDEChannel: No {} {} disk detected, BSY flag was not reset!", channel_type_string(), channel_string(i));
            continue;
        }

        bool check_for_atapi = false;
        bool device_presence = true;
        bool command_set_is_atapi = false;

        size_t milliseconds_elapsed = 0;
        for (;;) {
            // Wait about 2 seconds before giving up
            if (milliseconds_elapsed > 2000)
                break;
            u8 status = m_io_group.control_base().in<u8>();
            if (status & ATA_SR_ERR) {
                dbgln_if(PATA_DEBUG, "IDEChannel: {} {} device is not ATA. Will check for ATAPI.", channel_type_string(), channel_string(i));
                check_for_atapi = true;
                break;
            }

            if (!(status & ATA_SR_BSY) && (status & ATA_SR_DRQ)) {
                dbgln_if(PATA_DEBUG, "IDEChannel: {} {} device appears to be ATA.", channel_type_string(), channel_string(i));
                break;
            }

            if (status == 0 || status == 0xFF) {
                dbgln_if(PATA_DEBUG, "IDEChannel: {} {} device presence - none.", channel_type_string(), channel_string(i));
                device_presence = false;
                break;
            }

            IO::delay(1000);
            milliseconds_elapsed++;
        }

        if (!device_presence) {
            continue;
        }
        // The loop above waits at most ~2 seconds, so anything beyond that is a timeout.
        if (milliseconds_elapsed > 2000) {
            dbgln_if(PATA_DEBUG, "IDEChannel: {} {} device state unknown. Timeout exceeded.", channel_type_string(), channel_string(i));
            continue;
        }

        if (check_for_atapi) {
            u8 cl = m_io_group.io_base().offset(ATA_REG_LBA1).in<u8>();
            u8 ch = m_io_group.io_base().offset(ATA_REG_LBA2).in<u8>();

            if ((cl == 0x14 && ch == 0xEB) || (cl == 0x69 && ch == 0x96)) {
                command_set_is_atapi = true;
                dbgln("IDEChannel: {} {} device appears to be ATAPI. We're going to ignore it for now as we don't support it.", channel_type_string(), channel_string(i));
                continue;
            } else {
                dbgln("IDEChannel: {} {} device doesn't appear to be ATA or ATAPI. Ignoring it.", channel_type_string(), channel_string(i));
                continue;
            }
        }

        // FIXME: Handle possible OOM situation here.
        ByteBuffer wbuf = ByteBuffer::create_uninitialized(m_logical_sector_size).release_value_but_fixme_should_propagate_errors();
        ByteBuffer bbuf = ByteBuffer::create_uninitialized(m_logical_sector_size).release_value_but_fixme_should_propagate_errors();
        u8* b = bbuf.data();
        u16* w = (u16*)wbuf.data();

        for (u32 i = 0; i < 256; ++i) {
            u16 data = m_io_group.io_base().offset(ATA_REG_DATA).in<u16>();
            *(w++) = data;
            *(b++) = MSB(data);
            *(b++) = LSB(data);
        }

        // "Unpad" the device name string.
        for (u32 i = 93; i > 54 && bbuf[i] == ' '; --i)
            bbuf[i] = 0;

        ATAIdentifyBlock volatile& identify_block = (ATAIdentifyBlock volatile&)(*wbuf.data());

        u16 capabilities = identify_block.capabilities[0];

        // If the drive is so old that it doesn't support LBA, ignore it.
        if (!(capabilities & ATA_CAP_LBA))
            continue;

        u64 max_addressable_block = identify_block.max_28_bit_addressable_logical_sector;
        // If we support 48-bit LBA, use that value instead.
        if (identify_block.commands_and_feature_sets_supported[1] & (1 << 10))
            max_addressable_block = identify_block.user_addressable_logical_sectors_count;

        dbgln("IDEChannel: {} {} {} device found: Name={}, Capacity={}, Capabilities={:#04x}", channel_type_string(), channel_string(i), !command_set_is_atapi ? "ATA" : "ATAPI", ((char*)bbuf.data() + 54), max_addressable_block * m_logical_sector_size, capabilities);

        ATADevice::Address address = { m_channel_type == ChannelType::Primary ? static_cast<u8>(0) : static_cast<u8>(1), static_cast<u8>(i) };
        if (i == 0) {
            m_master = ATADiskDevice::create(m_parent_controller, address, capabilities, m_logical_sector_size, max_addressable_block);
        } else {
            m_slave = ATADiskDevice::create(m_parent_controller, address, capabilities, m_logical_sector_size, max_addressable_block);
        }
    }
}

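// Note: ata_access() programs the taskfile registers for either a 28-bit or a
// 48-bit LBA transfer. LBAs that don't fit in 28 bits need the EXT command
// variants and the extra LBA3-LBA5 registers; for 28-bit commands, the top
// four LBA bits go into the device/head register instead.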
void IDEChannel::ata_access(Direction direction, bool slave_request, u64 lba, u8 block_count, u16 capabilities)
{
    VERIFY(m_lock.is_locked());
    VERIFY(m_request_lock.is_locked());
    LBAMode lba_mode;
    u8 head = 0;

    VERIFY(capabilities & ATA_CAP_LBA);
    if (lba >= 0x10000000) {
        lba_mode = LBAMode::FortyEightBit;
        head = 0;
    } else {
        lba_mode = LBAMode::TwentyEightBit;
        head = (lba & 0xF000000) >> 24;
    }

    // Wait 1 second
    wait_until_not_busy(1000);

    // We need to select the drive and then we wait 20 microseconds... and it doesn't hurt anything so let's just do it.
    m_io_group.io_base().offset(ATA_REG_HDDEVSEL).out<u8>(0xE0 | (static_cast<u8>(slave_request) << 4) | head);
    IO::delay(20);

    if (lba_mode == LBAMode::FortyEightBit) {
        m_io_group.io_base().offset(ATA_REG_SECCOUNT1).out<u8>(0);
        m_io_group.io_base().offset(ATA_REG_LBA3).out<u8>((lba & 0xFF000000) >> 24);
        m_io_group.io_base().offset(ATA_REG_LBA4).out<u8>((lba & 0xFF00000000ull) >> 32);
        m_io_group.io_base().offset(ATA_REG_LBA5).out<u8>((lba & 0xFF0000000000ull) >> 40);
    }

    m_io_group.io_base().offset(ATA_REG_SECCOUNT0).out<u8>(block_count);
    m_io_group.io_base().offset(ATA_REG_LBA0).out<u8>((lba & 0x000000FF) >> 0);
    m_io_group.io_base().offset(ATA_REG_LBA1).out<u8>((lba & 0x0000FF00) >> 8);
    m_io_group.io_base().offset(ATA_REG_LBA2).out<u8>((lba & 0x00FF0000) >> 16);

    for (;;) {
        auto status = m_io_group.control_base().in<u8>();
        if (!(status & ATA_SR_BSY) && (status & ATA_SR_DRDY))
            break;
    }
    send_ata_io_command(lba_mode, direction);
    enable_irq();
}

void IDEChannel::send_ata_io_command(LBAMode lba_mode, Direction direction) const
{
    if (lba_mode != LBAMode::FortyEightBit) {
        m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(direction == Direction::Read ? ATA_CMD_READ_PIO : ATA_CMD_WRITE_PIO);
    } else {
        m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(direction == Direction::Read ? ATA_CMD_READ_PIO_EXT : ATA_CMD_WRITE_PIO_EXT);
    }
}

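// Note: Each block is moved one 16-bit word at a time through the data port.
// write_to_buffer_buffered() copies the words into the request's buffer (which
// may live in userspace); if that copy fails, the error is reported back here
// and translated into a MemoryFault completion.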
bool IDEChannel::ata_do_read_sector()
{
    VERIFY(m_lock.is_locked());
    VERIFY(m_request_lock.is_locked());
    VERIFY(!m_current_request.is_null());
    dbgln_if(PATA_DEBUG, "IDEChannel::ata_do_read_sector");

    auto& request = *m_current_request;
    auto block_size = m_current_request->block_size();
    auto out_buffer = request.buffer().offset(m_current_request_block_index * block_size);
    auto result = request.write_to_buffer_buffered<m_logical_sector_size>(out_buffer, block_size, [&](Bytes bytes) {
        for (size_t i = 0; i < bytes.size(); i += sizeof(u16))
            *(u16*)bytes.offset_pointer(i) = IO::in16(m_io_group.io_base().offset(ATA_REG_DATA).get());
        return bytes.size();
    });
    if (result.is_error()) {
        // TODO: Do we need to abort the PATA read if this wasn't the last block?
        complete_current_request(AsyncDeviceRequest::MemoryFault);
        return false;
    }
    return true;
}

// FIXME: This doesn't quite work and locks up reading LBA 3.
void IDEChannel::ata_read_sectors(bool slave_request, u16 capabilities)
{
    VERIFY(m_lock.is_locked());
    VERIFY(!m_current_request.is_null());
    VERIFY(m_current_request->block_count() <= 256);
    SpinlockLocker locker(m_request_lock);

    dbgln_if(PATA_DEBUG, "IDEChannel::ata_read_sectors");
    dbgln_if(PATA_DEBUG, "IDEChannel: Reading {} sector(s) @ LBA {}", m_current_request->block_count(), m_current_request->block_index());

    ata_access(Direction::Read, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);
}

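// Note: Before data can be pushed, the device has to report "not busy" and
// "data request" (DRQ). The polling loop below has no timeout, so a wedged
// device will hang here.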
void IDEChannel::ata_do_write_sector()
{
    VERIFY(m_lock.is_locked());
    VERIFY(m_request_lock.is_locked());
    VERIFY(!m_current_request.is_null());

    auto& request = *m_current_request;

    io_delay();
    while ((m_io_group.control_base().in<u8>() & ATA_SR_BSY) || !(m_io_group.control_base().in<u8>() & ATA_SR_DRQ))
        ;

    u8 status = m_io_group.control_base().in<u8>();
    VERIFY(status & ATA_SR_DRQ);

    auto block_size = m_current_request->block_size();
    auto in_buffer = request.buffer().offset(m_current_request_block_index * block_size);
    dbgln_if(PATA_DEBUG, "IDEChannel: Writing {} bytes (part {}) (status={:#02x})...", block_size, m_current_request_block_index, status);
    auto result = request.read_from_buffer_buffered<m_logical_sector_size>(in_buffer, block_size, [&](ReadonlyBytes readonly_bytes) {
        for (size_t i = 0; i < readonly_bytes.size(); i += sizeof(u16))
            IO::out16(m_io_group.io_base().offset(ATA_REG_DATA).get(), *(const u16*)readonly_bytes.offset(i));
        return readonly_bytes.size();
    });

    if (result.is_error())
        complete_current_request(AsyncDeviceRequest::MemoryFault);
}

// FIXME: I'm assuming this doesn't work based on the fact PIO read doesn't work.
void IDEChannel::ata_write_sectors(bool slave_request, u16 capabilities)
{
    VERIFY(m_lock.is_locked());
    VERIFY(!m_current_request.is_null());
    VERIFY(m_current_request->block_count() <= 256);
    SpinlockLocker locker(m_request_lock);

    dbgln_if(PATA_DEBUG, "IDEChannel: Writing {} sector(s) @ LBA {}", m_current_request->block_count(), m_current_request->block_index());

    ata_access(Direction::Write, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);
    ata_do_write_sector();
}

}