Kernel/Storage: Merge IDE functionality from BusMasterChannel to Channel

This simplifies the current flow and is a step toward
further improvements in the next commits.
Liav A 2021-11-20 17:23:09 +02:00 committed by Linus Groh
parent ade672c576
commit 2c987367e6
Notes: sideshowbarker 2024-07-17 08:47:12 +09:00
11 changed files with 294 additions and 358 deletions
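For orientation, the shape of the merged channel after this commit looks roughly as follows. This is a condensed sketch assembled from the hunks below, not verbatim kernel code; the bookkeeping inside initialize() and start_request() is elided.

// DMA is opted into once, at initialization time, unless the owning
// controller forces PIO (the ISA controller passes force_pio = true).
UNMAP_AFTER_INIT void IDEChannel::initialize(bool force_pio)
{
    // ... existing channel setup and disk detection ...
    if (!force_pio) {
        m_dma_enabled = true;
        // Allocate the PRDT page and the DMA buffer page (see the full hunk below).
    }
}

// Request submission and IRQ handling then route to the matching path.
void IDEChannel::start_request(AsyncBlockDeviceRequest& request, bool is_slave, u16 capabilities)
{
    // ... request bookkeeping ...
    if (m_dma_enabled) {
        if (request.request_type() == AsyncBlockDeviceRequest::Read)
            ata_read_sectors_with_dma(is_slave, capabilities);
        else
            ata_write_sectors_with_dma(is_slave, capabilities);
        return;
    }
    if (request.request_type() == AsyncBlockDeviceRequest::Read)
        ata_read_sectors_with_pio(is_slave, capabilities);
    else
        ata_write_sectors_with_pio(is_slave, capabilities);
}

bool IDEChannel::handle_irq(RegisterState const&)
{
    if (!m_dma_enabled)
        return handle_irq_for_pio_transaction();
    return handle_irq_for_dma_transaction();
}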

Kernel/CMakeLists.txt

@@ -95,7 +95,6 @@ set(KERNEL_SOURCES
Storage/ATA/AHCI/Controller.cpp
Storage/ATA/AHCI/Port.cpp
Storage/ATA/AHCI/InterruptHandler.cpp
Storage/ATA/GenericIDE/BusMasterChannel.cpp
Storage/ATA/GenericIDE/Controller.cpp
Storage/ATA/GenericIDE/Channel.cpp
Storage/ATA/GenericIDE/ISAController.cpp

Kernel/Storage/ATA/GenericIDE/BusMasterChannel.cpp

@@ -1,241 +0,0 @@
/*
* Copyright (c) 2021, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Bus/PCI/API.h>
#include <Kernel/Sections.h>
#include <Kernel/Storage/ATA/Definitions.h>
#include <Kernel/Storage/ATA/GenericIDE/BusMasterChannel.h>
#include <Kernel/Storage/ATA/GenericIDE/Controller.h>
#include <Kernel/WorkQueue.h>
namespace Kernel {
UNMAP_AFTER_INIT NonnullRefPtr<BMIDEChannel> BMIDEChannel::create(IDEController const& ide_controller, IDEChannel::IOAddressGroup io_group, IDEChannel::ChannelType type)
{
return adopt_ref(*new BMIDEChannel(ide_controller, io_group, type));
}
UNMAP_AFTER_INIT NonnullRefPtr<BMIDEChannel> BMIDEChannel::create(IDEController const& ide_controller, u8 irq, IDEChannel::IOAddressGroup io_group, IDEChannel::ChannelType type)
{
return adopt_ref(*new BMIDEChannel(ide_controller, irq, io_group, type));
}
UNMAP_AFTER_INIT BMIDEChannel::BMIDEChannel(IDEController const& controller, IDEChannel::IOAddressGroup io_group, IDEChannel::ChannelType type)
: IDEChannel(controller, io_group, type)
{
initialize();
}
UNMAP_AFTER_INIT BMIDEChannel::BMIDEChannel(IDEController const& controller, u8 irq, IDEChannel::IOAddressGroup io_group, IDEChannel::ChannelType type)
: IDEChannel(controller, irq, io_group, type)
{
initialize();
}
UNMAP_AFTER_INIT void BMIDEChannel::initialize()
{
VERIFY(m_io_group.bus_master_base().has_value());
// Let's try to set up DMA transfers.
{
auto region_or_error = MM.allocate_dma_buffer_page("IDE PRDT"sv, Memory::Region::Access::ReadWrite, m_prdt_page);
if (region_or_error.is_error())
TODO();
m_prdt_region = region_or_error.release_value();
}
{
auto region_or_error = MM.allocate_dma_buffer_page("IDE DMA region"sv, Memory::Region::Access::ReadWrite, m_dma_buffer_page);
if (region_or_error.is_error())
TODO();
m_dma_buffer_region = region_or_error.release_value();
}
prdt().end_of_table = 0x8000;
// clear bus master interrupt status
m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 4);
}
static void print_ide_status(u8 status)
{
dbgln("BMIDEChannel: print_ide_status: DRQ={} BSY={}, DRDY={}, DSC={}, DF={}, CORR={}, IDX={}, ERR={}",
(status & ATA_SR_DRQ) != 0,
(status & ATA_SR_BSY) != 0,
(status & ATA_SR_DRDY) != 0,
(status & ATA_SR_DSC) != 0,
(status & ATA_SR_DF) != 0,
(status & ATA_SR_CORR) != 0,
(status & ATA_SR_IDX) != 0,
(status & ATA_SR_ERR) != 0);
}
bool BMIDEChannel::handle_irq(RegisterState const&)
{
u8 status = m_io_group.io_base().offset(ATA_REG_STATUS).in<u8>();
m_entropy_source.add_random_event(status);
VERIFY(m_io_group.bus_master_base().has_value());
u8 bstatus = m_io_group.bus_master_base().value().offset(2).in<u8>();
if (!(bstatus & 0x4)) {
// interrupt not from this device, ignore
dbgln_if(PATA_DEBUG, "BMIDEChannel: ignore interrupt");
return false;
}
// clear bus master interrupt status
m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 4);
SpinlockLocker lock(m_request_lock);
dbgln_if(PATA_DEBUG, "BMIDEChannel: interrupt: DRQ={}, BSY={}, DRDY={}",
(status & ATA_SR_DRQ) != 0,
(status & ATA_SR_BSY) != 0,
(status & ATA_SR_DRDY) != 0);
if (!m_current_request) {
dbgln("BMIDEChannel: IRQ but no pending request!");
return false;
}
if (status & ATA_SR_ERR) {
print_ide_status(status);
m_device_error = m_io_group.io_base().offset(ATA_REG_ERROR).in<u8>();
dbgln("BMIDEChannel: Error {:#02x}!", (u8)m_device_error);
try_disambiguate_error();
complete_current_request(AsyncDeviceRequest::Failure);
return true;
}
m_device_error = 0;
complete_current_request(AsyncDeviceRequest::Success);
return true;
}
void BMIDEChannel::complete_current_request(AsyncDeviceRequest::RequestResult result)
{
// NOTE: this may be called from the interrupt handler!
VERIFY(m_current_request);
VERIFY(m_request_lock.is_locked());
// Now schedule reading back the buffer as soon as we leave the irq handler.
// This is important so that we can safely write the buffer back,
// which could cause page faults. Note that this may be called immediately
// before Processor::deferred_call_queue returns!
auto work_item_creation_result = g_io_work->try_queue([this, result]() {
dbgln_if(PATA_DEBUG, "BMIDEChannel::complete_current_request result: {}", (int)result);
SpinlockLocker lock(m_request_lock);
VERIFY(m_current_request);
auto current_request = m_current_request;
m_current_request.clear();
if (result == AsyncDeviceRequest::Success) {
if (current_request->request_type() == AsyncBlockDeviceRequest::Read) {
if (auto result = current_request->write_to_buffer(current_request->buffer(), m_dma_buffer_region->vaddr().as_ptr(), current_request->buffer_size()); result.is_error()) {
lock.unlock();
current_request->complete(AsyncDeviceRequest::MemoryFault);
return;
}
}
// I read somewhere that this may trigger a cache flush so let's do it.
VERIFY(m_io_group.bus_master_base().has_value());
m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 0x6);
}
lock.unlock();
current_request->complete(result);
});
if (work_item_creation_result.is_error()) {
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(AsyncDeviceRequest::OutOfMemory);
}
}
void BMIDEChannel::ata_write_sectors(bool slave_request, u16 capabilities)
{
VERIFY(m_lock.is_locked());
VERIFY(!m_current_request.is_null());
VERIFY(m_current_request->block_count() <= 256);
SpinlockLocker locker(m_request_lock);
dbgln_if(PATA_DEBUG, "BMIDEChannel::ata_write_sectors ({} x {})", m_current_request->block_index(), m_current_request->block_count());
prdt().offset = m_dma_buffer_page->paddr().get();
prdt().size = m_current_request->buffer_size();
if (auto result = m_current_request->read_from_buffer(m_current_request->buffer(), m_dma_buffer_region->vaddr().as_ptr(), m_current_request->buffer_size()); result.is_error()) {
complete_current_request(AsyncDeviceRequest::MemoryFault);
return;
}
// Note: This is a fix for a quirk for an IDE controller on ICH7 machine.
// We need to select the drive and then we wait 10 microseconds... and it doesn't hurt anything
m_io_group.io_base().offset(ATA_REG_HDDEVSEL).out<u8>(0xA0 | ((slave_request ? 1 : 0) << 4));
IO::delay(10);
VERIFY(prdt().size <= PAGE_SIZE);
VERIFY(m_io_group.bus_master_base().has_value());
// Stop bus master
m_io_group.bus_master_base().value().out<u8>(0);
// Write the PRDT location
m_io_group.bus_master_base().value().offset(4).out<u32>(m_prdt_page->paddr().get());
// Turn on "Interrupt" and "Error" flag. The error flag should be cleared by hardware.
m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 0x6);
ata_access(Direction::Write, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);
// Start bus master
m_io_group.bus_master_base().value().out<u8>(0x1);
}
void BMIDEChannel::send_ata_io_command(LBAMode lba_mode, Direction direction) const
{
if (lba_mode != LBAMode::FortyEightBit) {
m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(direction == Direction::Read ? ATA_CMD_READ_DMA : ATA_CMD_WRITE_DMA);
} else {
m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(direction == Direction::Read ? ATA_CMD_READ_DMA_EXT : ATA_CMD_WRITE_DMA_EXT);
}
}
void BMIDEChannel::ata_read_sectors(bool slave_request, u16 capabilities)
{
VERIFY(m_lock.is_locked());
VERIFY(!m_current_request.is_null());
VERIFY(m_current_request->block_count() <= 256);
SpinlockLocker locker(m_request_lock);
dbgln_if(PATA_DEBUG, "BMIDEChannel::ata_read_sectors ({} x {})", m_current_request->block_index(), m_current_request->block_count());
// Note: This is a fix for a quirk for an IDE controller on ICH7 machine.
// We need to select the drive and then we wait 10 microseconds... and it doesn't hurt anything
m_io_group.io_base().offset(ATA_REG_HDDEVSEL).out<u8>(0xA0 | ((slave_request ? 1 : 0) << 4));
IO::delay(10);
prdt().offset = m_dma_buffer_page->paddr().get();
prdt().size = m_current_request->buffer_size();
VERIFY(prdt().size <= PAGE_SIZE);
VERIFY(m_io_group.bus_master_base().has_value());
// Stop bus master
m_io_group.bus_master_base().value().out<u8>(0);
// Write the PRDT location
m_io_group.bus_master_base().value().offset(4).out<u32>(m_prdt_page->paddr().get());
// Set transfer direction
m_io_group.bus_master_base().value().out<u8>(0x8);
// Turn on "Interrupt" and "Error" flag. The error flag should be cleared by hardware.
m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 0x6);
ata_access(Direction::Read, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);
// Start bus master
m_io_group.bus_master_base().value().out<u8>(0x9);
}
}

Kernel/Storage/ATA/GenericIDE/BusMasterChannel.h

@@ -1,55 +0,0 @@
/*
* Copyright (c) 2021, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/OwnPtr.h>
#include <Kernel/Storage/ATA/GenericIDE/Channel.h>
namespace Kernel {
class AsyncBlockDeviceRequest;
struct [[gnu::packed]] PhysicalRegionDescriptor {
u32 offset;
u16 size { 0 };
u16 end_of_table { 0 };
};
class IDEController;
class BMIDEChannel final : public IDEChannel {
friend class IDEController;
friend class PATADiskDevice;
public:
static NonnullRefPtr<BMIDEChannel> create(IDEController const&, IDEChannel::IOAddressGroup, IDEChannel::ChannelType type);
static NonnullRefPtr<BMIDEChannel> create(IDEController const&, u8 irq, IDEChannel::IOAddressGroup, IDEChannel::ChannelType type);
virtual ~BMIDEChannel() override {};
virtual bool is_dma_enabled() const override { return true; };
private:
BMIDEChannel(IDEController const&, IDEChannel::IOAddressGroup, IDEChannel::ChannelType type);
BMIDEChannel(IDEController const&, u8 irq, IDEChannel::IOAddressGroup, IDEChannel::ChannelType type);
void initialize();
void complete_current_request(AsyncDeviceRequest::RequestResult);
//^ IRQHandler
virtual bool handle_irq(RegisterState const&) override;
//^ IDEChannel
virtual void send_ata_io_command(LBAMode lba_mode, Direction direction) const override;
virtual void ata_read_sectors(bool, u16) override;
virtual void ata_write_sectors(bool, u16) override;
PhysicalRegionDescriptor& prdt() { return *reinterpret_cast<PhysicalRegionDescriptor*>(m_prdt_region->vaddr().as_ptr()); }
OwnPtr<Memory::Region> m_prdt_region;
OwnPtr<Memory::Region> m_dma_buffer_region;
RefPtr<Memory::PhysicalPage> m_prdt_page;
RefPtr<Memory::PhysicalPage> m_dma_buffer_page;
};
}

Kernel/Storage/ATA/GenericIDE/Channel.cpp

@@ -8,6 +8,7 @@
#include <AK/Singleton.h>
#include <AK/StringView.h>
#include <Kernel/Arch/x86/IO.h>
#include <Kernel/Bus/PCI/API.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Process.h>
#include <Kernel/Sections.h>
@@ -42,7 +43,17 @@ RefPtr<StorageDevice> IDEChannel::slave_device() const
return m_slave;
}
UNMAP_AFTER_INIT void IDEChannel::initialize()
UNMAP_AFTER_INIT void IDEChannel::initialize_with_isa_controller(Badge<ISAIDEController>, bool force_pio)
{
initialize(force_pio);
}
UNMAP_AFTER_INIT void IDEChannel::initialize_with_pci_controller(Badge<PCIIDEController>, bool force_pio)
{
initialize(force_pio);
}
UNMAP_AFTER_INIT void IDEChannel::initialize(bool force_pio)
{
disable_irq();
dbgln_if(PATA_DEBUG, "IDEChannel: {} IO base: {}", channel_type_string(), m_io_group.io_base());
@@ -75,6 +86,31 @@ UNMAP_AFTER_INIT void IDEChannel::initialize()
// Note: calling to detect_disks could generate an interrupt, clear it if that's the case
clear_pending_interrupts();
if (!force_pio) {
m_dma_enabled = true;
VERIFY(m_io_group.bus_master_base().has_value());
// Let's try to set up DMA transfers.
{
auto region_or_error = MM.allocate_dma_buffer_page("IDE PRDT"sv, Memory::Region::Access::ReadWrite, m_prdt_page);
if (region_or_error.is_error())
TODO();
m_prdt_region = region_or_error.release_value();
VERIFY(!m_prdt_page.is_null());
}
{
auto region_or_error = MM.allocate_dma_buffer_page("IDE DMA region"sv, Memory::Region::Access::ReadWrite, m_dma_buffer_page);
if (region_or_error.is_error())
TODO();
m_dma_buffer_region = region_or_error.release_value();
VERIFY(!m_dma_buffer_page.is_null());
}
prdt().end_of_table = 0x8000;
// clear bus master interrupt status
m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 4);
}
}
UNMAP_AFTER_INIT IDEChannel::IDEChannel(IDEController const& controller, u8 irq, IOAddressGroup io_group, ChannelType type)
@@ -83,7 +119,6 @@ UNMAP_AFTER_INIT IDEChannel::IDEChannel(IDEController const& controller, u8 irq,
, m_io_group(io_group)
, m_parent_controller(controller)
{
initialize();
}
UNMAP_AFTER_INIT IDEChannel::IDEChannel(IDEController const& controller, IOAddressGroup io_group, ChannelType type)
@@ -92,7 +127,6 @@ UNMAP_AFTER_INIT IDEChannel::IDEChannel(IDEController const& controller, IOAddre
, m_io_group(io_group)
, m_parent_controller(controller)
{
initialize();
}
void IDEChannel::clear_pending_interrupts() const
@@ -113,13 +147,20 @@ void IDEChannel::start_request(AsyncBlockDeviceRequest& request, bool is_slave,
m_current_request_block_index = 0;
m_current_request_flushing_cache = false;
if (m_dma_enabled) {
if (request.request_type() == AsyncBlockDeviceRequest::Read)
ata_read_sectors_with_dma(is_slave, capabilities);
else
ata_write_sectors_with_dma(is_slave, capabilities);
return;
}
if (request.request_type() == AsyncBlockDeviceRequest::Read)
ata_read_sectors(is_slave, capabilities);
ata_read_sectors_with_pio(is_slave, capabilities);
else
ata_write_sectors(is_slave, capabilities);
ata_write_sectors_with_pio(is_slave, capabilities);
}
void IDEChannel::complete_current_request(AsyncDeviceRequest::RequestResult result)
void IDEChannel::complete_dma_transaction(AsyncDeviceRequest::RequestResult result)
{
// NOTE: this may be called from the interrupt handler!
VERIFY(m_current_request);
@@ -130,7 +171,48 @@ void IDEChannel::complete_current_request(AsyncDeviceRequest::RequestResult resu
// which could cause page faults. Note that this may be called immediately
// before Processor::deferred_call_queue returns!
auto work_item_creation_result = g_io_work->try_queue([this, result]() {
dbgln_if(PATA_DEBUG, "IDEChannel::complete_current_request result: {}", (int)result);
dbgln_if(PATA_DEBUG, "IDEChannel::complete_dma_transaction result: {}", (int)result);
SpinlockLocker lock(m_request_lock);
VERIFY(m_current_request);
auto current_request = m_current_request;
m_current_request.clear();
if (result == AsyncDeviceRequest::Success) {
if (current_request->request_type() == AsyncBlockDeviceRequest::Read) {
if (auto result = current_request->write_to_buffer(current_request->buffer(), m_dma_buffer_region->vaddr().as_ptr(), current_request->buffer_size()); result.is_error()) {
lock.unlock();
current_request->complete(AsyncDeviceRequest::MemoryFault);
return;
}
}
// I read somewhere that this may trigger a cache flush so let's do it.
VERIFY(m_io_group.bus_master_base().has_value());
m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 0x6);
}
lock.unlock();
current_request->complete(result);
});
if (work_item_creation_result.is_error()) {
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(AsyncDeviceRequest::OutOfMemory);
}
}
void IDEChannel::complete_pio_transaction(AsyncDeviceRequest::RequestResult result)
{
// NOTE: this may be called from the interrupt handler!
VERIFY(m_current_request);
VERIFY(m_request_lock.is_locked());
// Now schedule reading back the buffer as soon as we leave the irq handler.
// This is important so that we can safely write the buffer back,
// which could cause page faults. Note that this may be called immediately
// before Processor::deferred_call_queue returns!
auto work_item_creation_result = g_io_work->try_queue([this, result]() {
dbgln_if(PATA_DEBUG, "IDEChannel::complete_pio_transaction result: {}", (int)result);
MutexLocker locker(m_lock);
VERIFY(m_current_request);
auto current_request = m_current_request;
@@ -193,7 +275,47 @@ void IDEChannel::try_disambiguate_error()
}
}
bool IDEChannel::handle_irq(RegisterState const&)
bool IDEChannel::handle_irq_for_dma_transaction()
{
u8 status = m_io_group.io_base().offset(ATA_REG_STATUS).in<u8>();
m_entropy_source.add_random_event(status);
VERIFY(m_io_group.bus_master_base().has_value());
u8 bstatus = m_io_group.bus_master_base().value().offset(2).in<u8>();
if (!(bstatus & 0x4)) {
// interrupt not from this device, ignore
dbgln_if(PATA_DEBUG, "IDEChannel: ignore interrupt");
return false;
}
// clear bus master interrupt status
m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 4);
SpinlockLocker lock(m_request_lock);
dbgln_if(PATA_DEBUG, "IDEChannel: interrupt: DRQ={}, BSY={}, DRDY={}",
(status & ATA_SR_DRQ) != 0,
(status & ATA_SR_BSY) != 0,
(status & ATA_SR_DRDY) != 0);
if (!m_current_request) {
dbgln("IDEChannel: IRQ but no pending request!");
return false;
}
if (status & ATA_SR_ERR) {
print_ide_status(status);
m_device_error = m_io_group.io_base().offset(ATA_REG_ERROR).in<u8>();
dbgln("IDEChannel: Error {:#02x}!", (u8)m_device_error);
try_disambiguate_error();
complete_dma_transaction(AsyncDeviceRequest::Failure);
return true;
}
m_device_error = 0;
complete_dma_transaction(AsyncDeviceRequest::Success);
return true;
}
bool IDEChannel::handle_irq_for_pio_transaction()
{
u8 status = m_io_group.io_base().offset(ATA_REG_STATUS).in<u8>();
@@ -215,7 +337,7 @@ bool IDEChannel::handle_irq(RegisterState const&)
m_device_error = m_io_group.io_base().offset(ATA_REG_ERROR).in<u8>();
dbgln("IDEChannel: Error {:#02x}!", (u8)m_device_error);
try_disambiguate_error();
complete_current_request(AsyncDeviceRequest::Failure);
complete_pio_transaction(AsyncDeviceRequest::Failure);
return true;
}
m_device_error = 0;
@@ -229,9 +351,9 @@ bool IDEChannel::handle_irq(RegisterState const&)
if (m_current_request->request_type() == AsyncBlockDeviceRequest::Read) {
dbgln_if(PATA_DEBUG, "IDEChannel: Read block {}/{}", m_current_request_block_index, m_current_request->block_count());
if (ata_do_read_sector()) {
if (ata_do_pio_read_sector()) {
if (++m_current_request_block_index >= m_current_request->block_count()) {
complete_current_request(AsyncDeviceRequest::Success);
complete_pio_transaction(AsyncDeviceRequest::Success);
return;
}
// Wait for the next block
@@ -247,10 +369,10 @@ bool IDEChannel::handle_irq(RegisterState const&)
m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_CACHE_FLUSH);
} else {
// Write the next block
ata_do_write_sector();
ata_do_pio_write_sector();
}
} else {
complete_current_request(AsyncDeviceRequest::Success);
complete_pio_transaction(AsyncDeviceRequest::Success);
}
}
});
@@ -262,6 +384,13 @@ bool IDEChannel::handle_irq(RegisterState const&)
return true;
}
bool IDEChannel::handle_irq(RegisterState const&)
{
if (!m_dma_enabled)
return handle_irq_for_pio_transaction();
return handle_irq_for_dma_transaction();
}
static void io_delay()
{
for (int i = 0; i < 4; ++i)
@@ -467,11 +596,14 @@ void IDEChannel::ata_access(Direction direction, bool slave_request, u64 lba, u8
if (!(status & ATA_SR_BSY) && (status & ATA_SR_DRDY))
break;
}
send_ata_io_command(lba_mode, direction);
if (m_dma_enabled)
send_ata_dma_command(lba_mode, direction);
else
send_ata_pio_command(lba_mode, direction);
enable_irq();
}
void IDEChannel::send_ata_io_command(LBAMode lba_mode, Direction direction) const
void IDEChannel::send_ata_pio_command(LBAMode lba_mode, Direction direction) const
{
if (lba_mode != LBAMode::FortyEightBit) {
m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(direction == Direction::Read ? ATA_CMD_READ_PIO : ATA_CMD_WRITE_PIO);
@@ -480,12 +612,12 @@ void IDEChannel::send_ata_io_command(LBAMode lba_mode, Direction direction) cons
}
}
bool IDEChannel::ata_do_read_sector()
bool IDEChannel::ata_do_pio_read_sector()
{
VERIFY(m_lock.is_locked());
VERIFY(m_request_lock.is_locked());
VERIFY(!m_current_request.is_null());
dbgln_if(PATA_DEBUG, "IDEChannel::ata_do_read_sector");
dbgln_if(PATA_DEBUG, "IDEChannel::ata_do_pio_read_sector");
auto& request = *m_current_request;
auto block_size = m_current_request->block_size();
auto out_buffer = request.buffer().offset(m_current_request_block_index * block_size);
@@ -496,26 +628,25 @@ bool IDEChannel::ata_do_read_sector()
});
if (result.is_error()) {
// TODO: Do we need to abort the PATA read if this wasn't the last block?
complete_current_request(AsyncDeviceRequest::MemoryFault);
complete_pio_transaction(AsyncDeviceRequest::MemoryFault);
return false;
}
return true;
}
// FIXME: This doesn't quite work and locks up reading LBA 3.
void IDEChannel::ata_read_sectors(bool slave_request, u16 capabilities)
void IDEChannel::ata_read_sectors_with_pio(bool slave_request, u16 capabilities)
{
VERIFY(m_lock.is_locked());
VERIFY(!m_current_request.is_null());
VERIFY(m_current_request->block_count() <= 256);
SpinlockLocker m_lock(m_request_lock);
dbgln_if(PATA_DEBUG, "IDEChannel::ata_read_sectors");
dbgln_if(PATA_DEBUG, "IDEChannel::ata_read_sectors_with_pio");
dbgln_if(PATA_DEBUG, "IDEChannel: Reading {} sector(s) @ LBA {}", m_current_request->block_count(), m_current_request->block_index());
ata_access(Direction::Read, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);
}
void IDEChannel::ata_do_write_sector()
void IDEChannel::ata_do_pio_write_sector()
{
VERIFY(m_lock.is_locked());
VERIFY(m_request_lock.is_locked());
@@ -538,11 +669,11 @@ void IDEChannel::ata_do_write_sector()
return readonly_bytes.size();
});
if (result.is_error())
complete_current_request(AsyncDeviceRequest::MemoryFault);
complete_pio_transaction(AsyncDeviceRequest::MemoryFault);
}
// FIXME: I'm assuming this doesn't work based on the fact PIO read doesn't work.
void IDEChannel::ata_write_sectors(bool slave_request, u16 capabilities)
void IDEChannel::ata_write_sectors_with_pio(bool slave_request, u16 capabilities)
{
VERIFY(m_lock.is_locked());
VERIFY(!m_current_request.is_null());
@@ -551,6 +682,92 @@ void IDEChannel::ata_write_sectors(bool slave_request, u16 capabilities)
SpinlockLocker m_lock(m_request_lock);
dbgln_if(PATA_DEBUG, "IDEChannel: Writing {} sector(s) @ LBA {}", m_current_request->block_count(), m_current_request->block_index());
ata_access(Direction::Write, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);
ata_do_write_sector();
ata_do_pio_write_sector();
}
void IDEChannel::send_ata_dma_command(LBAMode lba_mode, Direction direction) const
{
if (lba_mode != LBAMode::FortyEightBit) {
m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(direction == Direction::Read ? ATA_CMD_READ_DMA : ATA_CMD_WRITE_DMA);
} else {
m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(direction == Direction::Read ? ATA_CMD_READ_DMA_EXT : ATA_CMD_WRITE_DMA_EXT);
}
}
void IDEChannel::ata_read_sectors_with_dma(bool slave_request, u16 capabilities)
{
VERIFY(m_lock.is_locked());
VERIFY(!m_current_request.is_null());
VERIFY(m_current_request->block_count() <= 256);
SpinlockLocker m_lock(m_request_lock);
dbgln_if(PATA_DEBUG, "IDEChannel::ata_read_sectors_with_dma ({} x {})", m_current_request->block_index(), m_current_request->block_count());
// Note: This is a fix for a quirk for an IDE controller on ICH7 machine.
// We need to select the drive and then we wait 10 microseconds... and it doesn't hurt anything
m_io_group.io_base().offset(ATA_REG_HDDEVSEL).out<u8>(0xA0 | ((slave_request ? 1 : 0) << 4));
IO::delay(10);
prdt().offset = m_dma_buffer_page->paddr().get();
prdt().size = 512 * m_current_request->block_count();
VERIFY(prdt().size <= PAGE_SIZE);
VERIFY(m_io_group.bus_master_base().has_value());
// Stop bus master
m_io_group.bus_master_base().value().out<u8>(0);
// Write the PRDT location
m_io_group.bus_master_base().value().offset(4).out<u32>(m_prdt_page->paddr().get());
// Set transfer direction
m_io_group.bus_master_base().value().out<u8>(0x8);
// Turn on "Interrupt" and "Error" flag. The error flag should be cleared by hardware.
m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 0x6);
ata_access(Direction::Read, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);
// Start bus master
m_io_group.bus_master_base().value().out<u8>(0x9);
}
void IDEChannel::ata_write_sectors_with_dma(bool slave_request, u16 capabilities)
{
VERIFY(m_lock.is_locked());
VERIFY(!m_current_request.is_null());
VERIFY(m_current_request->block_count() <= 256);
SpinlockLocker m_lock(m_request_lock);
dbgln_if(PATA_DEBUG, "IDEChannel::ata_write_sectors_with_dma ({} x {})", m_current_request->block_index(), m_current_request->block_count());
prdt().offset = m_dma_buffer_page->paddr().get();
prdt().size = 512 * m_current_request->block_count();
if (auto result = m_current_request->read_from_buffer(m_current_request->buffer(), m_dma_buffer_region->vaddr().as_ptr(), 512 * m_current_request->block_count()); result.is_error()) {
complete_dma_transaction(AsyncDeviceRequest::MemoryFault);
return;
}
// Note: This is a fix for a quirk for an IDE controller on ICH7 machine.
// We need to select the drive and then we wait 10 microseconds... and it doesn't hurt anything
m_io_group.io_base().offset(ATA_REG_HDDEVSEL).out<u8>(0xA0 | ((slave_request ? 1 : 0) << 4));
IO::delay(10);
VERIFY(prdt().size <= PAGE_SIZE);
VERIFY(m_io_group.bus_master_base().has_value());
// Stop bus master
m_io_group.bus_master_base().value().out<u8>(0);
// Write the PRDT location
m_io_group.bus_master_base().value().offset(4).out<u32>(m_prdt_page->paddr().get());
// Turn on "Interrupt" and "Error" flag. The error flag should be cleared by hardware.
m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 0x6);
ata_access(Direction::Write, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);
// Start bus master
m_io_group.bus_master_base().value().out<u8>(0x1);
}
}

Kernel/Storage/ATA/GenericIDE/Channel.h

@@ -17,6 +17,7 @@
#pragma once
#include <AK/Badge.h>
#include <AK/RefPtr.h>
#include <Kernel/Arch/x86/IO.h>
#include <Kernel/Devices/Device.h>
@@ -26,6 +27,7 @@
#include <Kernel/PhysicalAddress.h>
#include <Kernel/Random.h>
#include <Kernel/Storage/ATA/ATADevice.h>
#include <Kernel/Storage/ATA/GenericIDE/Controller.h>
#include <Kernel/Storage/StorageDevice.h>
#include <Kernel/WaitQueue.h>
@@ -33,7 +35,8 @@ namespace Kernel {
class AsyncBlockDeviceRequest;
class IDEController;
class PCIIDEController;
class ISAIDEController;
class IDEChannel : public RefCounted<IDEChannel>
, public IRQHandler {
friend class IDEController;
@@ -57,6 +60,13 @@ public:
{
}
IOAddressGroup(IOAddress io_base, IOAddress control_base, Optional<IOAddress> bus_master_base)
: m_io_base(io_base)
, m_control_base(control_base)
, m_bus_master_base(bus_master_base)
{
}
IOAddressGroup(IOAddress io_base, IOAddress control_base)
: m_io_base(io_base)
, m_control_base(control_base)
@@ -93,6 +103,9 @@ public:
public:
static NonnullRefPtr<IDEChannel> create(IDEController const&, IOAddressGroup, ChannelType type);
static NonnullRefPtr<IDEChannel> create(IDEController const&, u8 irq, IOAddressGroup, ChannelType type);
void initialize_with_pci_controller(Badge<PCIIDEController>, bool force_pio);
void initialize_with_isa_controller(Badge<ISAIDEController>, bool force_pio);
virtual ~IDEChannel() override;
RefPtr<StorageDevice> master_device() const;
@@ -100,14 +113,15 @@ public:
virtual StringView purpose() const override { return "PATA Channel"sv; }
virtual bool is_dma_enabled() const { return false; }
private:
void complete_current_request(AsyncDeviceRequest::RequestResult);
void initialize();
static constexpr size_t m_logical_sector_size = 512;
void initialize(bool force_pio);
struct [[gnu::packed]] PhysicalRegionDescriptor {
u32 offset;
u16 size { 0 };
u16 end_of_table { 0 };
};
protected:
enum class LBAMode : u8 {
None, // CHS
TwentyEightBit,
@@ -123,11 +137,18 @@ protected:
IDEChannel(IDEController const&, u8 irq, IOAddressGroup, ChannelType type);
//^ IRQHandler
virtual bool handle_irq(RegisterState const&) override;
bool handle_irq_for_dma_transaction();
void complete_dma_transaction(AsyncDeviceRequest::RequestResult);
bool handle_irq_for_pio_transaction();
void complete_pio_transaction(AsyncDeviceRequest::RequestResult);
virtual void send_ata_io_command(LBAMode lba_mode, Direction direction) const;
void send_ata_pio_command(LBAMode lba_mode, Direction direction) const;
void ata_read_sectors_with_pio(bool, u16);
void ata_write_sectors_with_pio(bool, u16);
virtual void ata_read_sectors(bool, u16);
virtual void ata_write_sectors(bool, u16);
void send_ata_dma_command(LBAMode lba_mode, Direction direction) const;
void ata_read_sectors_with_dma(bool, u16);
void ata_write_sectors_with_dma(bool, u16);
void detect_disks();
StringView channel_type_string() const;
@@ -142,8 +163,10 @@ protected:
void ata_access(Direction, bool, u64, u8, u16);
bool ata_do_read_sector();
void ata_do_write_sector();
bool ata_do_pio_read_sector();
void ata_do_pio_write_sector();
PhysicalRegionDescriptor& prdt() { return *reinterpret_cast<PhysicalRegionDescriptor*>(m_prdt_region->vaddr().as_ptr()); }
// Data members
ChannelType m_channel_type { ChannelType::Primary };
@@ -160,7 +183,13 @@ protected:
Spinlock m_request_lock;
Mutex m_lock { "IDEChannel"sv };
bool m_dma_enabled { false };
IOAddressGroup m_io_group;
OwnPtr<Memory::Region> m_prdt_region;
OwnPtr<Memory::Region> m_dma_buffer_region;
RefPtr<Memory::PhysicalPage> m_prdt_page;
RefPtr<Memory::PhysicalPage> m_dma_buffer_page;
NonnullRefPtr<IDEController> m_parent_controller;
};
}

Kernel/Storage/ATA/GenericIDE/Controller.cpp

@@ -11,7 +11,7 @@
#include <Kernel/FileSystem/ProcFS.h>
#include <Kernel/Sections.h>
#include <Kernel/Storage/ATA/ATADiskDevice.h>
#include <Kernel/Storage/ATA/GenericIDE/BusMasterChannel.h>
#include <Kernel/Storage/ATA/GenericIDE/Channel.h>
#include <Kernel/Storage/ATA/GenericIDE/Controller.h>
namespace Kernel {

Kernel/Storage/ATA/GenericIDE/Controller.h

@@ -10,13 +10,12 @@
#include <AK/RefPtr.h>
#include <AK/Types.h>
#include <Kernel/Storage/ATA/ATAController.h>
#include <Kernel/Storage/ATA/GenericIDE/Channel.h>
#include <Kernel/Storage/StorageDevice.h>
namespace Kernel {
class AsyncBlockDeviceRequest;
class IDEChannel;
class IDEController : public ATAController {
public:
static NonnullRefPtr<IDEController> initialize();

Kernel/Storage/ATA/GenericIDE/ISAController.cpp

@@ -11,7 +11,7 @@
#include <Kernel/FileSystem/ProcFS.h>
#include <Kernel/Sections.h>
#include <Kernel/Storage/ATA/ATADiskDevice.h>
#include <Kernel/Storage/ATA/GenericIDE/BusMasterChannel.h>
#include <Kernel/Storage/ATA/GenericIDE/Channel.h>
#include <Kernel/Storage/ATA/GenericIDE/ISAController.h>
namespace Kernel {
@@ -34,9 +34,11 @@ UNMAP_AFTER_INIT void ISAIDEController::initialize_channels()
auto secondary_control_io = IOAddress(0x376);
m_channels.append(IDEChannel::create(*this, { primary_base_io, primary_control_io }, IDEChannel::ChannelType::Primary));
m_channels[0].initialize_with_isa_controller({}, true);
m_channels[0].enable_irq();
m_channels.append(IDEChannel::create(*this, { secondary_base_io, secondary_control_io }, IDEChannel::ChannelType::Secondary));
m_channels[1].initialize_with_isa_controller({}, true);
m_channels[1].enable_irq();
dbgln("ISA IDE controller detected and initialized");
}

Kernel/Storage/ATA/GenericIDE/ISAController.h

@@ -9,7 +9,6 @@
#include <AK/OwnPtr.h>
#include <AK/RefPtr.h>
#include <AK/Types.h>
#include <Kernel/Storage/ATA/GenericIDE/Channel.h>
#include <Kernel/Storage/ATA/GenericIDE/Controller.h>
#include <Kernel/Storage/StorageDevice.h>

Kernel/Storage/ATA/GenericIDE/PCIController.cpp

@@ -11,7 +11,7 @@
#include <Kernel/FileSystem/ProcFS.h>
#include <Kernel/Sections.h>
#include <Kernel/Storage/ATA/ATADiskDevice.h>
#include <Kernel/Storage/ATA/GenericIDE/BusMasterChannel.h>
#include <Kernel/Storage/ATA/GenericIDE/Channel.h>
#include <Kernel/Storage/ATA/GenericIDE/PCIController.h>
namespace Kernel {
@@ -105,31 +105,19 @@ UNMAP_AFTER_INIT void PCIIDEController::initialize(bool force_pio)
}
if (is_pci_native_mode_enabled_on_primary_channel()) {
if (force_pio)
m_channels.append(IDEChannel::create(*this, irq_line, { primary_base_io, primary_control_io }, IDEChannel::ChannelType::Primary));
else
m_channels.append(BMIDEChannel::create(*this, irq_line, { primary_base_io, primary_control_io, bus_master_base }, IDEChannel::ChannelType::Primary));
m_channels.append(IDEChannel::create(*this, irq_line, { primary_base_io, primary_control_io, bus_master_base }, IDEChannel::ChannelType::Primary));
} else {
if (force_pio)
m_channels.append(IDEChannel::create(*this, { primary_base_io, primary_control_io }, IDEChannel::ChannelType::Primary));
else
m_channels.append(BMIDEChannel::create(*this, { primary_base_io, primary_control_io, bus_master_base }, IDEChannel::ChannelType::Primary));
m_channels.append(IDEChannel::create(*this, { primary_base_io, primary_control_io, bus_master_base }, IDEChannel::ChannelType::Primary));
}
m_channels[0].initialize_with_pci_controller({}, force_pio);
m_channels[0].enable_irq();
if (is_pci_native_mode_enabled_on_secondary_channel()) {
if (force_pio)
m_channels.append(IDEChannel::create(*this, irq_line, { secondary_base_io, secondary_control_io }, IDEChannel::ChannelType::Secondary));
else
m_channels.append(BMIDEChannel::create(*this, irq_line, { secondary_base_io, secondary_control_io, bus_master_base.offset(8) }, IDEChannel::ChannelType::Secondary));
m_channels.append(IDEChannel::create(*this, irq_line, { secondary_base_io, secondary_control_io, bus_master_base.offset(8) }, IDEChannel::ChannelType::Secondary));
} else {
if (force_pio)
m_channels.append(IDEChannel::create(*this, { secondary_base_io, secondary_control_io }, IDEChannel::ChannelType::Secondary));
else
m_channels.append(BMIDEChannel::create(*this, { secondary_base_io, secondary_control_io, bus_master_base.offset(8) }, IDEChannel::ChannelType::Secondary));
m_channels.append(IDEChannel::create(*this, { secondary_base_io, secondary_control_io, bus_master_base.offset(8) }, IDEChannel::ChannelType::Secondary));
}
m_channels[1].initialize_with_pci_controller({}, force_pio);
m_channels[1].enable_irq();
}
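In short, both PCI channel variants are now plain IDEChannel instances, and DMA capability is decided by the initialization call rather than by the channel's type. A condensed excerpt of the primary-channel bring-up from the hunk above (the empty braces construct the Badge<PCIIDEController>, so only this controller can invoke the hook):

    m_channels.append(IDEChannel::create(*this, irq_line,
        { primary_base_io, primary_control_io, bus_master_base },
        IDEChannel::ChannelType::Primary));
    m_channels[0].initialize_with_pci_controller({}, force_pio);
    m_channels[0].enable_irq();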

Kernel/Storage/ATA/GenericIDE/PCIController.h

@@ -9,7 +9,6 @@
#include <AK/OwnPtr.h>
#include <AK/RefPtr.h>
#include <AK/Types.h>
#include <Kernel/Storage/ATA/GenericIDE/Channel.h>
#include <Kernel/Storage/ATA/GenericIDE/Controller.h>
#include <Kernel/Storage/StorageDevice.h>