ladybird/Kernel/Bus/PCI/Access.cpp
Liav A 1f9d3a3523 Kernel/PCI: Hold a reference to DeviceIdentifier in the Device class
There are now 2 separate classes for almost the same object type:
- EnumerableDeviceIdentifier, which is used in the enumeration code for
  all PCI host controller classes. This is allowed to be moved and
  copied, as it doesn't support ref-counting.
- DeviceIdentifier, which inherits from EnumerableDeviceIdentifier. This
  class uses ref-counting and is not allowed to be copied. It has a
  spinlock member to allow safely executing complicated IO sequences on
  a PCI device and its configuration space.
  There's a static method that allows a quick conversion from
  EnumerableDeviceIdentifier to DeviceIdentifier while creating a
  NonnullRefPtr out of it (see the sketch after this list).
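
An abridged sketch of the resulting shape (the base classes, lock type,
and member layout here are assumptions; only from_enumerable_identifier()
and operation_lock() are taken from the code below):

    class EnumerableDeviceIdentifier {
        // Plain value type: movable and copyable, used only while
        // walking the buses during enumeration.
    };

    class DeviceIdentifier : public EnumerableDeviceIdentifier
        , public AtomicRefCounted<DeviceIdentifier> {
    public:
        // Wraps an enumerated identifier in a ref-counted object, as
        // used by Access::rescan_hardware() below.
        static ErrorOr<NonnullRefPtr<DeviceIdentifier>> from_enumerable_identifier(EnumerableDeviceIdentifier const&);

        // The per-device lock that serializes multi-step
        // configuration-space IO sequences.
        Spinlock& operation_lock();
    };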

The reason for doing this is for the sake of integrity and reliability of
the system in 2 places:
- Ensure that "complicated" tasks that rely on manipulating PCI device
  registers are done in a safe manner. For example, determining a PCI
  BAR space size requires multiple reads and writes to the same
  register, and if another CPU tries to do something else with our
  selected register at the same time, the result will be a catastrophe
  (see the sketch after this list).
- Allow the PCI API to have a unified form around a shared object which
  actually holds much more data than the PCI::Address structure. This is
  fundamental if we want to do certain types of optimizations, and be
  able to support more features of the PCI bus in the foreseeable
  future.
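
A minimal sketch of the BAR-sizing sequence this protects (the helper
name and the 32-bit memory-BAR handling are illustrative, not code from
this patch; the Access accessors and operation_lock() are the ones
defined below):

    static u32 determine_bar_space_size(PCI::DeviceIdentifier& identifier, PCI::RegisterOffset bar)
    {
        // Holding the per-device operation lock keeps other CPUs from
        // touching this register between the accesses below.
        SpinlockLocker locker(identifier.operation_lock());
        u32 original_value = PCI::Access::the().read32_field(identifier, to_underlying(bar));
        // Writing all-ones makes the device report which address bits
        // are writable; the hardwired-zero low bits encode the size.
        PCI::Access::the().write32_field(identifier, to_underlying(bar), 0xFFFFFFFF);
        u32 size_mask = PCI::Access::the().read32_field(identifier, to_underlying(bar)) & 0xFFFFFFF0;
        PCI::Access::the().write32_field(identifier, to_underlying(bar), original_value);
        return ~size_mask + 1;
    }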

This patch already has several implications:
- All PCI::Device(s) hold a reference to a DeviceIdentifier structure
  that was originally handed out by the PCI::Access singleton. This
  means that all instances of DeviceIdentifier structures are located in
  one place, and all references point to that location. This ensures
  that locking the operation spinlock will take effect in all the
  appropriate places.
- We no longer support adding PCI host controllers and then immediately
  enumerating them with a lambda function. That method turned out to be
  extremely broken and too complicated to work reliably with the new
  paradigm being introduced in this patch. This means that for Volume
  Management Devices (Intel VMD devices), we simply first enumerate the
  PCI bus for such devices in the storage code, and if we find one, we
  attach it in the PCI::Access method which will scan for devices behind
  that bridge and will add new DeviceIdentifier(s) objects to its
  internal Vector. Afterwards, we just continue as usual with scanning
  for actual storage controllers, so we will find the corresponding NVMe
  controllers if there are any behind that VMD bridge (see the sketch
  below).
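
Roughly, the VMD flow now looks like this (the VolumeManagementDevice
class name, its must_create() factory, and the is_vmd_device() predicate
are written from memory or hypothetical and may not match the tree;
add_host_controller_and_scan_for_devices() is the Access method referred
to above):

    PCI::enumerate([&](PCI::DeviceIdentifier const& device_identifier) {
        if (!is_vmd_device(device_identifier)) // hypothetical predicate
            return;
        auto controller = PCI::VolumeManagementDevice::must_create(device_identifier);
        // Scans the buses behind the VMD bridge and appends new
        // DeviceIdentifiers to the global vector.
        MUST(PCI::Access::the().add_host_controller_and_scan_for_devices(move(controller)));
    });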
2023-01-26 23:04:26 +01:00

/*
 * Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/ByteReader.h>
#include <AK/Error.h>
#include <AK/HashTable.h>
#if ARCH(X86_64)
# include <Kernel/Arch/x86_64/PCI/Controller/HostBridge.h>
#endif
#include <Kernel/Bus/PCI/Access.h>
#include <Kernel/Bus/PCI/Controller/MemoryBackedHostBridge.h>
#include <Kernel/Bus/PCI/Initializer.h>
#include <Kernel/Debug.h>
#include <Kernel/Firmware/ACPI/Definitions.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/Region.h>
#include <Kernel/Memory/TypedMapping.h>
#include <Kernel/ProcessExposed.h>
#include <Kernel/Sections.h>

namespace Kernel::PCI {
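
// Note: Each PCI function exposes a 4 KiB window in the memory-mapped (ECAM)
// configuration space, hence this size.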
#define PCI_MMIO_CONFIG_SPACE_SIZE 4096

static Access* s_access;

Access& Access::the()
{
    if (s_access == nullptr) {
        VERIFY_NOT_REACHED(); // We failed to initialize the PCI subsystem, so stop here!
    }
    return *s_access;
}

bool Access::is_initialized()
{
    return (s_access != nullptr);
}

bool Access::is_hardware_disabled()
{
    return g_pci_access_io_probe_failed;
}

bool Access::is_disabled()
{
    return g_pci_access_is_disabled_from_commandline || g_pci_access_io_probe_failed;
}

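// Map the ACPI MCFG table and register a MemoryBackedHostBridge for every
// descriptor in it. Each descriptor names one PCI domain (segment group),
// its bus range, and the physical base of that domain's ECAM region.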
UNMAP_AFTER_INIT bool Access::find_and_register_pci_host_bridges_from_acpi_mcfg_table(PhysicalAddress mcfg_table)
{
    u32 length = 0;
    u8 revision = 0;
    {
        auto mapped_mcfg_table_or_error = Memory::map_typed<ACPI::Structures::SDTHeader>(mcfg_table);
        if (mapped_mcfg_table_or_error.is_error()) {
            dbgln("Failed to map MCFG table");
            return false;
        }
        auto mapped_mcfg_table = mapped_mcfg_table_or_error.release_value();
        length = mapped_mcfg_table->length;
        revision = mapped_mcfg_table->revision;
    }

    if (length == sizeof(ACPI::Structures::SDTHeader))
        return false;

    dbgln("PCI: MCFG, length: {}, revision: {}", length, revision);

    if (Checked<size_t>::addition_would_overflow(length, PAGE_SIZE)) {
        dbgln("Overflow when adding extra page to allocation of length {}", length);
        return false;
    }
    length += PAGE_SIZE;
    auto region_size_or_error = Memory::page_round_up(length);
    if (region_size_or_error.is_error()) {
        dbgln("Failed to round up length of {} to pages", length);
        return false;
    }
    auto mcfg_region_or_error = MM.allocate_kernel_region(mcfg_table.page_base(), region_size_or_error.value(), "PCI Parsing MCFG"sv, Memory::Region::Access::ReadWrite);
    if (mcfg_region_or_error.is_error())
        return false;
    auto& mcfg = *(ACPI::Structures::MCFG*)mcfg_region_or_error.value()->vaddr().offset(mcfg_table.offset_in_page()).as_ptr();
    dbgln_if(PCI_DEBUG, "PCI: Checking MCFG @ {}, {}", VirtualAddress(&mcfg), mcfg_table);

    for (u32 index = 0; index < ((mcfg.header.length - sizeof(ACPI::Structures::MCFG)) / sizeof(ACPI::Structures::PCI_MMIO_Descriptor)); index++) {
        u8 start_bus = mcfg.descriptors[index].start_pci_bus;
        u8 end_bus = mcfg.descriptors[index].end_pci_bus;
        u64 start_addr = mcfg.descriptors[index].base_addr;

        Domain pci_domain { index, start_bus, end_bus };
        dmesgln("PCI: New PCI domain @ {}, PCI buses ({}-{})", PhysicalAddress { start_addr }, start_bus, end_bus);
        auto host_bridge = MemoryBackedHostBridge::must_create(pci_domain, PhysicalAddress { start_addr });
        add_host_controller(move(host_bridge));
    }

    return true;
}

UNMAP_AFTER_INIT bool Access::initialize_for_multiple_pci_domains(PhysicalAddress mcfg_table)
{
    VERIFY(!Access::is_initialized());
    auto* access = new Access();
    if (!access->find_and_register_pci_host_bridges_from_acpi_mcfg_table(mcfg_table))
        return false;
    access->rescan_hardware();
    dbgln_if(PCI_DEBUG, "PCI: access for multiple PCI domains initialised.");
    return true;
}

#if ARCH(X86_64)
UNMAP_AFTER_INIT bool Access::initialize_for_one_pci_domain()
{
    VERIFY(!Access::is_initialized());
    auto* access = new Access();
    auto host_bridge = HostBridge::must_create_with_io_access();
    access->add_host_controller(move(host_bridge));
    access->rescan_hardware();
    dbgln_if(PCI_DEBUG, "PCI: access for one PCI domain initialised.");
    return true;
}
#endif

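// Note: This is the attach path for controllers discovered after the initial
// bus scan, such as the Intel VMD bridge mentioned in the commit message: we
// register the controller under both locks, then wrap each device found
// behind it in a ref-counted DeviceIdentifier.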
ErrorOr<void> Access::add_host_controller_and_scan_for_devices(NonnullOwnPtr<HostController> controller)
{
    SpinlockLocker locker(m_access_lock);
    SpinlockLocker scan_locker(m_scan_lock);
    auto domain_number = controller->domain_number();
    VERIFY(!m_host_controllers.contains(domain_number));

    // Note: We need to register the new controller as soon as possible, and
    // definitely before enumerating devices behind that.
    m_host_controllers.set(domain_number, move(controller));
    ErrorOr<void> error_or_void {};
    m_host_controllers.get(domain_number).value()->enumerate_attached_devices([&](EnumerableDeviceIdentifier const& device_identifier) -> IterationDecision {
        auto device_identifier_or_error = DeviceIdentifier::from_enumerable_identifier(device_identifier);
        if (device_identifier_or_error.is_error()) {
            error_or_void = device_identifier_or_error.error();
            return IterationDecision::Break;
        }
        m_device_identifiers.append(device_identifier_or_error.release_value());
        return IterationDecision::Continue;
    });
    // Propagate any error captured while converting the enumerated identifiers.
    return error_or_void;
}

UNMAP_AFTER_INIT void Access::add_host_controller(NonnullOwnPtr<HostController> controller)
{
    auto domain_number = controller->domain_number();
    m_host_controllers.set(domain_number, move(controller));
}

UNMAP_AFTER_INIT Access::Access()
{
    s_access = this;
}

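// Scan all registered host controllers and build the global list of
// DeviceIdentifiers. This only runs during early boot (UNMAP_AFTER_INIT),
// before any identifiers have been handed out, hence the emptiness VERIFY.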
UNMAP_AFTER_INIT void Access::rescan_hardware()
{
    SpinlockLocker locker(m_access_lock);
    SpinlockLocker scan_locker(m_scan_lock);
    VERIFY(m_device_identifiers.is_empty());

    ErrorOr<void> error_or_void {};
    for (auto it = m_host_controllers.begin(); it != m_host_controllers.end(); ++it) {
        (*it).value->enumerate_attached_devices([this, &error_or_void](EnumerableDeviceIdentifier device_identifier) -> IterationDecision {
            auto device_identifier_or_error = DeviceIdentifier::from_enumerable_identifier(device_identifier);
            if (device_identifier_or_error.is_error()) {
                error_or_void = device_identifier_or_error.error();
                return IterationDecision::Break;
            }
            m_device_identifiers.append(device_identifier_or_error.release_value());
            return IterationDecision::Continue;
        });
    }
    if (error_or_void.is_error()) {
        dmesgln("Failed during PCI Access::rescan_hardware due to {}", error_or_void.error());
        VERIFY_NOT_REACHED();
    }
}

ErrorOr<void> Access::fast_enumerate(Function<void(DeviceIdentifier const&)>& callback) const
{
    // Note: We hold the m_access_lock for a brief moment just to ensure we get
    // a complete Vector in case someone wants to mutate it.
    NonnullRefPtrVector<DeviceIdentifier> device_identifiers;
    {
        SpinlockLocker locker(m_access_lock);
        VERIFY(!m_device_identifiers.is_empty());
        TRY(device_identifiers.try_extend(m_device_identifiers));
    }
    for (auto const& device_identifier : device_identifiers) {
        callback(device_identifier);
    }
    return {};
}

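// Note: This performs a linear search and must only be called with an address
// that belongs to an already-enumerated device; an unknown address asserts.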
DeviceIdentifier const& Access::get_device_identifier(Address address) const
{
    for (auto& device_identifier : m_device_identifiers) {
        if (device_identifier.address().domain() == address.domain()
            && device_identifier.address().bus() == address.bus()
            && device_identifier.address().device() == address.device()
            && device_identifier.address().function() == address.function()) {
            return device_identifier;
        }
    }
    VERIFY_NOT_REACHED();
}

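// Note: Every configuration-space accessor below requires the caller to hold
// the identifier's operation lock; m_access_lock is taken only to guard the
// host controller map while the access is dispatched.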
void Access::write8_field(DeviceIdentifier const& identifier, u32 field, u8 value)
{
    VERIFY(identifier.operation_lock().is_locked());
    SpinlockLocker locker(m_access_lock);
    VERIFY(m_host_controllers.contains(identifier.address().domain()));
    auto& controller = *m_host_controllers.get(identifier.address().domain()).value();
    controller.write8_field(identifier.address().bus(), identifier.address().device(), identifier.address().function(), field, value);
}

void Access::write16_field(DeviceIdentifier const& identifier, u32 field, u16 value)
{
    VERIFY(identifier.operation_lock().is_locked());
    SpinlockLocker locker(m_access_lock);
    VERIFY(m_host_controllers.contains(identifier.address().domain()));
    auto& controller = *m_host_controllers.get(identifier.address().domain()).value();
    controller.write16_field(identifier.address().bus(), identifier.address().device(), identifier.address().function(), field, value);
}

void Access::write32_field(DeviceIdentifier const& identifier, u32 field, u32 value)
{
    VERIFY(identifier.operation_lock().is_locked());
    SpinlockLocker locker(m_access_lock);
    VERIFY(m_host_controllers.contains(identifier.address().domain()));
    auto& controller = *m_host_controllers.get(identifier.address().domain()).value();
    controller.write32_field(identifier.address().bus(), identifier.address().device(), identifier.address().function(), field, value);
}

u8 Access::read8_field(DeviceIdentifier const& identifier, RegisterOffset field)
{
    VERIFY(identifier.operation_lock().is_locked());
    return read8_field(identifier, to_underlying(field));
}

u16 Access::read16_field(DeviceIdentifier const& identifier, RegisterOffset field)
{
    VERIFY(identifier.operation_lock().is_locked());
    return read16_field(identifier, to_underlying(field));
}

u8 Access::read8_field(DeviceIdentifier const& identifier, u32 field)
{
    VERIFY(identifier.operation_lock().is_locked());
    SpinlockLocker locker(m_access_lock);
    VERIFY(m_host_controllers.contains(identifier.address().domain()));
    auto& controller = *m_host_controllers.get(identifier.address().domain()).value();
    return controller.read8_field(identifier.address().bus(), identifier.address().device(), identifier.address().function(), field);
}

u16 Access::read16_field(DeviceIdentifier const& identifier, u32 field)
{
    VERIFY(identifier.operation_lock().is_locked());
    SpinlockLocker locker(m_access_lock);
    VERIFY(m_host_controllers.contains(identifier.address().domain()));
    auto& controller = *m_host_controllers.get(identifier.address().domain()).value();
    return controller.read16_field(identifier.address().bus(), identifier.address().device(), identifier.address().function(), field);
}

u32 Access::read32_field(DeviceIdentifier const& identifier, u32 field)
{
    VERIFY(identifier.operation_lock().is_locked());
    SpinlockLocker locker(m_access_lock);
    VERIFY(m_host_controllers.contains(identifier.address().domain()));
    auto& controller = *m_host_controllers.get(identifier.address().domain()).value();
    return controller.read32_field(identifier.address().bus(), identifier.address().device(), identifier.address().function(), field);
}

}