MMIOAccess.cpp

/*
 * Copyright (c) 2021, Liav A. <liavalb@hotmail.co.il>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/ByteReader.h>
#include <AK/Optional.h>
#include <Kernel/Arch/x86/InterruptDisabler.h>
#include <Kernel/Bus/PCI/MMIOAccess.h>
#include <Kernel/Debug.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Sections.h>

namespace Kernel {
namespace PCI {
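
// With the usual ECAM constants (a 4 KiB configuration window per function, 8 functions
// per device, 32 devices per bus), this works out to 1 MiB of configuration space per bus.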
#define MEMORY_RANGE_PER_BUS (PCI_MMIO_CONFIG_SPACE_SIZE * PCI_MAX_FUNCTIONS_PER_DEVICE * PCI_MAX_DEVICES_PER_BUS)

u32 MMIOAccess::segment_count() const
{
    return m_segments.size();
}

u8 MMIOAccess::segment_start_bus(u32 seg) const
{
    auto segment = m_segments.get(seg);
    VERIFY(segment.has_value());
    return segment.value().get_start_bus();
}

u8 MMIOAccess::segment_end_bus(u32 seg) const
{
    auto segment = m_segments.get(seg);
    VERIFY(segment.has_value());
    return segment.value().get_end_bus();
}
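
// Each bus occupies MEMORY_RANGE_PER_BUS (1 MiB) of its segment's ECAM window, so the
// physical base for a bus is the segment base plus (bus - start_bus) * MEMORY_RANGE_PER_BUS.
// With illustrative numbers only: a segment based at 0xE0000000 whose start bus is 0
// would place bus 3 at 0xE0000000 + 3 * 0x100000 = 0xE0300000.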
PhysicalAddress MMIOAccess::determine_memory_mapped_bus_region(u32 segment, u8 bus) const
{
    VERIFY(bus >= segment_start_bus(segment) && bus <= segment_end_bus(segment));
    auto seg = m_segments.get(segment);
    VERIFY(seg.has_value());
    return seg.value().get_paddr().offset(MEMORY_RANGE_PER_BUS * (bus - seg.value().get_start_bus()));
}
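
// The accessor is constructed at most once (guarded by Access::is_initialized()); the raw
// `new` without a matching delete is intentional, as the accessor lives for the kernel's lifetime.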
UNMAP_AFTER_INIT void MMIOAccess::initialize(PhysicalAddress mcfg)
{
    if (!Access::is_initialized()) {
        new MMIOAccess(mcfg);
        dbgln_if(PCI_DEBUG, "PCI: MMIO access initialised.");
    }
}
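
// The MCFG table is mapped twice: first a small "checkup" mapping (two pages, presumably to
// cover an SDT header that straddles a page boundary) to read the table's real length, and
// then a full mapping so every PCI_MMIO_Descriptor entry can be parsed into an MMIOSegment.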
UNMAP_AFTER_INIT MMIOAccess::MMIOAccess(PhysicalAddress p_mcfg)
    : m_mcfg(p_mcfg)
{
    dmesgln("PCI: Using MMIO for PCI configuration space access");

    auto checkup_region = MM.allocate_kernel_region(p_mcfg.page_base(), (PAGE_SIZE * 2), "PCI MCFG Checkup", Memory::Region::Access::ReadWrite);
    dbgln_if(PCI_DEBUG, "PCI: Checking MCFG Table length to choose the correct mapping size");
    auto* sdt = (ACPI::Structures::SDTHeader*)checkup_region->vaddr().offset(p_mcfg.offset_in_page()).as_ptr();
    u32 length = sdt->length;
    u8 revision = sdt->revision;
    dbgln("PCI: MCFG, length: {}, revision: {}", length, revision);
    checkup_region->unmap();

    auto mcfg_region = MM.allocate_kernel_region(p_mcfg.page_base(), Memory::page_round_up(length) + PAGE_SIZE, "PCI Parsing MCFG", Memory::Region::Access::ReadWrite);
    auto& mcfg = *(ACPI::Structures::MCFG*)mcfg_region->vaddr().offset(p_mcfg.offset_in_page()).as_ptr();
    dbgln_if(PCI_DEBUG, "PCI: Checking MCFG @ {}, {}", VirtualAddress(&mcfg), PhysicalAddress(p_mcfg.get()));

    for (u32 index = 0; index < ((mcfg.header.length - sizeof(ACPI::Structures::MCFG)) / sizeof(ACPI::Structures::PCI_MMIO_Descriptor)); index++) {
        u8 start_bus = mcfg.descriptors[index].start_pci_bus;
        u8 end_bus = mcfg.descriptors[index].end_pci_bus;
        u32 lower_addr = mcfg.descriptors[index].base_addr;
        m_segments.set(index, { PhysicalAddress(lower_addr), start_bus, end_bus });
        dmesgln("PCI: New PCI segment @ {}, PCI buses ({}-{})", PhysicalAddress { lower_addr }, start_bus, end_bus);
    }
    mcfg_region->unmap();
    dmesgln("PCI: MMIO segments: {}", m_segments.size());

    InterruptDisabler disabler;
    VERIFY(m_segments.contains(0));

    // Note: we need to map this region before enumerating the hardware and adding
    // PCI::PhysicalID objects to the vector, because get_capabilities calls
    // PCI::read16 which will need this region to be mapped.
    u8 start_bus = m_segments.get(0).value().get_start_bus();
    m_mapped_region = MM.allocate_kernel_region(determine_memory_mapped_bus_region(0, start_bus), MEMORY_RANGE_PER_BUS, "PCI ECAM", Memory::Region::Access::ReadWrite);
    m_mapped_bus = start_bus;
    dbgln_if(PCI_DEBUG, "PCI: First PCI ECAM Mapped region for starting bus {} @ {} {}", start_bus, m_mapped_region->vaddr(), m_mapped_region->physical_page(0)->paddr());

    enumerate_hardware([&](const Address& address, ID id) {
        m_physical_ids.append({ address, id, get_capabilities(address) });
    });
}
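
// Only one bus' 1 MiB ECAM window is kept mapped at a time; switching to another bus simply
// replaces the previous mapping. Callers must already hold m_access_lock.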
void MMIOAccess::map_bus_region(u32 segment, u8 bus)
{
    VERIFY(m_access_lock.is_locked());
    if (m_mapped_bus == bus)
        return;
    m_mapped_region = MM.allocate_kernel_region(determine_memory_mapped_bus_region(segment, bus), MEMORY_RANGE_PER_BUS, "PCI ECAM", Memory::Region::Access::ReadWrite);
    m_mapped_bus = bus;
    dbgln_if(PCI_DEBUG, "PCI: New PCI ECAM Mapped region for bus {} @ {} {}", bus, m_mapped_region->vaddr(), m_mapped_region->physical_page(0)->paddr());
}
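
// Within a mapped bus window, each function's 4 KiB configuration space lives at
// device * (PCI_MAX_FUNCTIONS_PER_DEVICE * PCI_MMIO_CONFIG_SPACE_SIZE) + function * PCI_MMIO_CONFIG_SPACE_SIZE.
// With illustrative numbers only: device 2, function 1 sits at 2 * 0x8000 + 1 * 0x1000 = 0x11000
// from the start of the window.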
VirtualAddress MMIOAccess::get_device_configuration_space(Address address)
{
    VERIFY(m_access_lock.is_locked());
    dbgln_if(PCI_DEBUG, "PCI: Getting device configuration space for {}", address);
    map_bus_region(address.seg(), address.bus());
    return m_mapped_region->vaddr().offset(PCI_MMIO_CONFIG_SPACE_SIZE * address.function() + (PCI_MMIO_CONFIG_SPACE_SIZE * PCI_MAX_FUNCTIONS_PER_DEVICE) * address.device());
}
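
// ECAM gives each function a 4 KiB configuration window, so field offsets are masked to 12 bits
// and the VERIFYs keep multi-byte accesses inside that window (a 32-bit access must start at or
// below 0xffc, a 16-bit access at or below 0xffe).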
u8 MMIOAccess::read8_field(Address address, u32 field)
{
    SpinlockLocker lock(m_access_lock);
    VERIFY(field <= 0xfff);
    dbgln_if(PCI_DEBUG, "PCI: MMIO Reading 8-bit field {:#08x} for {}", field, address);
    return *((volatile u8*)(get_device_configuration_space(address).get() + (field & 0xfff)));
}

u16 MMIOAccess::read16_field(Address address, u32 field)
{
    SpinlockLocker lock(m_access_lock);
    VERIFY(field < 0xfff);
    dbgln_if(PCI_DEBUG, "PCI: MMIO Reading 16-bit field {:#08x} for {}", field, address);
    u16 data = 0;
    ByteReader::load<u16>(get_device_configuration_space(address).offset(field & 0xfff).as_ptr(), data);
    return data;
}

u32 MMIOAccess::read32_field(Address address, u32 field)
{
    SpinlockLocker lock(m_access_lock);
    VERIFY(field <= 0xffc);
    dbgln_if(PCI_DEBUG, "PCI: MMIO Reading 32-bit field {:#08x} for {}", field, address);
    u32 data = 0;
    ByteReader::load<u32>(get_device_configuration_space(address).offset(field & 0xfff).as_ptr(), data);
    return data;
}

void MMIOAccess::write8_field(Address address, u32 field, u8 value)
{
    SpinlockLocker lock(m_access_lock);
    VERIFY(field <= 0xfff);
    dbgln_if(PCI_DEBUG, "PCI: MMIO Writing 8-bit field {:#08x}, value={:#02x} for {}", field, value, address);
    *((volatile u8*)(get_device_configuration_space(address).get() + (field & 0xfff))) = value;
}

void MMIOAccess::write16_field(Address address, u32 field, u16 value)
{
    SpinlockLocker lock(m_access_lock);
    VERIFY(field < 0xfff);
    dbgln_if(PCI_DEBUG, "PCI: MMIO Writing 16-bit field {:#08x}, value={:#02x} for {}", field, value, address);
    ByteReader::store<u16>(get_device_configuration_space(address).offset(field & 0xfff).as_ptr(), value);
}

void MMIOAccess::write32_field(Address address, u32 field, u32 value)
{
    SpinlockLocker lock(m_access_lock);
    VERIFY(field <= 0xffc);
    dbgln_if(PCI_DEBUG, "PCI: MMIO Writing 32-bit field {:#08x}, value={:#02x} for {}", field, value, address);
    ByteReader::store<u32>(get_device_configuration_space(address).offset(field & 0xfff).as_ptr(), value);
}
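
// Bus discovery follows the usual PCI bring-up pattern: if the host bridge (device 0, function 0)
// is not multi-function (header type bit 7 clear), there is a single host controller; otherwise
// each present function of device 0 is treated as a separate host controller whose function
// number corresponds to the bus it is responsible for.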
void MMIOAccess::enumerate_hardware(Function<void(Address, ID)> callback)
{
    for (u16 seg = 0; seg < m_segments.size(); seg++) {
        dbgln_if(PCI_DEBUG, "PCI: Enumerating Memory mapped IO segment {}", seg);
        // Single PCI host controller.
        if ((early_read8_field(Address(seg), PCI_HEADER_TYPE) & 0x80) == 0) {
            enumerate_bus(-1, 0, callback, true);
            return;
        }

        // Multiple PCI host controllers.
        for (u8 function = 0; function < 8; ++function) {
            if (early_read16_field(Address(seg, 0, 0, function), PCI_VENDOR_ID) == PCI_NONE)
                break;
            enumerate_bus(-1, function, callback, false);
        }
    }
}

MMIOAccess::MMIOSegment::MMIOSegment(PhysicalAddress segment_base_addr, u8 start_bus, u8 end_bus)
    : m_base_addr(segment_base_addr)
    , m_start_bus(start_bus)
    , m_end_bus(end_bus)
{
}

u8 MMIOAccess::MMIOSegment::get_start_bus() const
{
    return m_start_bus;
}

u8 MMIOAccess::MMIOSegment::get_end_bus() const
{
    return m_end_bus;
}

size_t MMIOAccess::MMIOSegment::get_size() const
{
    // Note: the bus range is inclusive of end_bus (determine_memory_mapped_bus_region
    // accepts bus == end_bus), hence the + 1 when counting buses.
    return (PCI_MMIO_CONFIG_SPACE_SIZE * PCI_MAX_FUNCTIONS_PER_DEVICE * PCI_MAX_DEVICES_PER_BUS * (get_end_bus() - get_start_bus() + 1));
}

PhysicalAddress MMIOAccess::MMIOSegment::get_paddr() const
{
    return m_base_addr;
}
}
}