Device.cpp

/*
 * Copyright (c) 2021, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/Bus/PCI/IDs.h>
#include <Kernel/Bus/VirtIO/Console.h>
#include <Kernel/Bus/VirtIO/Device.h>
#include <Kernel/Bus/VirtIO/RNG.h>
#include <Kernel/CommandLine.h>
#include <Kernel/Sections.h>

namespace Kernel::VirtIO {

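// detect() walks the PCI bus looking for devices with the VirtIO vendor ID
// (0x1AF4) and constructs a driver for each device type handled here. Only
// the console and entropy devices are instantiated in this pass; the GPU is
// expected to have been picked up by the graphics subsystem already.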
UNMAP_AFTER_INIT void detect()
{
    if (kernel_command_line().disable_virtio())
        return;
    PCI::enumerate([&](const PCI::Address& address, PCI::ID id) {
        if (address.is_null() || id.is_null())
            return;
        // TODO: We should also be checking that the device_id is between 0x1000 and 0x107F inclusive
        if (id.vendor_id != PCI::VendorID::VirtIO)
            return;
        switch (id.device_id) {
        case PCI::DeviceID::VirtIOConsole: {
            [[maybe_unused]] auto& unused = adopt_ref(*new Console(address)).leak_ref();
            break;
        }
        case PCI::DeviceID::VirtIOEntropy: {
            [[maybe_unused]] auto& unused = adopt_ref(*new RNG(address)).leak_ref();
            break;
        }
        case PCI::DeviceID::VirtIOGPU: {
            // This should have been initialized by the graphics subsystem
            break;
        }
        default:
            dbgln_if(VIRTIO_DEBUG, "VirtIO: Unknown VirtIO device with ID: {}", id.device_id);
            break;
        }
    });
}

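// For transitional VirtIO PCI devices, the PCI subsystem device ID identifies
// the VirtIO device type (1 = network adapter, 2 = block device, 3 = console,
// 4 = entropy source). This function only uses it to derive a class name for
// logging.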
StringView determine_device_class(const PCI::Address& address)
{
    auto subsystem_device_id = PCI::get_subsystem_id(address);
    switch (subsystem_device_id) {
    case 1:
        return "VirtIONetAdapter";
    case 2:
        return "VirtIOBlockDevice";
    case 3:
        return "VirtIOConsole";
    case 4:
        return "VirtIORNG";
    }
    dbgln("VirtIO: Unknown subsystem_device_id {}", subsystem_device_id);
    VERIFY_NOT_REACHED();
}

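// The constructor walks the device's PCI capability list looking for
// vendor-specific capabilities, each of which is a virtio_pci_cap. Per the
// VirtIO 1.0 spec the capability layout is: cap_len at byte 0x2, cfg_type at
// 0x3, the BAR index at 0x4, a 32-bit offset at 0x8 and a 32-bit length at
// 0xc; the notify capability additionally carries notify_off_multiplier at
// 0x10. Finding a common configuration structure means the device can be
// driven through the modern MMIO interface instead of legacy I/O ports.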
UNMAP_AFTER_INIT VirtIO::Device::Device(PCI::Address address)
    : PCI::Device(address)
    , IRQHandler(PCI::get_interrupt_line(address))
    , m_io_base(IOAddress(PCI::get_BAR0(pci_address()) & ~1))
{
    dbgln("{}: Found @ {}", VirtIO::determine_device_class(address), pci_address());

    enable_bus_mastering(pci_address());
    PCI::enable_interrupt_line(pci_address());
    enable_irq();

    auto capabilities = PCI::get_physical_id(address).capabilities();
    for (auto& capability : capabilities) {
        if (capability.id() == PCI_CAPABILITY_VENDOR_SPECIFIC) {
            // We have a virtio_pci_cap
            auto cfg = make<Configuration>();
            auto raw_config_type = capability.read8(0x3);
            if (raw_config_type < static_cast<u8>(ConfigurationType::Common) || raw_config_type > static_cast<u8>(ConfigurationType::PCI)) {
                dbgln("{}: Unknown capability configuration type: {}", VirtIO::determine_device_class(address), raw_config_type);
                return;
            }
            cfg->cfg_type = static_cast<ConfigurationType>(raw_config_type);
            auto cap_length = capability.read8(0x2);
            if (cap_length < 0x10) {
                dbgln("{}: Unexpected capability size: {}", VirtIO::determine_device_class(address), cap_length);
                break;
            }
            cfg->bar = capability.read8(0x4);
            if (cfg->bar > 0x5) {
                dbgln("{}: Unexpected capability bar value: {}", VirtIO::determine_device_class(address), cfg->bar);
                break;
            }
            cfg->offset = capability.read32(0x8);
            cfg->length = capability.read32(0xc);
            dbgln_if(VIRTIO_DEBUG, "{}: Found configuration {}, bar: {}, offset: {}, length: {}", VirtIO::determine_device_class(address), (u32)cfg->cfg_type, cfg->bar, cfg->offset, cfg->length);
            if (cfg->cfg_type == ConfigurationType::Common)
                m_use_mmio = true;
            else if (cfg->cfg_type == ConfigurationType::Notify)
                m_notify_multiplier = capability.read32(0x10);
            m_configs.append(move(cfg));
        }
    }

    if (m_use_mmio) {
        m_common_cfg = get_config(ConfigurationType::Common, 0);
        m_notify_cfg = get_config(ConfigurationType::Notify, 0);
        m_isr_cfg = get_config(ConfigurationType::ISR, 0);
    }

    reset_device();
    set_status_bit(DEVICE_STATUS_ACKNOWLEDGE);
    set_status_bit(DEVICE_STATUS_DRIVER);
}

Device::~Device()
{
}

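// BAR mappings are created lazily: the first access to a BAR maps its whole
// MMIO window (rounded up to a page) as uncacheable kernel memory, and the
// mapping is cached in m_mmio for all subsequent configuration accesses.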
auto Device::mapping_for_bar(u8 bar) -> MappedMMIO&
{
    VERIFY(m_use_mmio);
    auto& mapping = m_mmio[bar];
    if (!mapping.base) {
        mapping.size = PCI::get_BAR_space_size(pci_address(), bar);
        mapping.base = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR(pci_address(), bar))), Memory::page_round_up(mapping.size), "VirtIO MMIO", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No);
        if (!mapping.base)
            dbgln("{}: Failed to map bar {}", VirtIO::determine_device_class(pci_address()), bar);
    }
    return mapping;
}

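// To notify a queue on a modern device, the driver writes the queue index to
// the notification address derived from the notify capability:
//     cap.offset + queue_notify_off * notify_off_multiplier
// where queue_notify_off was read from the common configuration when the
// queue was set up. Legacy devices instead take the queue index through the
// QUEUE_NOTIFY I/O port register.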
void Device::notify_queue(u16 queue_index)
{
    dbgln_if(VIRTIO_DEBUG, "{}: notifying about queue change at idx: {}", VirtIO::determine_device_class(pci_address()), queue_index);
    if (!m_notify_cfg)
        out<u16>(REG_QUEUE_NOTIFY, queue_index);
    else
        config_write16(*m_notify_cfg, get_queue(queue_index).notify_offset() * m_notify_multiplier, queue_index);
}

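// The config_read*/config_write* helpers access a configuration structure at
// a given offset inside its capability window, going through the (lazily
// created) MMIO mapping of the BAR that the capability points into.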
u8 Device::config_read8(const Configuration& config, u32 offset)
{
    return mapping_for_bar(config.bar).read<u8>(config.offset + offset);
}

u16 Device::config_read16(const Configuration& config, u32 offset)
{
    return mapping_for_bar(config.bar).read<u16>(config.offset + offset);
}

u32 Device::config_read32(const Configuration& config, u32 offset)
{
    return mapping_for_bar(config.bar).read<u32>(config.offset + offset);
}

void Device::config_write8(const Configuration& config, u32 offset, u8 value)
{
    mapping_for_bar(config.bar).write(config.offset + offset, value);
}

void Device::config_write16(const Configuration& config, u32 offset, u16 value)
{
    mapping_for_bar(config.bar).write(config.offset + offset, value);
}

void Device::config_write32(const Configuration& config, u32 offset, u32 value)
{
    mapping_for_bar(config.bar).write(config.offset + offset, value);
}

void Device::config_write64(const Configuration& config, u32 offset, u64 value)
{
    mapping_for_bar(config.bar).write(config.offset + offset, value);
}

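// The device status field tracks initialization progress: the driver sets
// ACKNOWLEDGE and DRIVER after reset, FEATURES_OK after feature negotiation
// and DRIVER_OK once setup is complete, while FAILED tells the device the
// driver has given up on it. m_status mirrors the last value we wrote.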
u8 Device::read_status_bits()
{
    if (!m_common_cfg)
        return in<u8>(REG_DEVICE_STATUS);
    return config_read8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS);
}

void Device::mask_status_bits(u8 status_mask)
{
    m_status &= status_mask;
    if (!m_common_cfg)
        out<u8>(REG_DEVICE_STATUS, m_status);
    else
        config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, m_status);
}

void Device::set_status_bit(u8 status_bit)
{
    m_status |= status_bit;
    if (!m_common_cfg)
        out<u8>(REG_DEVICE_STATUS, m_status);
    else
        config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, m_status);
}

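// Modern devices expose 64 feature bits through a 32-bit window: writing 0 or
// 1 to the feature-select register chooses which half the feature register
// exposes. Legacy devices only ever report 32 feature bits.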
u64 Device::get_device_features()
{
    if (!m_common_cfg)
        return in<u32>(REG_DEVICE_FEATURES);
    config_write32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE_SELECT, 0);
    auto lower_bits = config_read32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE);
    config_write32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE_SELECT, 1);
    u64 upper_bits = (u64)config_read32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE) << 32;
    return upper_bits | lower_bits;
}

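// Feature negotiation: the caller passes the bits the device offered together
// with the subset it wants. We force VIRTIO_F_VERSION_1 on, refuse packed
// rings (only split virtqueues are implemented), then write the accepted set
// back and set FEATURES_OK. Re-reading the status verifies the device agreed
// to our subset; if FEATURES_OK is no longer set, negotiation has failed.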
bool Device::accept_device_features(u64 device_features, u64 accepted_features)
{
    VERIFY(!m_did_accept_features);
    m_did_accept_features = true;

    if (is_feature_set(device_features, VIRTIO_F_VERSION_1)) {
        accepted_features |= VIRTIO_F_VERSION_1; // let the device know we're not a legacy driver
    }

    if (is_feature_set(device_features, VIRTIO_F_RING_PACKED)) {
        dbgln_if(VIRTIO_DEBUG, "{}: packed queues not yet supported", VirtIO::determine_device_class(pci_address()));
        accepted_features &= ~(VIRTIO_F_RING_PACKED);
    }

    // TODO: implement indirect descriptors to allow queue_size buffers instead of buffers totalling (PAGE_SIZE * queue_size) bytes
    if (is_feature_set(device_features, VIRTIO_F_INDIRECT_DESC)) {
        // accepted_features |= VIRTIO_F_INDIRECT_DESC;
    }

    if (is_feature_set(device_features, VIRTIO_F_IN_ORDER)) {
        accepted_features |= VIRTIO_F_IN_ORDER;
    }

    dbgln_if(VIRTIO_DEBUG, "{}: Device features: {}", VirtIO::determine_device_class(pci_address()), device_features);
    dbgln_if(VIRTIO_DEBUG, "{}: Accepted features: {}", VirtIO::determine_device_class(pci_address()), accepted_features);

    if (!m_common_cfg) {
        out<u32>(REG_GUEST_FEATURES, accepted_features);
    } else {
        config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE_SELECT, 0);
        config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE, accepted_features);
        config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE_SELECT, 1);
        config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE, accepted_features >> 32);
    }

    set_status_bit(DEVICE_STATUS_FEATURES_OK);
    m_status = read_status_bits();
    if (!(m_status & DEVICE_STATUS_FEATURES_OK)) {
        set_status_bit(DEVICE_STATUS_FAILED);
        dbgln("{}: Features not accepted by host!", VirtIO::determine_device_class(pci_address()));
        return false;
    }

    m_accepted_features = accepted_features;
    dbgln_if(VIRTIO_DEBUG, "{}: Features accepted by host", VirtIO::determine_device_class(pci_address()));
    return true;
}

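// Writing 0 to the device status register resets the device. The spec
// requires the driver to then poll until the status reads back as 0, since
// the reset is not necessarily instantaneous.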
void Device::reset_device()
{
    dbgln_if(VIRTIO_DEBUG, "{}: Reset device", VirtIO::determine_device_class(pci_address()));
    if (!m_common_cfg) {
        mask_status_bits(0);
        while (read_status_bits() != 0) {
            // TODO: delay a bit?
        }
        return;
    }
    config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, 0);
    while (config_read8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS) != 0) {
        // TODO: delay a bit?
    }
}

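// Setting up a queue on a modern device: select the queue, read back its size
// (a size of 0 means the device does not provide that queue), read its notify
// offset, allocate the split virtqueue, then hand the physical addresses of
// its descriptor, driver (available) and device (used) areas to the device.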
bool Device::setup_queue(u16 queue_index)
{
    if (!m_common_cfg)
        return false;
    config_write16(*m_common_cfg, COMMON_CFG_QUEUE_SELECT, queue_index);
    u16 queue_size = config_read16(*m_common_cfg, COMMON_CFG_QUEUE_SIZE);
    if (queue_size == 0) {
        dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] is unavailable!", VirtIO::determine_device_class(pci_address()), queue_index);
        return true;
    }
    u16 queue_notify_offset = config_read16(*m_common_cfg, COMMON_CFG_QUEUE_NOTIFY_OFF);
    auto queue = make<Queue>(queue_size, queue_notify_offset);
    if (queue->is_null())
        return false;
    config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DESC, queue->descriptor_area().get());
    config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DRIVER, queue->driver_area().get());
    config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DEVICE, queue->device_area().get());
    dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] configured with size: {}", VirtIO::determine_device_class(pci_address()), queue_index, queue_size);
    m_queues.append(move(queue));
    return true;
}

bool Device::activate_queue(u16 queue_index)
{
    if (!m_common_cfg)
        return false;
    config_write16(*m_common_cfg, COMMON_CFG_QUEUE_SELECT, queue_index);
    config_write16(*m_common_cfg, COMMON_CFG_QUEUE_ENABLE, true);
    dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] activated", VirtIO::determine_device_class(pci_address()), queue_index);
    return true;
}

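// setup_queues() decides how many queues to use (capped by the device's
// advertised queue count when that is knowable), then configures every queue
// before enabling any of them, since a queue may only be activated once all
// queues have been configured.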
bool Device::setup_queues(u16 requested_queue_count)
{
    VERIFY(!m_did_setup_queues);
    m_did_setup_queues = true;

    if (m_common_cfg) {
        auto maximum_queue_count = config_read16(*m_common_cfg, COMMON_CFG_NUM_QUEUES);
        if (requested_queue_count == 0) {
            m_queue_count = maximum_queue_count;
        } else if (requested_queue_count > maximum_queue_count) {
            dbgln("{}: {} queues requested but only {} available!", VirtIO::determine_device_class(pci_address()), requested_queue_count, maximum_queue_count);
            return false;
        } else {
            m_queue_count = requested_queue_count;
        }
    } else {
        m_queue_count = requested_queue_count;
        dbgln("{}: device's available queue count could not be determined!", VirtIO::determine_device_class(pci_address()));
    }

    dbgln_if(VIRTIO_DEBUG, "{}: Setting up {} queues", VirtIO::determine_device_class(pci_address()), m_queue_count);
    for (u16 i = 0; i < m_queue_count; i++) {
        if (!setup_queue(i))
            return false;
    }
    for (u16 i = 0; i < m_queue_count; i++) { // Queues can only be activated *after* all other queues have been configured
        if (!activate_queue(i))
            return false;
    }
    return true;
}

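// finish_init() completes the spec-mandated initialization sequence: features
// must have been negotiated and queues set up exactly once, and setting
// DRIVER_OK tells the device the driver is ready to drive it.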
void Device::finish_init()
{
    VERIFY(m_did_accept_features);                 // ensure features were negotiated
    VERIFY(m_did_setup_queues);                    // ensure queues were set-up
    VERIFY(!(m_status & DEVICE_STATUS_DRIVER_OK)); // ensure we didn't already finish the initialization
    set_status_bit(DEVICE_STATUS_DRIVER_OK);
    dbgln_if(VIRTIO_DEBUG, "{}: Finished initialization", VirtIO::determine_device_class(pci_address()));
}

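// Per the spec, reading the ISR status register also acknowledges and clears
// it: bit 0 signals a queue interrupt and bit 1 a device configuration
// change, so a single read tells us why we were interrupted and rearms the
// register for the next interrupt.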
u8 Device::isr_status()
{
    if (!m_isr_cfg)
        return in<u8>(REG_ISR_STATUS);
    return config_read8(*m_isr_cfg, 0);
}

bool Device::handle_irq(const RegisterState&)
{
    u8 isr_type = isr_status();
    if ((isr_type & (QUEUE_INTERRUPT | DEVICE_CONFIG_INTERRUPT)) == 0) {
        dbgln_if(VIRTIO_DEBUG, "{}: Handling interrupt with unknown type: {}", VirtIO::determine_device_class(pci_address()), isr_type);
        return false;
    }
    if (isr_type & DEVICE_CONFIG_INTERRUPT) {
        dbgln_if(VIRTIO_DEBUG, "{}: VirtIO Device config interrupt!", VirtIO::determine_device_class(pci_address()));
        if (!handle_device_config_change()) {
            set_status_bit(DEVICE_STATUS_FAILED);
            dbgln("{}: Failed to handle device config change!", VirtIO::determine_device_class(pci_address()));
        }
    }
    if (isr_type & QUEUE_INTERRUPT) {
        dbgln_if(VIRTIO_DEBUG, "{}: VirtIO Queue interrupt!", VirtIO::determine_device_class(pci_address()));
        for (size_t i = 0; i < m_queues.size(); i++) {
            if (get_queue(i).new_data_available()) {
                handle_queue_update(i);
                return true;
            }
        }
        dbgln_if(VIRTIO_DEBUG, "{}: Got queue interrupt but all queues are up to date!", VirtIO::determine_device_class(pci_address()));
    }
    return true;
}

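// Submits a prepared descriptor chain to its queue and notifies the device,
// unless the device has suppressed notifications for that queue. The caller
// must already hold the queue's lock.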
void Device::supply_chain_and_notify(u16 queue_index, QueueChain& chain)
{
    auto& queue = get_queue(queue_index);
    VERIFY(&chain.queue() == &queue);
    VERIFY(queue.lock().is_locked());
    chain.submit_to_queue();
    if (queue.should_notify())
        notify_queue(queue_index);
}

}