/*
 * Copyright (c) 2021, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/Bus/PCI/API.h>
#include <Kernel/Bus/PCI/IDs.h>
#include <Kernel/Bus/VirtIO/Console.h>
#include <Kernel/Bus/VirtIO/Device.h>
#include <Kernel/Bus/VirtIO/RNG.h>
#include <Kernel/CommandLine.h>
#include <Kernel/Sections.h>
namespace Kernel::VirtIO {

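// Scan the PCI bus for VirtIO vendor devices and bring up the device types we
// have drivers for. GPU devices are skipped here, as the graphics subsystem
// initializes them earlier during boot.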
UNMAP_AFTER_INIT void detect()
{
    if (kernel_command_line().disable_virtio())
        return;
    PCI::enumerate([&](const PCI::Address& address, PCI::ID id) {
        if (address.is_null() || id.is_null())
            return;
        // TODO: We should also check that the device_id is between 0x1000 and 0x107F, inclusive.
        if (id.vendor_id != PCI::VendorID::VirtIO)
            return;
        switch (id.device_id) {
        case PCI::DeviceID::VirtIOConsole: {
            auto& console = Console::must_create(address).leak_ref();
            console.initialize();
            break;
        }
        case PCI::DeviceID::VirtIOEntropy: {
            auto& rng = RNG::must_create(address).leak_ref();
            rng.initialize();
            break;
        }
        case PCI::DeviceID::VirtIOGPU: {
            // This should have been initialized by the graphics subsystem
            break;
        }
        default:
            dbgln_if(VIRTIO_DEBUG, "VirtIO: Unknown VirtIO device with ID: {}", id.device_id);
            break;
        }
    });
}

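// Map a VirtIO device to a human-readable class name for logging. Legacy (and
// transitional) devices are identified by their PCI subsystem ID, modern
// devices directly by their PCI device ID.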
StringView determine_device_class(const PCI::Address& address)
{
    if (PCI::get_revision_id(address) == 0) {
        // Note: If the device is a legacy (or transitional) device, probe the
        // subsystem ID in the PCI header to figure out the device type.
        auto subsystem_device_id = PCI::get_subsystem_id(address);
        switch (subsystem_device_id) {
        case 1:
            return "VirtIONetAdapter";
        case 2:
            return "VirtIOBlockDevice";
        case 3:
            return "VirtIOConsole";
        case 4:
            return "VirtIORNG";
        }
        dbgln("VirtIO: Unknown subsystem_device_id {}", subsystem_device_id);
        VERIFY_NOT_REACHED();
    }
    auto id = PCI::get_id(address);
    VERIFY(id.vendor_id == PCI::VendorID::VirtIO);
    switch (id.device_id) {
    case PCI::DeviceID::VirtIONetAdapter:
        return "VirtIONetAdapter";
    case PCI::DeviceID::VirtIOBlockDevice:
        return "VirtIOBlockDevice";
    case PCI::DeviceID::VirtIOConsole:
        return "VirtIOConsole";
    case PCI::DeviceID::VirtIOEntropy:
        return "VirtIORNG";
    case PCI::DeviceID::VirtIOGPU:
        return "VirtIOGPU";
    }
    dbgln("VirtIO: Unknown device_id {}", id.device_id);
    VERIFY_NOT_REACHED();
}

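// Walk the PCI capability list looking for vendor-specific capabilities
// (virtio_pci_cap), each of which describes where a configuration structure
// (common, notify, ISR, ...) lives inside one of the device's BARs.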
UNMAP_AFTER_INIT void Device::initialize()
{
    auto address = pci_address();
    enable_bus_mastering(pci_address());
    PCI::enable_interrupt_line(pci_address());
    enable_irq();

    auto capabilities = PCI::get_physical_id(address).capabilities();
    for (auto& capability : capabilities) {
        if (capability.id() == PCI_CAPABILITY_VENDOR_SPECIFIC) {
            // We have a virtio_pci_cap
            auto cfg = make<Configuration>();
            auto raw_config_type = capability.read8(0x3);
            if (raw_config_type < static_cast<u8>(ConfigurationType::Common) || raw_config_type > static_cast<u8>(ConfigurationType::PCI)) {
                dbgln("{}: Unknown capability configuration type: {}", VirtIO::determine_device_class(address), raw_config_type);
                return;
            }
            cfg->cfg_type = static_cast<ConfigurationType>(raw_config_type);
            auto cap_length = capability.read8(0x2);
            if (cap_length < 0x10) {
                dbgln("{}: Unexpected capability size: {}", VirtIO::determine_device_class(address), cap_length);
                break;
            }
            cfg->bar = capability.read8(0x4);
            if (cfg->bar > 0x5) {
                dbgln("{}: Unexpected capability bar value: {}", VirtIO::determine_device_class(address), cfg->bar);
                break;
            }
            cfg->offset = capability.read32(0x8);
            cfg->length = capability.read32(0xc);
            dbgln_if(VIRTIO_DEBUG, "{}: Found configuration {}, bar: {}, offset: {}, length: {}", VirtIO::determine_device_class(address), (u32)cfg->cfg_type, cfg->bar, cfg->offset, cfg->length);
            if (cfg->cfg_type == ConfigurationType::Common)
                m_use_mmio = true;
            else if (cfg->cfg_type == ConfigurationType::Notify)
                m_notify_multiplier = capability.read32(0x10);
            m_configs.append(move(cfg));
        }
    }

    if (m_use_mmio) {
        m_common_cfg = get_config(ConfigurationType::Common, 0);
        m_notify_cfg = get_config(ConfigurationType::Notify, 0);
        m_isr_cfg = get_config(ConfigurationType::ISR, 0);
    }
    reset_device();
    set_status_bit(DEVICE_STATUS_ACKNOWLEDGE);
    set_status_bit(DEVICE_STATUS_DRIVER);
}

UNMAP_AFTER_INIT VirtIO::Device::Device(PCI::Address address)
    : PCI::Device(address)
    , IRQHandler(PCI::get_interrupt_line(address))
    , m_io_base(IOAddress(PCI::get_BAR0(pci_address()) & ~1))
{
    dbgln("{}: Found @ {}", VirtIO::determine_device_class(address), pci_address());
}

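// Lazily map the MMIO region behind the given BAR the first time a
// configuration structure inside it is accessed.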
auto Device::mapping_for_bar(u8 bar) -> MappedMMIO&
{
    VERIFY(m_use_mmio);
    auto& mapping = m_mmio[bar];
    if (!mapping.base) {
        // Query the BAR's size before mapping it, so the allocated region
        // covers the whole BAR.
        mapping.size = PCI::get_BAR_space_size(pci_address(), bar);
        auto region_or_error = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR(pci_address(), bar))), Memory::page_round_up(mapping.size), "VirtIO MMIO", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No);
        if (region_or_error.is_error()) {
            dbgln("{}: Failed to map bar {} - (size={}) {}", VirtIO::determine_device_class(pci_address()), bar, mapping.size, region_or_error.error());
        } else {
            mapping.base = region_or_error.release_value();
        }
    }
    return mapping;
}

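// Notify the device that new buffers were placed in the given queue. Legacy
// devices are notified through the REG_QUEUE_NOTIFY I/O register; modern
// devices through the notify configuration structure, where each queue's
// doorbell lives at notify_offset() * m_notify_multiplier.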
void Device::notify_queue(u16 queue_index)
{
    dbgln_if(VIRTIO_DEBUG, "{}: notifying about queue change at idx: {}", VirtIO::determine_device_class(pci_address()), queue_index);
    if (!m_notify_cfg)
        out<u16>(REG_QUEUE_NOTIFY, queue_index);
    else
        config_write16(*m_notify_cfg, get_queue(queue_index).notify_offset() * m_notify_multiplier, queue_index);
}

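// The config_read*/config_write* helpers access a configuration structure at
// a given offset within the BAR mapping it resides in.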
u8 Device::config_read8(const Configuration& config, u32 offset)
{
    return mapping_for_bar(config.bar).read<u8>(config.offset + offset);
}

u16 Device::config_read16(const Configuration& config, u32 offset)
{
    return mapping_for_bar(config.bar).read<u16>(config.offset + offset);
}

u32 Device::config_read32(const Configuration& config, u32 offset)
{
    return mapping_for_bar(config.bar).read<u32>(config.offset + offset);
}

void Device::config_write8(const Configuration& config, u32 offset, u8 value)
{
    mapping_for_bar(config.bar).write(config.offset + offset, value);
}

void Device::config_write16(const Configuration& config, u32 offset, u16 value)
{
    mapping_for_bar(config.bar).write(config.offset + offset, value);
}

void Device::config_write32(const Configuration& config, u32 offset, u32 value)
{
    mapping_for_bar(config.bar).write(config.offset + offset, value);
}

void Device::config_write64(const Configuration& config, u32 offset, u64 value)
{
    mapping_for_bar(config.bar).write(config.offset + offset, value);
}

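// The device status field tracks the driver/device handshake
// (ACKNOWLEDGE -> DRIVER -> FEATURES_OK -> DRIVER_OK). Legacy devices expose
// it as an I/O port register, modern devices via the common configuration
// structure.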
u8 Device::read_status_bits()
{
    if (!m_common_cfg)
        return in<u8>(REG_DEVICE_STATUS);
    return config_read8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS);
}

void Device::mask_status_bits(u8 status_mask)
{
    m_status &= status_mask;
    if (!m_common_cfg)
        out<u8>(REG_DEVICE_STATUS, m_status);
    else
        config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, m_status);
}

void Device::set_status_bit(u8 status_bit)
{
    m_status |= status_bit;
    if (!m_common_cfg)
        out<u8>(REG_DEVICE_STATUS, m_status);
    else
        config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, m_status);
}

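// Modern devices expose 64 feature bits, read as two 32-bit halves through
// the feature-select register; legacy devices only expose the lower 32 bits.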
u64 Device::get_device_features()
{
    if (!m_common_cfg)
        return in<u32>(REG_DEVICE_FEATURES);
    config_write32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE_SELECT, 0);
    auto lower_bits = config_read32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE);
    config_write32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE_SELECT, 1);
    u64 upper_bits = (u64)config_read32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE) << 32;
    return upper_bits | lower_bits;
}

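// Feature negotiation: we accept a subset of the offered feature bits, write
// them back, set FEATURES_OK, and then verify that the device kept it set.
// A subclass would typically drive this from its own initialization, along
// these lines (hypothetical sketch, not code from this file):
//
//     u64 offered = get_device_features();
//     accept_device_features(offered, offered & VIRTIO_F_VERSION_1);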
bool Device::accept_device_features(u64 device_features, u64 accepted_features)
{
    VERIFY(!m_did_accept_features);
    m_did_accept_features = true;

    if (is_feature_set(device_features, VIRTIO_F_VERSION_1)) {
        accepted_features |= VIRTIO_F_VERSION_1; // let the device know we're not a legacy driver
    }
    if (is_feature_set(device_features, VIRTIO_F_RING_PACKED)) {
        dbgln_if(VIRTIO_DEBUG, "{}: packed queues not yet supported", VirtIO::determine_device_class(pci_address()));
        accepted_features &= ~(VIRTIO_F_RING_PACKED);
    }
    // TODO: Implement indirect descriptors to allow queue_size buffers instead of buffers totalling (PAGE_SIZE * queue_size) bytes
    if (is_feature_set(device_features, VIRTIO_F_INDIRECT_DESC)) {
        // accepted_features |= VIRTIO_F_INDIRECT_DESC;
    }
    if (is_feature_set(device_features, VIRTIO_F_IN_ORDER)) {
        accepted_features |= VIRTIO_F_IN_ORDER;
    }
    dbgln_if(VIRTIO_DEBUG, "{}: Device features: {}", VirtIO::determine_device_class(pci_address()), device_features);
    dbgln_if(VIRTIO_DEBUG, "{}: Accepted features: {}", VirtIO::determine_device_class(pci_address()), accepted_features);

    if (!m_common_cfg) {
        out<u32>(REG_GUEST_FEATURES, accepted_features);
    } else {
        config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE_SELECT, 0);
        config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE, accepted_features);
        config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE_SELECT, 1);
        config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE, accepted_features >> 32);
    }
    set_status_bit(DEVICE_STATUS_FEATURES_OK);
    m_status = read_status_bits();
    if (!(m_status & DEVICE_STATUS_FEATURES_OK)) {
        set_status_bit(DEVICE_STATUS_FAILED);
        dbgln("{}: Features not accepted by host!", VirtIO::determine_device_class(pci_address()));
        return false;
    }
    m_accepted_features = accepted_features;
    dbgln_if(VIRTIO_DEBUG, "{}: Features accepted by host", VirtIO::determine_device_class(pci_address()));
    return true;
}

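// Reset the device by writing 0 to its status field, then spin until the
// device acknowledges the reset by reading the status back as 0.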
void Device::reset_device()
{
    dbgln_if(VIRTIO_DEBUG, "{}: Reset device", VirtIO::determine_device_class(pci_address()));
    if (!m_common_cfg) {
        mask_status_bits(0);
        while (read_status_bits() != 0) {
            // TODO: delay a bit?
        }
        return;
    }
    config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, 0);
    while (config_read8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS) != 0) {
        // TODO: delay a bit?
    }
}

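// Select the queue, query its maximum size, allocate its descriptor, driver
// and device areas, and hand their physical addresses to the device.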
bool Device::setup_queue(u16 queue_index)
{
    if (!m_common_cfg)
        return false;

    config_write16(*m_common_cfg, COMMON_CFG_QUEUE_SELECT, queue_index);
    u16 queue_size = config_read16(*m_common_cfg, COMMON_CFG_QUEUE_SIZE);
    if (queue_size == 0) {
        dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] is unavailable!", VirtIO::determine_device_class(pci_address()), queue_index);
        return true;
    }

    u16 queue_notify_offset = config_read16(*m_common_cfg, COMMON_CFG_QUEUE_NOTIFY_OFF);

    auto queue = make<Queue>(queue_size, queue_notify_offset);
    if (queue->is_null())
        return false;

    config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DESC, queue->descriptor_area().get());
    config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DRIVER, queue->driver_area().get());
    config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DEVICE, queue->device_area().get());

    dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] configured with size: {}", VirtIO::determine_device_class(pci_address()), queue_index, queue_size);

    m_queues.append(move(queue));
    return true;
}

bool Device::activate_queue(u16 queue_index)
{
    if (!m_common_cfg)
        return false;

    config_write16(*m_common_cfg, COMMON_CFG_QUEUE_SELECT, queue_index);
    config_write16(*m_common_cfg, COMMON_CFG_QUEUE_ENABLE, true);

    dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] activated", VirtIO::determine_device_class(pci_address()), queue_index);
    return true;
}

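// Configure all requested queues (or every available queue when 0 is
// requested) in one pass, then enable them in a second pass, since queues may
// only be enabled once all of them have been configured.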
bool Device::setup_queues(u16 requested_queue_count)
{
    VERIFY(!m_did_setup_queues);
    m_did_setup_queues = true;

    if (m_common_cfg) {
        auto maximum_queue_count = config_read16(*m_common_cfg, COMMON_CFG_NUM_QUEUES);
        if (requested_queue_count == 0) {
            m_queue_count = maximum_queue_count;
        } else if (requested_queue_count > maximum_queue_count) {
            dbgln("{}: {} queues requested but only {} available!", VirtIO::determine_device_class(pci_address()), requested_queue_count, maximum_queue_count);
            return false;
        } else {
            m_queue_count = requested_queue_count;
        }
    } else {
        m_queue_count = requested_queue_count;
        dbgln("{}: device's available queue count could not be determined!", VirtIO::determine_device_class(pci_address()));
    }

    dbgln_if(VIRTIO_DEBUG, "{}: Setting up {} queues", VirtIO::determine_device_class(pci_address()), m_queue_count);
    for (u16 i = 0; i < m_queue_count; i++) {
        if (!setup_queue(i))
            return false;
    }
    for (u16 i = 0; i < m_queue_count; i++) { // Queues can only be activated *after* all other queues have been configured
        if (!activate_queue(i))
            return false;
    }
    return true;
}

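// Complete the handshake by setting DRIVER_OK; after that the device may
// process buffers. The VERIFYs below encode the required ordering, which a
// subclass would typically follow like this (hypothetical sketch):
//
//     Device::initialize();   // reset, ACKNOWLEDGE, DRIVER
//     accept_device_features(get_device_features(), wanted_features);
//     setup_queues(queue_count);
//     finish_init();          // DRIVER_OK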
void Device::finish_init()
{
    VERIFY(m_did_accept_features);                 // ensure features were negotiated
    VERIFY(m_did_setup_queues);                    // ensure queues were set up
    VERIFY(!(m_status & DEVICE_STATUS_DRIVER_OK)); // ensure we didn't already finish the initialization

    set_status_bit(DEVICE_STATUS_DRIVER_OK);
    dbgln_if(VIRTIO_DEBUG, "{}: Finished initialization", VirtIO::determine_device_class(pci_address()));
}

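// Note: Reading the ISR status register also acknowledges and clears it, so
// it should only be read once per interrupt.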
u8 Device::isr_status()
{
    if (!m_isr_cfg)
        return in<u8>(REG_ISR_STATUS);
    return config_read8(*m_isr_cfg, 0);
}

bool Device::handle_irq(const RegisterState&)
{
    u8 isr_type = isr_status();
    if ((isr_type & (QUEUE_INTERRUPT | DEVICE_CONFIG_INTERRUPT)) == 0) {
        dbgln_if(VIRTIO_DEBUG, "{}: Handling interrupt with unknown type: {}", VirtIO::determine_device_class(pci_address()), isr_type);
        return false;
    }
    if (isr_type & DEVICE_CONFIG_INTERRUPT) {
        dbgln_if(VIRTIO_DEBUG, "{}: VirtIO Device config interrupt!", VirtIO::determine_device_class(pci_address()));
        if (!handle_device_config_change()) {
            set_status_bit(DEVICE_STATUS_FAILED);
            dbgln("{}: Failed to handle device config change!", VirtIO::determine_device_class(pci_address()));
        }
    }
    if (isr_type & QUEUE_INTERRUPT) {
        dbgln_if(VIRTIO_DEBUG, "{}: VirtIO Queue interrupt!", VirtIO::determine_device_class(pci_address()));
        for (size_t i = 0; i < m_queues.size(); i++) {
            if (get_queue(i).new_data_available()) {
                handle_queue_update(i);
                return true;
            }
        }
        dbgln_if(VIRTIO_DEBUG, "{}: Got queue interrupt but all queues are up to date!", VirtIO::determine_device_class(pci_address()));
    }
    return true;
}

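// Submit a fully-built descriptor chain to its queue and ring the doorbell,
// unless the device has suppressed notifications.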
void Device::supply_chain_and_notify(u16 queue_index, QueueChain& chain)
{
    auto& queue = get_queue(queue_index);
    VERIFY(&chain.queue() == &queue);
    VERIFY(queue.lock().is_locked());
    chain.submit_to_queue();
    if (queue.should_notify())
        notify_queue(queue_index);
}

}