/*
 * Copyright (c) 2021, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/Bus/PCI/IDs.h>
#include <Kernel/Bus/VirtIO/Console.h>
#include <Kernel/Bus/VirtIO/Device.h>
#include <Kernel/Bus/VirtIO/RNG.h>
#include <Kernel/CommandLine.h>
#include <Kernel/Sections.h>

namespace Kernel::VirtIO {

UNMAP_AFTER_INIT void detect()
{
    if (kernel_command_line().disable_virtio())
        return;
    PCI::enumerate([&](const PCI::Address& address, PCI::ID id) {
        if (address.is_null() || id.is_null())
            return;
        // TODO: We should also be checking that the device_id is between 0x1000 and 0x107F, inclusive.
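        // A minimal sketch of that check (illustrative only, not enabled here);
        // the VirtIO specification reserves PCI device IDs 0x1000-0x107F for
        // VirtIO devices (0x1000-0x103F transitional, 0x1040-0x107F modern):
        //
        //     if (id.device_id < 0x1000 || id.device_id > 0x107F)
        //         return;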
        if (id.vendor_id != PCI::VendorID::VirtIO)
            return;
        switch (id.device_id) {
        case PCI::DeviceID::VirtIOConsole: {
            auto& console = Console::must_create(address).leak_ref();
            console.initialize();
            break;
        }
        case PCI::DeviceID::VirtIOEntropy: {
            auto& rng = RNG::must_create(address).leak_ref();
            rng.initialize();
            break;
        }
        case PCI::DeviceID::VirtIOGPU: {
            // This should have been initialized by the graphics subsystem
            break;
        }
        default:
            dbgln_if(VIRTIO_DEBUG, "VirtIO: Unknown VirtIO device with ID: {}", id.device_id);
            break;
        }
    });
}

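// Resolves a human-readable class name for the VirtIO device at the given
// address. Legacy (and transitional) devices are identified by their PCI
// subsystem ID, while modern devices are identified by their PCI device ID.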
StringView determine_device_class(const PCI::Address& address)
{
    if (PCI::get_revision_id(address) == 0) {
        // Note: A revision ID of 0 means this is a legacy (or transitional) device,
        // so probe the subsystem ID in the PCI header to figure out its device type.
        auto subsystem_device_id = PCI::get_subsystem_id(address);
        switch (subsystem_device_id) {
        case 1:
            return "VirtIONetAdapter";
        case 2:
            return "VirtIOBlockDevice";
        case 3:
            return "VirtIOConsole";
        case 4:
            return "VirtIORNG";
        }
        dbgln("VirtIO: Unknown subsystem_device_id {}", subsystem_device_id);
        VERIFY_NOT_REACHED();
    }
    auto id = PCI::get_id(address);
    VERIFY(id.vendor_id == PCI::VendorID::VirtIO);
    switch (id.device_id) {
    case PCI::DeviceID::VirtIONetAdapter:
        return "VirtIONetAdapter";
    case PCI::DeviceID::VirtIOBlockDevice:
        return "VirtIOBlockDevice";
    case PCI::DeviceID::VirtIOConsole:
        return "VirtIOConsole";
    case PCI::DeviceID::VirtIOEntropy:
        return "VirtIORNG";
    case PCI::DeviceID::VirtIOGPU:
        return "VirtIOGPU";
    }
    dbgln("VirtIO: Unknown device_id {}", id.device_id);
    VERIFY_NOT_REACHED();
}

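// Device initialization follows the sequence laid out in the VirtIO
// specification: discover the configuration structures via PCI capabilities,
// reset the device, then set the ACKNOWLEDGE and DRIVER status bits. Feature
// negotiation, queue setup and DRIVER_OK are driven later by the subclass
// (see accept_device_features(), setup_queues() and finish_init() below).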
UNMAP_AFTER_INIT void Device::initialize()
{
    auto address = pci_address();
    enable_bus_mastering(pci_address());
    PCI::enable_interrupt_line(pci_address());
    enable_irq();
    auto capabilities = PCI::get_physical_id(address).capabilities();
    for (auto& capability : capabilities) {
        if (capability.id() == PCI_CAPABILITY_VENDOR_SPECIFIC) {
            // We have a virtio_pci_cap
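            // The capability layout matches struct virtio_pci_cap from the
            // VirtIO specification, which is what the offsets below refer to:
            //
            //     struct virtio_pci_cap {
            //         u8 cap_vndr;    // 0x0: PCI_CAPABILITY_VENDOR_SPECIFIC
            //         u8 cap_next;    // 0x1: offset of the next capability
            //         u8 cap_len;     // 0x2: length of this capability
            //         u8 cfg_type;    // 0x3: ConfigurationType
            //         u8 bar;         // 0x4: which BAR the structure lives in
            //         u8 padding[3];
            //         u32 offset;     // 0x8: offset within that BAR
            //         u32 length;     // 0xc: length of the structure
            //     };
            //
            // Notify capabilities carry an extra u32 notify_off_multiplier at 0x10.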
            auto cfg = make<Configuration>();
            auto raw_config_type = capability.read8(0x3);
            if (raw_config_type < static_cast<u8>(ConfigurationType::Common) || raw_config_type > static_cast<u8>(ConfigurationType::PCI)) {
                dbgln("{}: Unknown capability configuration type: {}", VirtIO::determine_device_class(address), raw_config_type);
                return;
            }
            cfg->cfg_type = static_cast<ConfigurationType>(raw_config_type);
            auto cap_length = capability.read8(0x2);
            if (cap_length < 0x10) {
                dbgln("{}: Unexpected capability size: {}", VirtIO::determine_device_class(address), cap_length);
                break;
            }
            cfg->bar = capability.read8(0x4);
            if (cfg->bar > 0x5) {
                dbgln("{}: Unexpected capability bar value: {}", VirtIO::determine_device_class(address), cfg->bar);
                break;
            }
            cfg->offset = capability.read32(0x8);
            cfg->length = capability.read32(0xc);
            dbgln_if(VIRTIO_DEBUG, "{}: Found configuration {}, bar: {}, offset: {}, length: {}", VirtIO::determine_device_class(address), (u32)cfg->cfg_type, cfg->bar, cfg->offset, cfg->length);
            if (cfg->cfg_type == ConfigurationType::Common)
                m_use_mmio = true;
            else if (cfg->cfg_type == ConfigurationType::Notify)
                m_notify_multiplier = capability.read32(0x10);
            m_configs.append(move(cfg));
        }
    }
    if (m_use_mmio) {
        m_common_cfg = get_config(ConfigurationType::Common, 0);
        m_notify_cfg = get_config(ConfigurationType::Notify, 0);
        m_isr_cfg = get_config(ConfigurationType::ISR, 0);
    }
    reset_device();
    set_status_bit(DEVICE_STATUS_ACKNOWLEDGE);
    set_status_bit(DEVICE_STATUS_DRIVER);
}

UNMAP_AFTER_INIT VirtIO::Device::Device(PCI::Address address)
    : PCI::Device(address)
    , IRQHandler(PCI::get_interrupt_line(address))
    , m_io_base(IOAddress(PCI::get_BAR0(pci_address()) & ~1)) // Mask off the I/O space indicator bit of BAR0 to get the port base.
{
    dbgln("{}: Found @ {}", VirtIO::determine_device_class(address), pci_address());
}

auto Device::mapping_for_bar(u8 bar) -> MappedMMIO&
{
    VERIFY(m_use_mmio);
    auto& mapping = m_mmio[bar];
    if (!mapping.base) {
        // Lazily map the BAR's physical range into kernel virtual memory on first use.
        mapping.size = PCI::get_BAR_space_size(pci_address(), bar);
        mapping.base = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR(pci_address(), bar))), Memory::page_round_up(mapping.size), "VirtIO MMIO", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No);
        if (!mapping.base)
            dbgln("{}: Failed to map bar {}", VirtIO::determine_device_class(pci_address()), bar);
    }
    return mapping;
}

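// Per the VirtIO specification, the notification address for a queue is
// notify_cfg.offset + queue_notify_off * notify_off_multiplier, and the driver
// notifies the device by writing the queue index there. Legacy devices use the
// fixed REG_QUEUE_NOTIFY I/O register instead.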
void Device::notify_queue(u16 queue_index)
{
    dbgln_if(VIRTIO_DEBUG, "{}: notifying about queue change at idx: {}", VirtIO::determine_device_class(pci_address()), queue_index);
    if (!m_notify_cfg)
        out<u16>(REG_QUEUE_NOTIFY, queue_index);
    else
        config_write16(*m_notify_cfg, get_queue(queue_index).notify_offset() * m_notify_multiplier, queue_index);
}

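// Typed accessors for the configuration structures. All offsets are relative
// to the start of the given structure within its BAR.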
u8 Device::config_read8(const Configuration& config, u32 offset)
{
    return mapping_for_bar(config.bar).read<u8>(config.offset + offset);
}

u16 Device::config_read16(const Configuration& config, u32 offset)
{
    return mapping_for_bar(config.bar).read<u16>(config.offset + offset);
}

u32 Device::config_read32(const Configuration& config, u32 offset)
{
    return mapping_for_bar(config.bar).read<u32>(config.offset + offset);
}

void Device::config_write8(const Configuration& config, u32 offset, u8 value)
{
    mapping_for_bar(config.bar).write(config.offset + offset, value);
}

void Device::config_write16(const Configuration& config, u32 offset, u16 value)
{
    mapping_for_bar(config.bar).write(config.offset + offset, value);
}

void Device::config_write32(const Configuration& config, u32 offset, u32 value)
{
    mapping_for_bar(config.bar).write(config.offset + offset, value);
}

void Device::config_write64(const Configuration& config, u32 offset, u64 value)
{
    mapping_for_bar(config.bar).write(config.offset + offset, value);
}

u8 Device::read_status_bits()
{
    if (!m_common_cfg)
        return in<u8>(REG_DEVICE_STATUS);
    return config_read8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS);
}

void Device::mask_status_bits(u8 status_mask)
{
    // Note: This keeps only the bits in status_mask, so mask_status_bits(0)
    // clears the whole status register (used by reset_device() for legacy devices).
    m_status &= status_mask;
    if (!m_common_cfg)
        out<u8>(REG_DEVICE_STATUS, m_status);
    else
        config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, m_status);
}

void Device::set_status_bit(u8 status_bit)
{
    m_status |= status_bit;
    if (!m_common_cfg)
        out<u8>(REG_DEVICE_STATUS, m_status);
    else
        config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, m_status);
}

u64 Device::get_device_features()
{
    if (!m_common_cfg)
        return in<u32>(REG_DEVICE_FEATURES);
    // The modern feature register is a 32-bit window: select the low and high
    // halves in turn and assemble the full 64-bit feature set.
    config_write32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE_SELECT, 0);
    auto lower_bits = config_read32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE);
    config_write32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE_SELECT, 1);
    u64 upper_bits = (u64)config_read32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE) << 32;
    return upper_bits | lower_bits;
}

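// Feature negotiation as described by the VirtIO specification: take the
// feature bits offered by the device, acknowledge the subset we support,
// set FEATURES_OK, and re-read the status to check that the device accepted
// our subset. For illustration, the is_feature_set() helper used below
// (defined in the header) is presumably just a mask test along these lines:
//
//     static bool is_feature_set(u64 feature_set, u64 feature)
//     {
//         return (feature_set & feature) == feature;
//     }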
bool Device::accept_device_features(u64 device_features, u64 accepted_features)
{
    VERIFY(!m_did_accept_features);
    m_did_accept_features = true;
    if (is_feature_set(device_features, VIRTIO_F_VERSION_1)) {
        accepted_features |= VIRTIO_F_VERSION_1; // let the device know we're not a legacy driver
    }
    if (is_feature_set(device_features, VIRTIO_F_RING_PACKED)) {
        dbgln_if(VIRTIO_DEBUG, "{}: packed queues not yet supported", VirtIO::determine_device_class(pci_address()));
        accepted_features &= ~(VIRTIO_F_RING_PACKED);
    }
    // TODO: Implement indirect descriptors to allow queue_size buffers instead of buffers totalling (PAGE_SIZE * queue_size) bytes.
    if (is_feature_set(device_features, VIRTIO_F_INDIRECT_DESC)) {
        // accepted_features |= VIRTIO_F_INDIRECT_DESC;
    }
    if (is_feature_set(device_features, VIRTIO_F_IN_ORDER)) {
        accepted_features |= VIRTIO_F_IN_ORDER;
    }
    dbgln_if(VIRTIO_DEBUG, "{}: Device features: {}", VirtIO::determine_device_class(pci_address()), device_features);
    dbgln_if(VIRTIO_DEBUG, "{}: Accepted features: {}", VirtIO::determine_device_class(pci_address()), accepted_features);
    if (!m_common_cfg) {
        // Legacy devices only have 32 feature bits.
        out<u32>(REG_GUEST_FEATURES, accepted_features);
    } else {
        config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE_SELECT, 0);
        config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE, accepted_features);
        config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE_SELECT, 1);
        config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE, accepted_features >> 32);
    }
    set_status_bit(DEVICE_STATUS_FEATURES_OK);
    m_status = read_status_bits();
    if (!(m_status & DEVICE_STATUS_FEATURES_OK)) {
        // The device refused our feature subset; mark the device as failed.
        set_status_bit(DEVICE_STATUS_FAILED);
        dbgln("{}: Features not accepted by host!", VirtIO::determine_device_class(pci_address()));
        return false;
    }
    m_accepted_features = accepted_features;
    dbgln_if(VIRTIO_DEBUG, "{}: Features accepted by host", VirtIO::determine_device_class(pci_address()));
    return true;
}

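// Writing 0 to the device status register resets the device. The specification
// requires the driver to poll until the register reads back 0 before
// re-initializing.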
void Device::reset_device()
{
    dbgln_if(VIRTIO_DEBUG, "{}: Reset device", VirtIO::determine_device_class(pci_address()));
    if (!m_common_cfg) {
        mask_status_bits(0);
        while (read_status_bits() != 0) {
            // TODO: delay a bit?
        }
        return;
    }
    config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, 0);
    while (config_read8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS) != 0) {
        // TODO: delay a bit?
    }
}

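// A split virtqueue consists of three physically contiguous areas that we hand
// to the device: the descriptor table, the driver ("available") ring and the
// device ("used") ring. A queue size of 0 means the queue doesn't exist.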
bool Device::setup_queue(u16 queue_index)
{
    if (!m_common_cfg)
        return false;
    config_write16(*m_common_cfg, COMMON_CFG_QUEUE_SELECT, queue_index);
    u16 queue_size = config_read16(*m_common_cfg, COMMON_CFG_QUEUE_SIZE);
    if (queue_size == 0) {
        dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] is unavailable!", VirtIO::determine_device_class(pci_address()), queue_index);
        return true;
    }
    u16 queue_notify_offset = config_read16(*m_common_cfg, COMMON_CFG_QUEUE_NOTIFY_OFF);
    auto queue = make<Queue>(queue_size, queue_notify_offset);
    if (queue->is_null())
        return false;
    // Tell the device where the three virtqueue areas live in physical memory.
    config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DESC, queue->descriptor_area().get());
    config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DRIVER, queue->driver_area().get());
    config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DEVICE, queue->device_area().get());
    dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] configured with size: {}", VirtIO::determine_device_class(pci_address()), queue_index, queue_size);
    m_queues.append(move(queue));
    return true;
}

bool Device::activate_queue(u16 queue_index)
{
    if (!m_common_cfg)
        return false;
    config_write16(*m_common_cfg, COMMON_CFG_QUEUE_SELECT, queue_index);
    config_write16(*m_common_cfg, COMMON_CFG_QUEUE_ENABLE, true);
    dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] activated", VirtIO::determine_device_class(pci_address()), queue_index);
    return true;
}

bool Device::setup_queues(u16 requested_queue_count)
{
    VERIFY(!m_did_setup_queues);
    m_did_setup_queues = true;
    if (m_common_cfg) {
        auto maximum_queue_count = config_read16(*m_common_cfg, COMMON_CFG_NUM_QUEUES);
        if (requested_queue_count == 0) {
            m_queue_count = maximum_queue_count;
        } else if (requested_queue_count > maximum_queue_count) {
            dbgln("{}: {} queues requested but only {} available!", VirtIO::determine_device_class(pci_address()), requested_queue_count, maximum_queue_count);
            return false;
        } else {
            m_queue_count = requested_queue_count;
        }
    } else {
        m_queue_count = requested_queue_count;
        dbgln("{}: device's available queue count could not be determined!", VirtIO::determine_device_class(pci_address()));
    }
    dbgln_if(VIRTIO_DEBUG, "{}: Setting up {} queues", VirtIO::determine_device_class(pci_address()), m_queue_count);
    for (u16 i = 0; i < m_queue_count; i++) {
        if (!setup_queue(i))
            return false;
    }
    for (u16 i = 0; i < m_queue_count; i++) { // Queues can only be activated *after* all other queues have been configured
        if (!activate_queue(i))
            return false;
    }
    return true;
}

void Device::finish_init()
{
    VERIFY(m_did_accept_features);                 // ensure features were negotiated
    VERIFY(m_did_setup_queues);                    // ensure queues were set up
    VERIFY(!(m_status & DEVICE_STATUS_DRIVER_OK)); // ensure we didn't already finish initialization
    set_status_bit(DEVICE_STATUS_DRIVER_OK);
    dbgln_if(VIRTIO_DEBUG, "{}: Finished initialization", VirtIO::determine_device_class(pci_address()));
}

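// The ISR status register tells us why the device raised an interrupt:
// bit 0 (QUEUE_INTERRUPT) signals used-buffer notifications, and bit 1
// (DEVICE_CONFIG_INTERRUPT) signals a configuration change. Reading the
// register also de-asserts the interrupt.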
u8 Device::isr_status()
{
    if (!m_isr_cfg)
        return in<u8>(REG_ISR_STATUS);
    return config_read8(*m_isr_cfg, 0);
}

bool Device::handle_irq(const RegisterState&)
{
    u8 isr_type = isr_status();
    if ((isr_type & (QUEUE_INTERRUPT | DEVICE_CONFIG_INTERRUPT)) == 0) {
        dbgln_if(VIRTIO_DEBUG, "{}: Handling interrupt with unknown type: {}", VirtIO::determine_device_class(pci_address()), isr_type);
        return false;
    }
    if (isr_type & DEVICE_CONFIG_INTERRUPT) {
        dbgln_if(VIRTIO_DEBUG, "{}: VirtIO Device config interrupt!", VirtIO::determine_device_class(pci_address()));
        if (!handle_device_config_change()) {
            set_status_bit(DEVICE_STATUS_FAILED);
            dbgln("{}: Failed to handle device config change!", VirtIO::determine_device_class(pci_address()));
        }
    }
    if (isr_type & QUEUE_INTERRUPT) {
        dbgln_if(VIRTIO_DEBUG, "{}: VirtIO Queue interrupt!", VirtIO::determine_device_class(pci_address()));
        for (size_t i = 0; i < m_queues.size(); i++) {
            if (get_queue(i).new_data_available()) {
                handle_queue_update(i);
                return true;
            }
        }
        dbgln_if(VIRTIO_DEBUG, "{}: Got queue interrupt but all queues are up to date!", VirtIO::determine_device_class(pci_address()));
    }
    return true;
}

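// Submits a prepared descriptor chain and notifies the device if it asked to
// be notified. A hypothetical caller would look roughly like this (the buffer
// setup is illustrative only; the exact QueueChain API lives in Queue.h):
//
//     auto& queue = get_queue(queue_index);
//     ScopedSpinLock lock(queue.lock());
//     QueueChain chain(queue);
//     chain.add_buffer_to_chain(buffer_paddr, buffer_size, BufferType::DeviceWritable);
//     supply_chain_and_notify(queue_index, chain);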
void Device::supply_chain_and_notify(u16 queue_index, QueueChain& chain)
{
    auto& queue = get_queue(queue_index);
    VERIFY(&chain.queue() == &queue);
    VERIFY(queue.lock().is_locked());
    chain.submit_to_queue();
    if (queue.should_notify())
        notify_queue(queue_index);
}

}