  1. /*
  2. * Copyright (c) 2021, the SerenityOS developers.
  3. *
  4. * SPDX-License-Identifier: BSD-2-Clause
  5. */
  6. #include <Kernel/CommandLine.h>
  7. #include <Kernel/PCI/IDs.h>
  8. #include <Kernel/VirtIO/VirtIO.h>
  9. #include <Kernel/VirtIO/VirtIOConsole.h>
  10. #include <Kernel/VirtIO/VirtIORNG.h>
  11. namespace Kernel {
  12. void VirtIO::detect()
  13. {
  14. if (kernel_command_line().disable_virtio())
  15. return;
  16. PCI::enumerate([&](const PCI::Address& address, PCI::ID id) {
  17. if (address.is_null() || id.is_null())
  18. return;
  19. if (id.vendor_id != (u16)PCIVendorID::VirtIO)
  20. return;
  21. switch (id.device_id) {
  22. case (u16)PCIDeviceID::VirtIOConsole: {
  23. [[maybe_unused]] auto& unused = adopt_ref(*new VirtIOConsole(address)).leak_ref();
  24. break;
  25. }
  26. case (u16)PCIDeviceID::VirtIOEntropy: {
  27. [[maybe_unused]] auto& unused = adopt_ref(*new VirtIORNG(address)).leak_ref();
  28. break;
  29. }
  30. default:
  31. dbgln_if(VIRTIO_DEBUG, "VirtIO: Unknown VirtIO device with ID: {}", id.device_id);
  32. break;
  33. }
  34. });
  35. }
// Base VirtIO PCI device bring-up.
// Enables the device on the PCI bus, performs the first steps of the VirtIO
// initialization handshake (reset -> ACKNOWLEDGE -> DRIVER), and scans the
// PCI capability list for vendor-specific virtio_pci_cap entries describing
// the modern (MMIO) configuration structures. If a Common configuration is
// found, the device is driven through MMIO; otherwise the legacy I/O-port
// interface at BAR0 is used.
VirtIODevice::VirtIODevice(PCI::Address address, String class_name)
    : PCI::Device(address, PCI::get_interrupt_line(address))
    , m_class_name(move(class_name))
    , m_io_base(IOAddress(PCI::get_BAR0(pci_address()) & ~1)) // bit 0 of an I/O-space BAR is the space indicator; mask it off to get the port base
{
    dbgln("{}: Found @ {}", m_class_name, pci_address());
    enable_bus_mastering(pci_address());
    PCI::enable_interrupt_line(pci_address());
    enable_irq();
    reset_device();
    set_status_bit(DEVICE_STATUS_ACKNOWLEDGE);
    auto capabilities = PCI::get_physical_id(address).capabilities();
    for (auto& capability : capabilities) {
        if (capability.id() == PCI_CAPABILITY_VENDOR_SPECIFIC) {
            // We have a virtio_pci_cap
            auto cfg = make<Configuration>();
            // Byte 0x3 of the capability is cfg_type.
            auto raw_config_type = capability.read8(0x3);
            if (raw_config_type < static_cast<u8>(ConfigurationType::Common) || raw_config_type > static_cast<u8>(ConfigurationType::PCI)) {
                dbgln("{}: Unknown capability configuration type: {}", m_class_name, raw_config_type);
                // NOTE(review): this returns from the constructor (skipping the
                // DEVICE_STATUS_DRIVER step below), while the size/bar checks
                // below merely 'break' out of the capability loop — confirm
                // whether this asymmetry is intended.
                return;
            }
            cfg->cfg_type = static_cast<ConfigurationType>(raw_config_type);
            auto cap_length = capability.read8(0x2);
            if (cap_length < 0x10) {
                // A virtio_pci_cap is at least 16 bytes; anything shorter is malformed.
                dbgln("{}: Unexpected capability size: {}", m_class_name, cap_length);
                break;
            }
            cfg->bar = capability.read8(0x4);
            if (cfg->bar > 0x5) {
                // PCI devices only have BARs 0..5.
                dbgln("{}: Unexpected capability bar value: {}", m_class_name, cfg->bar);
                break;
            }
            cfg->offset = capability.read32(0x8);
            cfg->length = capability.read32(0xc);
            dbgln_if(VIRTIO_DEBUG, "{}: Found configuration {}, bar: {}, offset: {}, length: {}", m_class_name, (u32)cfg->cfg_type, cfg->bar, cfg->offset, cfg->length);
            if (cfg->cfg_type == ConfigurationType::Common)
                m_use_mmio = true;
            else if (cfg->cfg_type == ConfigurationType::Notify)
                m_notify_multiplier = capability.read32(0x10); // notify_off_multiplier follows the generic cap header
            m_configs.append(move(cfg));
        }
    }
    if (m_use_mmio) {
        // Cache the configuration structures we access most often.
        m_common_cfg = get_config(ConfigurationType::Common, 0);
        m_notify_cfg = get_config(ConfigurationType::Notify, 0);
        m_isr_cfg = get_config(ConfigurationType::ISR, 0);
    }
    set_status_bit(DEVICE_STATUS_DRIVER);
}
  85. VirtIODevice::~VirtIODevice()
  86. {
  87. }
// Lazily create (and cache) a kernel MMIO mapping for the given PCI BAR.
// On mapping failure a message is logged and the cached entry's base remains
// null; callers receive the entry either way.
auto VirtIODevice::mapping_for_bar(u8 bar) -> MappedMMIO&
{
    VERIFY(m_use_mmio); // only meaningful for modern (MMIO-driven) devices
    auto& mapping = m_mmio[bar];
    if (!mapping.base) {
        // First access to this BAR: map its physical range uncached,
        // page-aligned and rounded up to whole pages.
        mapping.size = PCI::get_BAR_space_size(pci_address(), bar);
        mapping.base = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR(pci_address(), bar))), page_round_up(mapping.size), "VirtIO MMIO", Region::Access::Read | Region::Access::Write, Region::Cacheable::No);
        if (!mapping.base)
            dbgln("{}: Failed to map bar {}", m_class_name, bar);
    }
    return mapping;
}
  100. void VirtIODevice::notify_queue(u16 queue_index)
  101. {
  102. dbgln_if(VIRTIO_DEBUG, "{}: notifying about queue change at idx: {}", m_class_name, queue_index);
  103. if (!m_notify_cfg)
  104. out<u16>(REG_QUEUE_NOTIFY, queue_index);
  105. else
  106. config_write16(*m_notify_cfg, get_queue(queue_index).notify_offset() * m_notify_multiplier, queue_index);
  107. }
  108. u8 VirtIODevice::config_read8(const Configuration& config, u32 offset)
  109. {
  110. return mapping_for_bar(config.bar).read<u8>(config.offset + offset);
  111. }
  112. u16 VirtIODevice::config_read16(const Configuration& config, u32 offset)
  113. {
  114. return mapping_for_bar(config.bar).read<u16>(config.offset + offset);
  115. }
  116. u32 VirtIODevice::config_read32(const Configuration& config, u32 offset)
  117. {
  118. return mapping_for_bar(config.bar).read<u32>(config.offset + offset);
  119. }
  120. void VirtIODevice::config_write8(const Configuration& config, u32 offset, u8 value)
  121. {
  122. mapping_for_bar(config.bar).write(config.offset + offset, value);
  123. }
  124. void VirtIODevice::config_write16(const Configuration& config, u32 offset, u16 value)
  125. {
  126. mapping_for_bar(config.bar).write(config.offset + offset, value);
  127. }
  128. void VirtIODevice::config_write32(const Configuration& config, u32 offset, u32 value)
  129. {
  130. mapping_for_bar(config.bar).write(config.offset + offset, value);
  131. }
  132. void VirtIODevice::config_write64(const Configuration& config, u32 offset, u64 value)
  133. {
  134. mapping_for_bar(config.bar).write(config.offset + offset, value);
  135. }
  136. u8 VirtIODevice::read_status_bits()
  137. {
  138. if (!m_common_cfg)
  139. return in<u8>(REG_DEVICE_STATUS);
  140. return config_read8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS);
  141. }
// Mask the cached status with `status_bit` and write the result back to the
// device's status register.
// NOTE(review): despite the name, this performs `m_status &= status_bit`
// (a mask), not `m_status &= ~status_bit` (a clear). The only caller in this
// file, reset_device(), passes 0 to zero the entire status register and
// relies on the mask semantics — so the operator must not be "fixed" without
// also changing that caller. A rename (e.g. mask_status_bits) would be clearer.
void VirtIODevice::clear_status_bit(u8 status_bit)
{
    m_status &= status_bit;
    if (!m_common_cfg)
        out<u8>(REG_DEVICE_STATUS, m_status);
    else
        config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, m_status);
}
  150. void VirtIODevice::set_status_bit(u8 status_bit)
  151. {
  152. m_status |= status_bit;
  153. if (!m_common_cfg)
  154. out<u8>(REG_DEVICE_STATUS, m_status);
  155. else
  156. config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, m_status);
  157. }
  158. u64 VirtIODevice::get_device_features()
  159. {
  160. if (!m_common_cfg)
  161. return in<u32>(REG_DEVICE_FEATURES);
  162. config_write32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE_SELECT, 0);
  163. auto lower_bits = config_read32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE);
  164. config_write32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE_SELECT, 1);
  165. u64 upper_bits = (u64)config_read32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE) << 32;
  166. return upper_bits | lower_bits;
  167. }
// Feature negotiation: given the device's advertised bits and the subset the
// subclass wants, adjust the set for transport-level features, offer it to
// the device, and verify the device keeps FEATURES_OK set.
// Returns false (and sets DEVICE_STATUS_FAILED) if the host rejects the set.
// May only be called once per device.
bool VirtIODevice::accept_device_features(u64 device_features, u64 accepted_features)
{
    VERIFY(!m_did_accept_features);
    m_did_accept_features = true;
    if (is_feature_set(device_features, VIRTIO_F_VERSION_1)) {
        accepted_features |= VIRTIO_F_VERSION_1; // let the device know we're not a legacy driver
    }
    if (is_feature_set(device_features, VIRTIO_F_RING_PACKED)) {
        // We only implement split virtqueues, so decline packed rings.
        dbgln_if(VIRTIO_DEBUG, "{}: packed queues not yet supported", m_class_name);
        accepted_features &= ~(VIRTIO_F_RING_PACKED);
    }
    // TODO: implement indirect descriptors to allow queue_size buffers instead of buffers totalling (PAGE_SIZE * queue_size) bytes
    if (is_feature_set(device_features, VIRTIO_F_INDIRECT_DESC)) {
        // accepted_features |= VIRTIO_F_INDIRECT_DESC;
    }
    if (is_feature_set(device_features, VIRTIO_F_IN_ORDER)) {
        accepted_features |= VIRTIO_F_IN_ORDER;
    }
    dbgln_if(VIRTIO_DEBUG, "{}: Device features: {}", m_class_name, device_features);
    dbgln_if(VIRTIO_DEBUG, "{}: Accepted features: {}", m_class_name, accepted_features);
    if (!m_common_cfg) {
        // NOTE(review): the legacy guest-features register is 32 bits wide, so
        // the upper half of accepted_features is truncated here — presumably
        // fine because legacy devices only define 32 feature bits; confirm no
        // 64-bit flags can reach this path.
        out<u32>(REG_GUEST_FEATURES, accepted_features);
    } else {
        // Modern devices take the 64-bit set as two selected 32-bit writes.
        config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE_SELECT, 0);
        config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE, accepted_features);
        config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE_SELECT, 1);
        config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE, accepted_features >> 32);
    }
    set_status_bit(DEVICE_STATUS_FEATURES_OK);
    // Re-read the status: if the device cleared FEATURES_OK, it rejected our selection.
    m_status = read_status_bits();
    if (!(m_status & DEVICE_STATUS_FEATURES_OK)) {
        set_status_bit(DEVICE_STATUS_FAILED);
        dbgln("{}: Features not accepted by host!", m_class_name);
        return false;
    }
    m_accepted_features = accepted_features;
    dbgln_if(VIRTIO_DEBUG, "{}: Features accepted by host", m_class_name);
    return true;
}
// Reset the device by writing 0 to its status register, then spin until the
// device reads back 0, which signals that the reset has completed.
void VirtIODevice::reset_device()
{
    dbgln_if(VIRTIO_DEBUG, "{}: Reset device", m_class_name);
    if (!m_common_cfg) {
        // Legacy path: clear_status_bit(0) ANDs the cached status with 0,
        // zeroing the status register (see the note on clear_status_bit).
        clear_status_bit(0);
        while (read_status_bits() != 0) {
            // TODO: delay a bit?
        }
        return;
    }
    // Modern path: write 0 directly through the common configuration structure.
    config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, 0);
    while (config_read8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS) != 0) {
        // TODO: delay a bit?
    }
}
// Configure virtqueue `queue_index` (modern/MMIO devices only): query its
// size and notify offset, allocate the queue memory, and tell the device
// where the descriptor, driver (available) and device (used) areas live.
// Returns false on allocation failure or when the device lacks a common
// configuration structure.
bool VirtIODevice::setup_queue(u16 queue_index)
{
    if (!m_common_cfg)
        return false;
    config_write16(*m_common_cfg, COMMON_CFG_QUEUE_SELECT, queue_index);
    u16 queue_size = config_read16(*m_common_cfg, COMMON_CFG_QUEUE_SIZE);
    if (queue_size == 0) {
        // A size of 0 means the device does not provide this queue.
        // NOTE(review): returning true here skips the append below, so any
        // later queue would land at a smaller index in m_queues than its
        // device-side index — confirm devices never expose gaps in their
        // queue numbering.
        dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] is unavailable!", m_class_name, queue_index);
        return true;
    }
    u16 queue_notify_offset = config_read16(*m_common_cfg, COMMON_CFG_QUEUE_NOTIFY_OFF);
    auto queue = make<VirtIOQueue>(queue_size, queue_notify_offset);
    if (queue->is_null())
        return false; // queue memory allocation failed
    // Hand the physical addresses of the three ring areas to the device.
    config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DESC, queue->descriptor_area().get());
    config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DRIVER, queue->driver_area().get());
    config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DEVICE, queue->device_area().get());
    dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] configured with size: {}", m_class_name, queue_index, queue_size);
    m_queues.append(move(queue));
    return true;
}
// Enable a previously configured virtqueue. The device must be selected
// first, then the enable flag written — the order of the two writes matters.
// Must only happen after *all* queues have been configured (see setup_queues).
bool VirtIODevice::activate_queue(u16 queue_index)
{
    if (!m_common_cfg)
        return false; // legacy devices have no per-queue enable
    config_write16(*m_common_cfg, COMMON_CFG_QUEUE_SELECT, queue_index);
    config_write16(*m_common_cfg, COMMON_CFG_QUEUE_ENABLE, true);
    dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] activated", m_class_name, queue_index);
    return true;
}
  252. bool VirtIODevice::setup_queues(u16 requested_queue_count)
  253. {
  254. VERIFY(!m_did_setup_queues);
  255. m_did_setup_queues = true;
  256. if (m_common_cfg) {
  257. auto maximum_queue_count = config_read16(*m_common_cfg, COMMON_CFG_NUM_QUEUES);
  258. if (requested_queue_count == 0) {
  259. m_queue_count = maximum_queue_count;
  260. } else if (requested_queue_count > maximum_queue_count) {
  261. dbgln("{}: {} queues requested but only {} available!", m_class_name, m_queue_count, maximum_queue_count);
  262. return false;
  263. } else {
  264. m_queue_count = requested_queue_count;
  265. }
  266. } else {
  267. m_queue_count = requested_queue_count;
  268. dbgln("{}: device's available queue count could not be determined!", m_class_name);
  269. }
  270. dbgln_if(VIRTIO_DEBUG, "{}: Setting up {} queues", m_class_name, m_queue_count);
  271. for (u16 i = 0; i < m_queue_count; i++) {
  272. if (!setup_queue(i))
  273. return false;
  274. }
  275. for (u16 i = 0; i < m_queue_count; i++) { // Queues can only be activated *after* all others queues were also configured
  276. if (!activate_queue(i))
  277. return false;
  278. }
  279. return true;
  280. }
// Final step of the VirtIO handshake: mark the driver as ready (DRIVER_OK).
// Must be called exactly once, after feature negotiation and queue setup.
void VirtIODevice::finish_init()
{
    VERIFY(m_did_accept_features); // ensure features were negotiated
    VERIFY(m_did_setup_queues); // ensure queues were set-up
    VERIFY(!(m_status & DEVICE_STATUS_DRIVER_OK)); // ensure we didn't already finish the initialization
    set_status_bit(DEVICE_STATUS_DRIVER_OK);
    dbgln_if(VIRTIO_DEBUG, "{}: Finished initialization", m_class_name);
}
  289. void VirtIODevice::supply_buffer_and_notify(u16 queue_index, const ScatterGatherList& scatter_list, BufferType buffer_type, void* token)
  290. {
  291. VERIFY(queue_index < m_queue_count);
  292. if (get_queue(queue_index).supply_buffer({}, scatter_list, buffer_type, token))
  293. notify_queue(queue_index);
  294. }
  295. u8 VirtIODevice::isr_status()
  296. {
  297. if (!m_isr_cfg)
  298. return in<u8>(REG_ISR_STATUS);
  299. return config_read8(*m_isr_cfg, 0);
  300. }
// Shared IRQ handler for all VirtIO devices: reads the ISR status and
// dispatches to the subclass hooks for configuration changes and queue updates.
void VirtIODevice::handle_irq(const RegisterState&)
{
    u8 isr_type = isr_status();
    if (isr_type & DEVICE_CONFIG_INTERRUPT) {
        if (!handle_device_config_change()) {
            set_status_bit(DEVICE_STATUS_FAILED);
            dbgln("{}: Failed to handle device config change!", m_class_name);
        }
    }
    if (isr_type & QUEUE_INTERRUPT) {
        // NOTE(review): only the first queue with new data is serviced per
        // interrupt (the early return below); remaining queues wait for the
        // next IRQ — confirm this batching is intended.
        for (size_t i = 0; i < m_queues.size(); i++) {
            if (get_queue(i).new_data_available())
                return handle_queue_update(i);
        }
        dbgln_if(VIRTIO_DEBUG, "{}: Got queue interrupt but all queues are up to date!", m_class_name);
    }
    if (isr_type & ~(QUEUE_INTERRUPT | DEVICE_CONFIG_INTERRUPT))
        dbgln("{}: Handling interrupt with unknown type: {}", m_class_name, isr_type);
}
  320. }