FrameBufferDevice.cpp

/*
 * Copyright (c) 2021, Sahan Fernando <sahan.h.fernando@gmail.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/Graphics/GraphicsManagement.h>
#include <Kernel/Graphics/VirtIOGPU/FrameBufferDevice.h>
#include <LibC/sys/ioctl_numbers.h>

namespace Kernel::Graphics::VirtIOGPU {

FrameBufferDevice::FrameBufferDevice(GPU& virtio_gpu, ScanoutID scanout)
    : BlockDevice(29, GraphicsManagement::the().allocate_minor_device_number())
    , m_gpu(virtio_gpu)
    , m_scanout(scanout)
{
    if (display_info().enabled) {
        // FIXME: This should be in a place where we can handle allocation failures.
        auto result = create_framebuffer();
        VERIFY(!result.is_error());
    }
}

FrameBufferDevice::~FrameBufferDevice()
{
}
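
// Allocates a single kernel region that holds the main and back buffers back to back,
// plus a "sink" VMObject whose pages all alias one physical scratch page. The sink is
// swapped into the userspace mapping while writes are deactivated, so those writes never
// reach the visible framebuffer.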
KResult FrameBufferDevice::create_framebuffer()
{
    // First delete any existing framebuffers to free their memory
    m_framebuffer = nullptr;
    m_framebuffer_sink_vmobject = nullptr;

    // Allocate framebuffer memory for both front and back buffers
    auto& info = display_info();
    m_buffer_size = calculate_framebuffer_size(info.rect.width, info.rect.height);
    m_framebuffer = MM.allocate_kernel_region(m_buffer_size * 2, String::formatted("VirtGPU FrameBuffer #{}", m_scanout.value()), Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow);

    auto write_sink_page = MM.allocate_user_physical_page(Memory::MemoryManager::ShouldZeroFill::No).release_nonnull();
    auto num_needed_pages = m_framebuffer->vmobject().page_count();

    NonnullRefPtrVector<Memory::PhysicalPage> pages;
    for (auto i = 0u; i < num_needed_pages; ++i) {
        pages.append(write_sink_page);
    }
    auto maybe_framebuffer_sink_vmobject = Memory::AnonymousVMObject::try_create_with_physical_pages(pages.span());
    if (maybe_framebuffer_sink_vmobject.is_error())
        return maybe_framebuffer_sink_vmobject.error();
    m_framebuffer_sink_vmobject = maybe_framebuffer_sink_vmobject.release_value();

    MutexLocker locker(m_gpu.operation_lock());
    m_current_buffer = &buffer_from_index(m_last_set_buffer_index.load());
    create_buffer(m_main_buffer, 0, m_buffer_size);
    create_buffer(m_back_buffer, m_buffer_size, m_buffer_size);

    return KSuccess;
}
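
// (Re)creates the host-side resource for one of the two guest buffers and runs through
// the VirtIO GPU command sequence documented step by step below.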
void FrameBufferDevice::create_buffer(Buffer& buffer, size_t framebuffer_offset, size_t framebuffer_size)
{
    buffer.framebuffer_offset = framebuffer_offset;
    buffer.framebuffer_data = m_framebuffer->vaddr().as_ptr() + framebuffer_offset;

    auto& info = display_info();

    // 1. Create BUFFER using VIRTIO_GPU_CMD_RESOURCE_CREATE_2D
    if (buffer.resource_id.value() != 0)
        m_gpu.delete_resource(buffer.resource_id);
    buffer.resource_id = m_gpu.create_2d_resource(info.rect);

    // 2. Attach backing storage using VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING
    m_gpu.ensure_backing_storage(*m_framebuffer, buffer.framebuffer_offset, framebuffer_size, buffer.resource_id);
    // 3. Use VIRTIO_GPU_CMD_SET_SCANOUT to link the framebuffer to a display scanout.
    if (&buffer == m_current_buffer)
        m_gpu.set_scanout_resource(m_scanout.value(), buffer.resource_id, info.rect);
    // 4. Render our test pattern
    draw_ntsc_test_pattern(buffer);
    // 5. Use VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D to update the host resource from guest memory.
    transfer_framebuffer_data_to_host(info.rect, buffer);
    // 6. Use VIRTIO_GPU_CMD_RESOURCE_FLUSH to flush the updated resource to the display.
    if (&buffer == m_current_buffer)
        flush_displayed_image(info.rect, buffer);

    // Make sure we constrain the existing dirty rect (if any)
    if (buffer.dirty_rect.width != 0 || buffer.dirty_rect.height != 0) {
        auto dirty_right = buffer.dirty_rect.x + buffer.dirty_rect.width;
        auto dirty_bottom = buffer.dirty_rect.y + buffer.dirty_rect.height;
        buffer.dirty_rect.width = min(dirty_right, info.rect.x + info.rect.width) - buffer.dirty_rect.x;
        buffer.dirty_rect.height = min(dirty_bottom, info.rect.y + info.rect.height) - buffer.dirty_rect.y;
    }

    info.enabled = 1;
}

Protocol::DisplayInfoResponse::Display const& FrameBufferDevice::display_info() const
{
    return m_gpu.display_info(m_scanout);
}

Protocol::DisplayInfoResponse::Display& FrameBufferDevice::display_info()
{
    return m_gpu.display_info(m_scanout);
}

void FrameBufferDevice::transfer_framebuffer_data_to_host(Protocol::Rect const& rect, Buffer& buffer)
{
    m_gpu.transfer_framebuffer_data_to_host(m_scanout, rect, buffer.resource_id);
}

void FrameBufferDevice::flush_dirty_window(Protocol::Rect const& dirty_rect, Buffer& buffer)
{
    m_gpu.flush_dirty_rectangle(m_scanout, dirty_rect, buffer.resource_id);
}

void FrameBufferDevice::flush_displayed_image(Protocol::Rect const& dirty_rect, Buffer& buffer)
{
    m_gpu.flush_displayed_image(dirty_rect, buffer.resource_id);
}

bool FrameBufferDevice::try_to_set_resolution(size_t width, size_t height)
{
    if (width > MAX_VIRTIOGPU_RESOLUTION_WIDTH || height > MAX_VIRTIOGPU_RESOLUTION_HEIGHT)
        return false;

    auto& info = display_info();

    MutexLocker locker(m_gpu.operation_lock());

    info.rect = {
        .x = 0,
        .y = 0,
        .width = (u32)width,
        .height = (u32)height,
    };
    // FIXME: Would be nice to be able to return KResultOr here.
    if (auto result = create_framebuffer(); result.is_error())
        return false;
    return true;
}
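
// Makes the given buffer (0 = main, 1 = back) the one shown on the scanout and flushes
// whatever was previously marked dirty on it.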
void FrameBufferDevice::set_buffer(int buffer_index)
{
    auto& buffer = buffer_index == 0 ? m_main_buffer : m_back_buffer;
    MutexLocker locker(m_gpu.operation_lock());
    if (&buffer == m_current_buffer)
        return;
    m_current_buffer = &buffer;
    m_gpu.set_scanout_resource(m_scanout.value(), buffer.resource_id, display_info().rect);
    m_gpu.flush_displayed_image(buffer.dirty_rect, buffer.resource_id); // QEMU SDL backend requires this (as per spec)
    buffer.dirty_rect = {};
}

KResult FrameBufferDevice::ioctl(FileDescription&, unsigned request, Userspace<void*> arg)
{
    REQUIRE_PROMISE(video);
    switch (request) {
    case FB_IOCTL_GET_SIZE_IN_BYTES: {
        auto out = static_ptr_cast<size_t*>(arg);
        size_t value = m_buffer_size * 2;
        if (!copy_to_user(out, &value))
            return EFAULT;
        return KSuccess;
    }
    case FB_IOCTL_SET_RESOLUTION: {
        auto user_resolution = static_ptr_cast<FBResolution*>(arg);
        FBResolution resolution;
        if (!copy_from_user(&resolution, user_resolution))
            return EFAULT;
        if (!try_to_set_resolution(resolution.width, resolution.height))
            return EINVAL;
        resolution.pitch = pitch();
        if (!copy_to_user(user_resolution, &resolution))
            return EFAULT;
        return KSuccess;
    }
    case FB_IOCTL_GET_RESOLUTION: {
        auto user_resolution = static_ptr_cast<FBResolution*>(arg);
        FBResolution resolution {};
        resolution.pitch = pitch();
        resolution.width = width();
        resolution.height = height();
        if (!copy_to_user(user_resolution, &resolution))
            return EFAULT;
        return KSuccess;
    }
    case FB_IOCTL_SET_BUFFER: {
        auto buffer_index = static_cast<int>(arg.ptr());
        if (!is_valid_buffer_index(buffer_index))
            return EINVAL;
        if (m_last_set_buffer_index.exchange(buffer_index) != buffer_index && m_are_writes_active)
            set_buffer(buffer_index);
        return KSuccess;
    }
    case FB_IOCTL_FLUSH_BUFFERS: {
        auto user_flush_rects = static_ptr_cast<FBFlushRects*>(arg);
        FBFlushRects flush_rects;
        if (!copy_from_user(&flush_rects, user_flush_rects))
            return EFAULT;
        if (!is_valid_buffer_index(flush_rects.buffer_index))
            return EINVAL;
        if (Checked<unsigned>::multiplication_would_overflow(flush_rects.count, sizeof(FBRect)))
            return EFAULT;
        if (m_are_writes_active && flush_rects.count > 0) {
            auto& buffer = buffer_from_index(flush_rects.buffer_index);
            MutexLocker locker(m_gpu.operation_lock());
            for (unsigned i = 0; i < flush_rects.count; i++) {
                FBRect user_dirty_rect;
                if (!copy_from_user(&user_dirty_rect, &flush_rects.rects[i]))
                    return EFAULT;
                Protocol::Rect dirty_rect {
                    .x = user_dirty_rect.x,
                    .y = user_dirty_rect.y,
                    .width = user_dirty_rect.width,
                    .height = user_dirty_rect.height
                };
                transfer_framebuffer_data_to_host(dirty_rect, buffer);
                if (&buffer == m_current_buffer) {
                    // Flushing directly to screen
                    flush_displayed_image(dirty_rect, buffer);
                    buffer.dirty_rect = {};
                } else {
                    if (buffer.dirty_rect.width == 0 || buffer.dirty_rect.height == 0) {
                        buffer.dirty_rect = dirty_rect;
                    } else {
                        auto current_dirty_right = buffer.dirty_rect.x + buffer.dirty_rect.width;
                        auto current_dirty_bottom = buffer.dirty_rect.y + buffer.dirty_rect.height;
                        buffer.dirty_rect.x = min(buffer.dirty_rect.x, dirty_rect.x);
                        buffer.dirty_rect.y = min(buffer.dirty_rect.y, dirty_rect.y);
                        buffer.dirty_rect.width = max(current_dirty_right, dirty_rect.x + dirty_rect.width) - buffer.dirty_rect.x;
                        buffer.dirty_rect.height = max(current_dirty_bottom, dirty_rect.y + dirty_rect.height) - buffer.dirty_rect.y;
                    }
                }
            }
        }
        return KSuccess;
    }
    case FB_IOCTL_GET_BUFFER_OFFSET: {
        auto user_buffer_offset = static_ptr_cast<FBBufferOffset*>(arg);
        FBBufferOffset buffer_offset;
        if (!copy_from_user(&buffer_offset, user_buffer_offset))
            return EFAULT;
        if (!is_valid_buffer_index(buffer_offset.buffer_index))
            return EINVAL;
        buffer_offset.offset = (size_t)buffer_offset.buffer_index * m_buffer_size;
        if (!copy_to_user(user_buffer_offset, &buffer_offset))
            return EFAULT;
        return KSuccess;
    }
    default:
        return EINVAL;
    };
}
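
// Maps the framebuffer into a process (only one mapping at a time is allowed). While
// writes are active the process maps a clone of the framebuffer's VMObject; otherwise it
// maps the write-sink VMObject, so its writes never reach the display.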
KResultOr<Memory::Region*> FrameBufferDevice::mmap(Process& process, FileDescription&, Memory::VirtualRange const& range, u64 offset, int prot, bool shared)
{
    REQUIRE_PROMISE(video);
    if (!shared)
        return ENODEV;
    if (offset != 0 || !m_framebuffer)
        return ENXIO;
    if (range.size() > m_framebuffer->size())
        return EOVERFLOW;

    // We only allow one process to map the region
    if (m_userspace_mmap_region)
        return ENOMEM;

    RefPtr<Memory::VMObject> vmobject;
    if (m_are_writes_active) {
        auto maybe_vmobject = m_framebuffer->vmobject().try_clone();
        if (maybe_vmobject.is_error())
            return maybe_vmobject.error();
        vmobject = maybe_vmobject.release_value();
    } else {
        vmobject = m_framebuffer_sink_vmobject;
        if (vmobject.is_null())
            return ENOMEM;
    }

    auto result = process.address_space().allocate_region_with_vmobject(
        range,
        vmobject.release_nonnull(),
        0,
        "VirtIOGPU Framebuffer",
        prot,
        shared);
    if (result.is_error())
        return result;
    m_userspace_mmap_region = result.value();
    return result;
}
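
// Called when this device should stop reaching the display: redirect any userspace
// mapping to the write sink, switch the scanout back to buffer 0 and clear it to black.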
void FrameBufferDevice::deactivate_writes()
{
    m_are_writes_active = false;
    if (m_userspace_mmap_region) {
        auto* region = m_userspace_mmap_region.unsafe_ptr();
        auto maybe_vm_object = m_framebuffer_sink_vmobject->try_clone();
        // FIXME: Would be nice to be able to return a KResult here.
        VERIFY(!maybe_vm_object.is_error());
        region->set_vmobject(maybe_vm_object.release_value());
        region->remap();
    }
    set_buffer(0);
    clear_to_black(buffer_from_index(0));
}
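
// Called when this device becomes active again: restore the real framebuffer VMObject on
// the userspace mapping and switch back to the buffer userspace last selected.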
void FrameBufferDevice::activate_writes()
{
    m_are_writes_active = true;
    auto last_set_buffer_index = m_last_set_buffer_index.load();
    if (m_userspace_mmap_region) {
        auto* region = m_userspace_mmap_region.unsafe_ptr();
        region->set_vmobject(m_framebuffer->vmobject());
        region->remap();
    }
    set_buffer(last_set_buffer_index);
}
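
// Fills the buffer with opaque black; each pixel is 4 bytes, with the last byte set to 0xff.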
void FrameBufferDevice::clear_to_black(Buffer& buffer)
{
    auto& info = display_info();
    size_t width = info.rect.width;
    size_t height = info.rect.height;
    u8* data = buffer.framebuffer_data;
    for (size_t i = 0; i < width * height; ++i) {
        data[4 * i + 0] = 0x00;
        data[4 * i + 1] = 0x00;
        data[4 * i + 2] = 0x00;
        data[4 * i + 3] = 0xff;
    }
}

void FrameBufferDevice::draw_ntsc_test_pattern(Buffer& buffer)
{
    static constexpr u8 colors[12][4] = {
        { 0xff, 0xff, 0xff, 0xff }, // White
        { 0x00, 0xff, 0xff, 0xff }, // Primary + Composite colors
        { 0xff, 0xff, 0x00, 0xff },
        { 0x00, 0xff, 0x00, 0xff },
        { 0xff, 0x00, 0xff, 0xff },
        { 0x00, 0x00, 0xff, 0xff },
        { 0xff, 0x00, 0x00, 0xff },
        { 0xba, 0x01, 0x5f, 0xff }, // Dark blue
        { 0x8d, 0x3d, 0x00, 0xff }, // Purple
        { 0x22, 0x22, 0x22, 0xff }, // Shades of gray
        { 0x10, 0x10, 0x10, 0xff },
        { 0x00, 0x00, 0x00, 0xff },
    };
    auto& info = display_info();
    size_t width = info.rect.width;
    size_t height = info.rect.height;
    u8* data = buffer.framebuffer_data;
    // Draw NTSC test card
    for (size_t y = 0; y < height; ++y) {
        for (size_t x = 0; x < width; ++x) {
            size_t color = 0;
            if (3 * y < 2 * height) {
                // Top 2/3 of image is 7 vertical stripes of color spectrum
                color = (7 * x) / width;
            } else if (4 * y < 3 * height) {
                // 2/3 mark to 3/4 mark is backwards color spectrum alternating with black
                auto segment = (7 * x) / width;
                color = segment % 2 ? 10 : 6 - segment;
            } else {
                if (28 * x < 5 * width) {
                    color = 8;
                } else if (28 * x < 10 * width) {
                    color = 0;
                } else if (28 * x < 15 * width) {
                    color = 7;
                } else if (28 * x < 20 * width) {
                    color = 10;
                } else if (7 * x < 6 * width) {
                    // Grayscale gradient
                    color = 26 - ((21 * x) / width);
                } else {
                    // Solid black
                    color = 10;
                }
            }
            u8* pixel = &data[4 * (y * width + x)];
            for (int i = 0; i < 4; ++i) {
                pixel[i] = colors[color][i];
            }
        }
    }
    dbgln_if(VIRTIO_DEBUG, "Finish drawing the pattern");
}

u8* FrameBufferDevice::framebuffer_data()
{
    return m_current_buffer->framebuffer_data;
}

}