Sampler.cpp

/*
 * Copyright (c) 2021, Stephan Unverwerth <s.unverwerth@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/SIMDExtras.h>
#include <AK/SIMDMath.h>
#include <LibSoftGPU/Config.h>
#include <LibSoftGPU/Image.h>
#include <LibSoftGPU/SIMD.h>
#include <LibSoftGPU/Sampler.h>
#include <math.h>

namespace SoftGPU {

using AK::SIMD::f32x4;
using AK::SIMD::i32x4;
using AK::SIMD::u32x4;

using AK::SIMD::clamp;
using AK::SIMD::expand4;
using AK::SIMD::floor_int_range;
using AK::SIMD::frac_int_range;
using AK::SIMD::maskbits;
using AK::SIMD::to_f32x4;
using AK::SIMD::to_i32x4;
using AK::SIMD::to_u32x4;
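
// The wrap_* helpers below map a texture coordinate into [0, 1] according to
// the repeat, mirrored-repeat, clamp and clamp-to-edge wrap modes.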
static f32x4 wrap_repeat(f32x4 value)
{
    return frac_int_range(value);
}

[[maybe_unused]] static f32x4 wrap_clamp(f32x4 value)
{
    return clamp(value, expand4(0.0f), expand4(1.0f));
}

static f32x4 wrap_clamp_to_edge(f32x4 value, f32x4 num_texels)
{
    f32x4 const clamp_limit = .5f / num_texels;
    return clamp(value, clamp_limit, 1.f - clamp_limit);
}

static f32x4 wrap_mirrored_repeat(f32x4 value, f32x4 num_texels)
{
    f32x4 integer = floor_int_range(value);
    f32x4 frac = value - integer;
    auto is_odd = to_i32x4(integer) & 1;
    return wrap_clamp_to_edge(is_odd ? 1 - frac : frac, num_texels);
}

static f32x4 wrap(f32x4 value, GPU::TextureWrapMode mode, f32x4 num_texels)
{
    switch (mode) {
    case GPU::TextureWrapMode::Repeat:
        return wrap_repeat(value);
    case GPU::TextureWrapMode::MirroredRepeat:
        return wrap_mirrored_repeat(value, num_texels);
    case GPU::TextureWrapMode::Clamp:
        if constexpr (CLAMP_DEPRECATED_BEHAVIOR) {
            return wrap_clamp(value);
        }
        return wrap_clamp_to_edge(value, num_texels);
    case GPU::TextureWrapMode::ClampToBorder:
    case GPU::TextureWrapMode::ClampToEdge:
        return wrap_clamp_to_edge(value, num_texels);
    default:
        VERIFY_NOT_REACHED();
    }
}
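
// Fetch one texel per SIMD lane and transpose the results into a
// struct-of-arrays Vector4<f32x4>, one f32x4 per color channel.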
ALWAYS_INLINE static Vector4<f32x4> texel4(Image const& image, u32x4 level, u32x4 x, u32x4 y)
{
    auto const& t0 = image.texel(level[0], x[0], y[0], 0);
    auto const& t1 = image.texel(level[1], x[1], y[1], 0);
    auto const& t2 = image.texel(level[2], x[2], y[2], 0);
    auto const& t3 = image.texel(level[3], x[3], y[3], 0);

    return Vector4<f32x4> {
        f32x4 { t0.x(), t1.x(), t2.x(), t3.x() },
        f32x4 { t0.y(), t1.y(), t2.y(), t3.y() },
        f32x4 { t0.z(), t1.z(), t2.z(), t3.z() },
        f32x4 { t0.w(), t1.w(), t2.w(), t3.w() },
    };
}
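
// Like texel4, but lanes whose coordinates fall outside the image return the
// configured border color instead.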
ALWAYS_INLINE static Vector4<f32x4> texel4border(Image const& image, u32x4 level, u32x4 x, u32x4 y, FloatVector4 const& border, u32x4 w, u32x4 h)
{
    auto border_mask = maskbits(x < 0 || x >= w || y < 0 || y >= h);

    auto const& t0 = (border_mask & 1) > 0 ? border : image.texel(level[0], x[0], y[0], 0);
    auto const& t1 = (border_mask & 2) > 0 ? border : image.texel(level[1], x[1], y[1], 0);
    auto const& t2 = (border_mask & 4) > 0 ? border : image.texel(level[2], x[2], y[2], 0);
    auto const& t3 = (border_mask & 8) > 0 ? border : image.texel(level[3], x[3], y[3], 0);

    return Vector4<f32x4> {
        f32x4 { t0.x(), t1.x(), t2.x(), t3.x() },
        f32x4 { t0.y(), t1.y(), t2.y(), t3.y() },
        f32x4 { t0.z(), t1.z(), t2.z(), t3.z() },
        f32x4 { t0.w(), t1.w(), t2.w(), t3.w() },
    };
}
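
// Sample the bound 2D image for all SIMD lanes of the current fragment quad at
// once, choosing magnification, minification or mipmapped filtering based on
// the estimated texel-per-pixel scale factor.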
Vector4<AK::SIMD::f32x4> Sampler::sample_2d(Vector2<AK::SIMD::f32x4> const& uv) const
{
    if (m_config.bound_image.is_null())
        return expand4(FloatVector4 { 1, 0, 0, 1 });

    auto const& image = *static_ptr_cast<Image>(m_config.bound_image);

    // FIXME: Make base level configurable with glTexParameteri(GL_TEXTURE_BASE_LEVEL, base_level)
    constexpr unsigned base_level = 0;

    // Determine the texture scale factor. See OpenGL 1.5 spec chapter 3.8.8.
    // FIXME: Static casting from u32 to float could silently truncate here.
    //        u16 should be plenty enough for texture dimensions and would allow textures of up to 65536x65536x65536 pixels.
    auto texel_coordinates = uv;
    texel_coordinates.set_x(texel_coordinates.x() * static_cast<float>(image.width_at_level(base_level)));
    texel_coordinates.set_y(texel_coordinates.y() * static_cast<float>(image.height_at_level(base_level)));
    auto dtdx = ddx(texel_coordinates);
    auto dtdy = ddy(texel_coordinates);
    auto scale_factor = max(dtdx.dot(dtdx), dtdy.dot(dtdy));

    // FIXME: Here we simply determine the filter based on the single scale factor of the upper left pixel.
    //        Actually, we could end up with different scale factors for each pixel. This however would break our
    //        parallelisation as we could also end up with different filter modes per pixel.

    // Note: scale_factor approximates texels per pixel. This means a scale factor less than 1 indicates texture magnification.
    if (scale_factor[0] <= 1.f)
        return sample_2d_lod(uv, expand4(base_level), m_config.texture_mag_filter);

    if (m_config.mipmap_filter == GPU::MipMapFilter::None)
        return sample_2d_lod(uv, expand4(base_level), m_config.texture_min_filter);

    auto texture_lod_bias = AK::clamp(m_config.level_of_detail_bias, -MAX_TEXTURE_LOD_BIAS, MAX_TEXTURE_LOD_BIAS);
    // FIXME: Instead of clamping to num_levels - 1, actually make the max mipmap level configurable with glTexParameteri(GL_TEXTURE_MAX_LEVEL, max_level)
    auto min_level = expand4(static_cast<float>(base_level));
    auto max_level = expand4(static_cast<float>(image.number_of_levels()) - 1.f);
    auto lambda_xy = log2_approximate(scale_factor) * .5f + texture_lod_bias;
    auto level = clamp(lambda_xy, min_level, max_level);

    auto lower_level_texel = sample_2d_lod(uv, to_u32x4(level), m_config.texture_min_filter);

    if (m_config.mipmap_filter == GPU::MipMapFilter::Nearest)
        return lower_level_texel;

    auto higher_level_texel = sample_2d_lod(uv, to_u32x4(min(level + 1.f, max_level)), m_config.texture_min_filter);

    return mix(lower_level_texel, higher_level_texel, frac_int_range(level));
}
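
// Sample a single, already-selected mipmap level with either nearest-neighbor
// or bilinear filtering.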
Vector4<AK::SIMD::f32x4> Sampler::sample_2d_lod(Vector2<AK::SIMD::f32x4> const& uv, AK::SIMD::u32x4 level, GPU::TextureFilter filter) const
{
    auto const& image = *static_ptr_cast<Image>(m_config.bound_image);

    u32x4 const width = {
        image.width_at_level(level[0]),
        image.width_at_level(level[1]),
        image.width_at_level(level[2]),
        image.width_at_level(level[3]),
    };
    u32x4 const height = {
        image.height_at_level(level[0]),
        image.height_at_level(level[1]),
        image.height_at_level(level[2]),
        image.height_at_level(level[3]),
    };

    auto f_width = to_f32x4(width);
    auto f_height = to_f32x4(height);

    u32x4 width_mask = width - 1;
    u32x4 height_mask = height - 1;

    f32x4 u = wrap(uv.x(), m_config.texture_wrap_u, f_width) * f_width;
    f32x4 v = wrap(uv.y(), m_config.texture_wrap_v, f_height) * f_height;

    if (filter == GPU::TextureFilter::Nearest) {
        u32x4 i = to_u32x4(u);
        u32x4 j = to_u32x4(v);

        i = image.width_is_power_of_two() ? i & width_mask : i % width;
        j = image.height_is_power_of_two() ? j & height_mask : j % height;

        return texel4(image, level, i, j);
    }
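
    // Bilinear filtering: offset the sample point by half a texel, fetch the
    // surrounding 2x2 texel neighborhood and blend it using the fractional offsets.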
    u -= 0.5f;
    v -= 0.5f;

    f32x4 const floored_u = floor_int_range(u);
    f32x4 const floored_v = floor_int_range(v);

    u32x4 i0 = to_u32x4(floored_u);
    u32x4 i1 = i0 + 1;
    u32x4 j0 = to_u32x4(floored_v);
    u32x4 j1 = j0 + 1;

    if (m_config.texture_wrap_u == GPU::TextureWrapMode::Repeat) {
        if (image.width_is_power_of_two()) {
            i0 = i0 & width_mask;
            i1 = i1 & width_mask;
        } else {
            i0 = i0 % width;
            i1 = i1 % width;
        }
    }

    if (m_config.texture_wrap_v == GPU::TextureWrapMode::Repeat) {
        if (image.height_is_power_of_two()) {
            j0 = j0 & height_mask;
            j1 = j1 & height_mask;
        } else {
            j0 = j0 % height;
            j1 = j1 % height;
        }
    }

    Vector4<f32x4> t0, t1, t2, t3;
    if (m_config.texture_wrap_u == GPU::TextureWrapMode::Repeat && m_config.texture_wrap_v == GPU::TextureWrapMode::Repeat) {
        t0 = texel4(image, level, i0, j0);
        t1 = texel4(image, level, i1, j0);
        t2 = texel4(image, level, i0, j1);
        t3 = texel4(image, level, i1, j1);
    } else {
        t0 = texel4border(image, level, i0, j0, m_config.border_color, width, height);
        t1 = texel4border(image, level, i1, j0, m_config.border_color, width, height);
        t2 = texel4border(image, level, i0, j1, m_config.border_color, width, height);
        t3 = texel4border(image, level, i1, j1, m_config.border_color, width, height);
    }

    f32x4 const alpha = u - floored_u;
    f32x4 const beta = v - floored_v;

    auto const lerp_0 = mix(t0, t1, alpha);
    auto const lerp_1 = mix(t2, t3, alpha);

    return mix(lerp_0, lerp_1, beta);
}

}