Decoder.cpp 101 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835
  1. /*
  2. * Copyright (c) 2021, Hunter Salyer <thefalsehonesty@gmail.com>
  3. * Copyright (c) 2022, Gregory Bertilson <zaggy1024@gmail.com>
  4. *
  5. * SPDX-License-Identifier: BSD-2-Clause
  6. */
  7. #include <AK/IntegralMath.h>
  8. #include <LibGfx/Size.h>
  9. #include <LibVideo/Color/CodingIndependentCodePoints.h>
  10. #include "Decoder.h"
  11. #include "Utilities.h"
  12. #if defined(AK_COMPILER_GCC)
  13. # pragma GCC optimize("O3")
  14. #endif
  15. namespace Video::VP9 {
// Constructs the decoder and its bitstream parser. The parser keeps a
// back-reference to this decoder so it can invoke the block decode processes
// while parsing syntax elements.
Decoder::Decoder()
    : m_parser(make<Parser>(*this))
{
}
  20. DecoderErrorOr<void> Decoder::receive_sample(ReadonlyBytes chunk_data)
  21. {
  22. auto superframe_sizes = m_parser->parse_superframe_sizes(chunk_data);
  23. if (superframe_sizes.is_empty()) {
  24. return decode_frame(chunk_data);
  25. }
  26. size_t offset = 0;
  27. for (auto superframe_size : superframe_sizes) {
  28. auto checked_size = Checked<size_t>(superframe_size);
  29. checked_size += offset;
  30. if (checked_size.has_overflow() || checked_size.value() > chunk_data.size())
  31. return DecoderError::with_description(DecoderErrorCategory::Corrupted, "Superframe size invalid"sv);
  32. auto frame_data = chunk_data.slice(offset, superframe_size);
  33. TRY(decode_frame(frame_data));
  34. offset = checked_size.value();
  35. }
  36. return {};
  37. }
  38. inline size_t index_from_row_and_column(u32 row, u32 column, u32 stride)
  39. {
  40. return row * stride + column;
  41. }
  42. DecoderErrorOr<void> Decoder::decode_frame(ReadonlyBytes frame_data)
  43. {
  44. // 1. The syntax elements for the coded frame are extracted as specified in sections 6 and 7. The syntax
  45. // tables include function calls indicating when the block decode processes should be triggered.
  46. auto frame_context = TRY(m_parser->parse_frame(frame_data));
  47. // 2. If loop_filter_level is not equal to 0, the loop filter process as specified in section 8.8 is invoked once the
  48. // coded frame has been decoded.
  49. // FIXME: Implement loop filtering.
  50. // 3. If all of the following conditions are true, PrevSegmentIds[ row ][ col ] is set equal to
  51. // SegmentIds[ row ][ col ] for row = 0..MiRows-1, for col = 0..MiCols-1:
  52. // − show_existing_frame is equal to 0,
  53. // − segmentation_enabled is equal to 1,
  54. // − segmentation_update_map is equal to 1.
  55. // This is handled by update_reference_frames.
  56. // 4. The output process as specified in section 8.9 is invoked.
  57. if (frame_context.shows_a_frame())
  58. TRY(create_video_frame(frame_context));
  59. // 5. The reference frame update process as specified in section 8.10 is invoked.
  60. TRY(update_reference_frames(frame_context));
  61. return {};
  62. }
  63. inline CodingIndependentCodePoints get_cicp_color_space(FrameContext const& frame_context)
  64. {
  65. ColorPrimaries color_primaries;
  66. TransferCharacteristics transfer_characteristics;
  67. MatrixCoefficients matrix_coefficients;
  68. switch (frame_context.color_config.color_space) {
  69. case ColorSpace::Unknown:
  70. color_primaries = ColorPrimaries::Unspecified;
  71. transfer_characteristics = TransferCharacteristics::Unspecified;
  72. matrix_coefficients = MatrixCoefficients::Unspecified;
  73. break;
  74. case ColorSpace::Bt601:
  75. color_primaries = ColorPrimaries::BT601;
  76. transfer_characteristics = TransferCharacteristics::BT601;
  77. matrix_coefficients = MatrixCoefficients::BT601;
  78. break;
  79. case ColorSpace::Bt709:
  80. color_primaries = ColorPrimaries::BT709;
  81. transfer_characteristics = TransferCharacteristics::BT709;
  82. matrix_coefficients = MatrixCoefficients::BT709;
  83. break;
  84. case ColorSpace::Smpte170:
  85. // https://www.kernel.org/doc/html/v4.9/media/uapi/v4l/pixfmt-007.html#colorspace-smpte-170m-v4l2-colorspace-smpte170m
  86. color_primaries = ColorPrimaries::BT601;
  87. transfer_characteristics = TransferCharacteristics::BT709;
  88. matrix_coefficients = MatrixCoefficients::BT601;
  89. break;
  90. case ColorSpace::Smpte240:
  91. color_primaries = ColorPrimaries::SMPTE240;
  92. transfer_characteristics = TransferCharacteristics::SMPTE240;
  93. matrix_coefficients = MatrixCoefficients::SMPTE240;
  94. break;
  95. case ColorSpace::Bt2020:
  96. color_primaries = ColorPrimaries::BT2020;
  97. // Bit depth doesn't actually matter to our transfer functions since we
  98. // convert in floats of range 0-1 (for now?), but just for correctness set
  99. // the TC to match the bit depth here.
  100. if (frame_context.color_config.bit_depth == 12)
  101. transfer_characteristics = TransferCharacteristics::BT2020BitDepth12;
  102. else if (frame_context.color_config.bit_depth == 10)
  103. transfer_characteristics = TransferCharacteristics::BT2020BitDepth10;
  104. else
  105. transfer_characteristics = TransferCharacteristics::BT709;
  106. matrix_coefficients = MatrixCoefficients::BT2020NonConstantLuminance;
  107. break;
  108. case ColorSpace::RGB:
  109. color_primaries = ColorPrimaries::BT709;
  110. transfer_characteristics = TransferCharacteristics::Linear;
  111. matrix_coefficients = MatrixCoefficients::Identity;
  112. break;
  113. case ColorSpace::Reserved:
  114. VERIFY_NOT_REACHED();
  115. break;
  116. }
  117. return { color_primaries, transfer_characteristics, matrix_coefficients, frame_context.color_config.color_range };
  118. }
  119. DecoderErrorOr<void> Decoder::create_video_frame(FrameContext const& frame_context)
  120. {
  121. // (8.9) Output process
  122. // FIXME: If show_existing_frame is set, output from FrameStore[frame_to_show_map_index] here instead.
  123. // FIXME: The math isn't entirely accurate to spec. output_uv_size is probably incorrect for certain
  124. // sizes, as the spec seems to prefer that the halved sizes be ceiled.
  125. u32 decoded_y_width = frame_context.columns() * 8;
  126. Gfx::Size<u32> output_y_size = frame_context.size();
  127. auto decoded_uv_width = decoded_y_width >> frame_context.color_config.subsampling_x;
  128. Gfx::Size<u32> output_uv_size = {
  129. output_y_size.width() >> frame_context.color_config.subsampling_x,
  130. output_y_size.height() >> frame_context.color_config.subsampling_y,
  131. };
  132. Array<FixedArray<u16>, 3> output_buffers = {
  133. DECODER_TRY_ALLOC(FixedArray<u16>::try_create(output_y_size.width() * output_y_size.height())),
  134. DECODER_TRY_ALLOC(FixedArray<u16>::try_create(output_uv_size.width() * output_uv_size.height())),
  135. DECODER_TRY_ALLOC(FixedArray<u16>::try_create(output_uv_size.width() * output_uv_size.height())),
  136. };
  137. for (u8 plane = 0; plane < 3; plane++) {
  138. auto& buffer = output_buffers[plane];
  139. auto decoded_width = plane == 0 ? decoded_y_width : decoded_uv_width;
  140. auto output_size = plane == 0 ? output_y_size : output_uv_size;
  141. auto const& decoded_buffer = get_output_buffer(plane);
  142. for (u32 row = 0; row < output_size.height(); row++) {
  143. memcpy(
  144. buffer.data() + row * output_size.width(),
  145. decoded_buffer.data() + row * decoded_width,
  146. output_size.width() * sizeof(*buffer.data()));
  147. }
  148. }
  149. auto frame = DECODER_TRY_ALLOC(adopt_nonnull_own_or_enomem(new (nothrow) SubsampledYUVFrame(
  150. { output_y_size.width(), output_y_size.height() },
  151. frame_context.color_config.bit_depth, get_cicp_color_space(frame_context),
  152. frame_context.color_config.subsampling_x, frame_context.color_config.subsampling_y,
  153. output_buffers[0], output_buffers[1], output_buffers[2])));
  154. m_video_frame_queue.enqueue(move(frame));
  155. return {};
  156. }
  157. inline size_t buffer_size(size_t width, size_t height)
  158. {
  159. return width * height;
  160. }
// Convenience overload: number of samples a plane of the given size needs.
inline size_t buffer_size(Gfx::Size<size_t> size)
{
    return buffer_size(size.width(), size.height());
}
  165. DecoderErrorOr<void> Decoder::allocate_buffers(FrameContext const& frame_context)
  166. {
  167. for (size_t plane = 0; plane < 3; plane++) {
  168. auto size = m_parser->get_decoded_size_for_plane(frame_context, plane);
  169. auto& output_buffer = get_output_buffer(plane);
  170. output_buffer.clear_with_capacity();
  171. DECODER_TRY_ALLOC(output_buffer.try_resize_and_keep_capacity(buffer_size(size)));
  172. }
  173. return {};
  174. }
// Returns the decode-target buffer for the given plane (0 = Y, 1 = U, 2 = V).
Vector<u16>& Decoder::get_output_buffer(u8 plane)
{
    return m_output_buffers[plane];
}
  179. DecoderErrorOr<NonnullOwnPtr<VideoFrame>> Decoder::get_decoded_frame()
  180. {
  181. if (m_video_frame_queue.is_empty())
  182. return DecoderError::format(DecoderErrorCategory::NeedsMoreInput, "No video frame in queue.");
  183. return m_video_frame_queue.dequeue();
  184. }
  185. u8 Decoder::merge_prob(u8 pre_prob, u8 count_0, u8 count_1, u8 count_sat, u8 max_update_factor)
  186. {
  187. auto total_decode_count = count_0 + count_1;
  188. auto prob = (total_decode_count == 0) ? 128 : clip_3(1, 255, (count_0 * 256 + (total_decode_count >> 1)) / total_decode_count);
  189. auto count = min(total_decode_count, count_sat);
  190. auto factor = (max_update_factor * count) / count_sat;
  191. return round_2(pre_prob * (256 - factor) + (prob * factor), 8);
  192. }
  193. u8 Decoder::merge_probs(int const* tree, int index, u8* probs, u8* counts, u8 count_sat, u8 max_update_factor)
  194. {
  195. auto s = tree[index];
  196. auto left_count = (s <= 0) ? counts[-s] : merge_probs(tree, s, probs, counts, count_sat, max_update_factor);
  197. auto r = tree[index + 1];
  198. auto right_count = (r <= 0) ? counts[-r] : merge_probs(tree, r, probs, counts, count_sat, max_update_factor);
  199. probs[index >> 1] = merge_prob(probs[index >> 1], left_count, right_count, count_sat, max_update_factor);
  200. return left_count + right_count;
  201. }
  202. DecoderErrorOr<void> Decoder::adapt_coef_probs(bool is_inter_predicted_frame)
  203. {
  204. u8 update_factor;
  205. if (!is_inter_predicted_frame || m_parser->m_previous_frame_type != FrameType::KeyFrame)
  206. update_factor = 112;
  207. else
  208. update_factor = 128;
  209. for (size_t t = 0; t < 4; t++) {
  210. for (size_t i = 0; i < 2; i++) {
  211. for (size_t j = 0; j < 2; j++) {
  212. for (size_t k = 0; k < 6; k++) {
  213. size_t max_l = (k == 0) ? 3 : 6;
  214. for (size_t l = 0; l < max_l; l++) {
  215. auto& coef_probs = m_parser->m_probability_tables->coef_probs()[t][i][j][k][l];
  216. merge_probs(small_token_tree, 2, coef_probs,
  217. m_parser->m_syntax_element_counter->m_counts_token[t][i][j][k][l],
  218. 24, update_factor);
  219. merge_probs(binary_tree, 0, coef_probs,
  220. m_parser->m_syntax_element_counter->m_counts_more_coefs[t][i][j][k][l],
  221. 24, update_factor);
  222. }
  223. }
  224. }
  225. }
  226. }
  227. return {};
  228. }
// Adapts each entry of a flat per-context probability table named `name`,
// using the matching decode counts from the syntax element counter.
// Expects `probs` and `counter` references in the enclosing scope.
#define ADAPT_PROB_TABLE(name, size)                                     \
    do {                                                                 \
        for (size_t i = 0; i < (size); i++) {                            \
            auto table = probs.name##_prob();                            \
            table[i] = adapt_prob(table[i], counter.m_counts_##name[i]); \
        }                                                                \
    } while (0)

// Adapts a probability tree for each of `size` contexts, pairing the
// `prob_name` probability table with the `count_name` decode counts.
// Expects `probs` and `counter` references in the enclosing scope.
#define ADAPT_TREE(tree_name, prob_name, count_name, size)                                                 \
    do {                                                                                                   \
        for (size_t i = 0; i < (size); i++) {                                                              \
            adapt_probs(tree_name##_tree, probs.prob_name##_probs()[i], counter.m_counts_##count_name[i]); \
        }                                                                                                  \
    } while (0)
// (9.2.12) Adapts all non-coefficient probabilities at the end of a frame,
// using the symbol counts accumulated by the syntax element counter.
DecoderErrorOr<void> Decoder::adapt_non_coef_probs(FrameContext const& frame_context)
{
    auto& probs = *m_parser->m_probability_tables;
    auto& counter = *m_parser->m_syntax_element_counter;
    ADAPT_PROB_TABLE(is_inter, IS_INTER_CONTEXTS);
    ADAPT_PROB_TABLE(comp_mode, COMP_MODE_CONTEXTS);
    ADAPT_PROB_TABLE(comp_ref, REF_CONTEXTS);
    // single_ref is a two-dimensional table, so ADAPT_PROB_TABLE doesn't fit.
    for (size_t i = 0; i < REF_CONTEXTS; i++) {
        for (size_t j = 0; j < 2; j++)
            probs.single_ref_prob()[i][j] = adapt_prob(probs.single_ref_prob()[i][j], counter.m_counts_single_ref[i][j]);
    }
    ADAPT_TREE(inter_mode, inter_mode, inter_mode, INTER_MODE_CONTEXTS);
    // NOTE(review): the three adaptations below iterate INTER_MODE_CONTEXTS
    // entries, but the spec adapts y_mode over BLOCK_SIZE_GROUPS, uv_mode over
    // INTRA_MODES and partition over PARTITION_CONTEXTS — confirm these counts
    // against the table dimensions declared in the probability tables.
    ADAPT_TREE(intra_mode, y_mode, intra_mode, INTER_MODE_CONTEXTS);
    ADAPT_TREE(intra_mode, uv_mode, uv_mode, INTER_MODE_CONTEXTS);
    ADAPT_TREE(partition, partition, partition, INTER_MODE_CONTEXTS);
    ADAPT_PROB_TABLE(skip, SKIP_CONTEXTS);
    // Interpolation filter probabilities are only adapted when the frame lets
    // each block choose its own filter.
    if (frame_context.interpolation_filter == Switchable) {
        ADAPT_TREE(interp_filter, interp_filter, interp_filter, INTERP_FILTER_CONTEXTS);
    }
    // Transform size probabilities are only adapted when the transform size is
    // selected per block.
    if (frame_context.transform_mode == TXModeSelect) {
        for (size_t i = 0; i < TX_SIZE_CONTEXTS; i++) {
            auto& tx_probs = probs.tx_probs();
            auto& tx_counts = counter.m_counts_tx_size;
            adapt_probs(tx_size_8_tree, tx_probs[TX_8x8][i], tx_counts[TX_8x8][i]);
            adapt_probs(tx_size_16_tree, tx_probs[TX_16x16][i], tx_counts[TX_16x16][i]);
            adapt_probs(tx_size_32_tree, tx_probs[TX_32x32][i], tx_counts[TX_32x32][i]);
        }
    }
    // Motion vector probabilities: the joint distribution, then per component
    // (i = 0 row, i = 1 column) the sign, class, integer bits and fractional parts.
    adapt_probs(mv_joint_tree, probs.mv_joint_probs(), counter.m_counts_mv_joint);
    for (size_t i = 0; i < 2; i++) {
        probs.mv_sign_prob()[i] = adapt_prob(probs.mv_sign_prob()[i], counter.m_counts_mv_sign[i]);
        adapt_probs(mv_class_tree, probs.mv_class_probs()[i], counter.m_counts_mv_class[i]);
        probs.mv_class0_bit_prob()[i] = adapt_prob(probs.mv_class0_bit_prob()[i], counter.m_counts_mv_class0_bit[i]);
        for (size_t j = 0; j < MV_OFFSET_BITS; j++)
            probs.mv_bits_prob()[i][j] = adapt_prob(probs.mv_bits_prob()[i][j], counter.m_counts_mv_bits[i][j]);
        for (size_t j = 0; j < CLASS0_SIZE; j++)
            adapt_probs(mv_fr_tree, probs.mv_class0_fr_probs()[i][j], counter.m_counts_mv_class0_fr[i][j]);
        adapt_probs(mv_fr_tree, probs.mv_fr_probs()[i], counter.m_counts_mv_fr[i]);
        // High-precision (eighth-pel) bits are only adapted when the frame allows them.
        if (frame_context.high_precision_motion_vectors_allowed) {
            probs.mv_class0_hp_prob()[i] = adapt_prob(probs.mv_class0_hp_prob()[i], counter.m_counts_mv_class0_hp[i]);
            probs.mv_hp_prob()[i] = adapt_prob(probs.mv_hp_prob()[i], counter.m_counts_mv_hp[i]);
        }
    }
    return {};
}
// Adapts a whole probability tree in place using the default saturation and
// maximum update factor.
void Decoder::adapt_probs(int const* tree, u8* probs, u8* counts)
{
    merge_probs(tree, 0, probs, counts, COUNT_SAT, MAX_UPDATE_FACTOR);
}
// Adapts a single binary probability using the default saturation and maximum
// update factor.
u8 Decoder::adapt_prob(u8 prob, u8 counts[2])
{
    return merge_prob(prob, counts[0], counts[1], COUNT_SAT, MAX_UPDATE_FACTOR);
}
  295. DecoderErrorOr<void> Decoder::predict_intra(u8 plane, BlockContext const& block_context, u32 x, u32 y, bool have_left, bool have_above, bool not_on_right, TXSize tx_size, u32 block_index)
  296. {
  297. auto& frame_buffer = get_output_buffer(plane);
  298. // 8.5.1 Intra prediction process
  299. // The intra prediction process is invoked for intra coded blocks to predict a part of the block corresponding to a
  300. // transform block. When the transform size is smaller than the block size, this process can be invoked multiple
  301. // times within a single block for the same plane, and the invocations are in raster order within the block.
  302. // The variable mode is specified by:
  303. // 1. If plane is greater than 0, mode is set equal to uv_mode.
  304. // 2. Otherwise, if MiSize is greater than or equal to BLOCK_8X8, mode is set equal to y_mode.
  305. // 3. Otherwise, mode is set equal to sub_modes[ blockIdx ].
  306. PredictionMode mode;
  307. if (plane > 0)
  308. mode = block_context.uv_prediction_mode;
  309. else if (block_context.size >= Block_8x8)
  310. mode = block_context.y_prediction_mode();
  311. else
  312. mode = block_context.sub_block_prediction_modes[block_index];
  313. // The variable log2Size specifying the base 2 logarithm of the width of the transform block is set equal to txSz + 2.
  314. u8 log2_of_block_size = tx_size + 2;
  315. // The variable size is set equal to 1 << log2Size.
  316. u8 block_size = 1 << log2_of_block_size;
  317. // The variable maxX is set equal to (MiCols * 8) - 1.
  318. // The variable maxY is set equal to (MiRows * 8) - 1.
  319. // If plane is greater than 0, then:
  320. // − maxX is set equal to ((MiCols * 8) >> subsampling_x) - 1.
  321. // − maxY is set equal to ((MiRows * 8) >> subsampling_y) - 1.
  322. auto subsampling_x = plane > 0 ? block_context.frame_context.color_config.subsampling_x : false;
  323. auto subsampling_y = plane > 0 ? block_context.frame_context.color_config.subsampling_y : false;
  324. auto max_x = ((block_context.frame_context.columns() * 8u) >> subsampling_x) - 1u;
  325. auto max_y = ((block_context.frame_context.rows() * 8u) >> subsampling_y) - 1u;
  326. auto const frame_buffer_at = [&](u32 row, u32 column) -> u16& {
  327. const auto frame_stride = max_x + 1u;
  328. return frame_buffer[index_from_row_and_column(row, column, frame_stride)];
  329. };
  330. // The array aboveRow[ i ] for i = 0..size-1 is specified by:
  331. // ..
  332. // The array aboveRow[ i ] for i = size..2*size-1 is specified by:
  333. // ..
  334. // The array aboveRow[ i ] for i = -1 is specified by:
  335. // ..
  336. // NOTE: above_row is an array ranging from 0 to (2*block_size).
  337. // There are three sections to the array:
  338. // - [0]
  339. // - [1 .. block_size]
  340. // - [block_size + 1 .. block_size * 2]
  341. // The array indices must be offset by 1 to accommodate index -1.
  342. Array<Intermediate, maximum_block_dimensions * 2 + 1> above_row;
  343. auto above_row_at = [&](i32 index) -> Intermediate& {
  344. return above_row[index + 1];
  345. };
  346. // NOTE: This value is pre-calculated since it is reused in spec below.
  347. // Use this to replace spec text "(1<<(BitDepth-1))".
  348. Intermediate half_sample_value = (1 << (block_context.frame_context.color_config.bit_depth - 1));
  349. // The array aboveRow[ i ] for i = 0..size-1 is specified by:
  350. if (!have_above) {
  351. // 1. If haveAbove is equal to 0, aboveRow[ i ] is set equal to (1<<(BitDepth-1)) - 1.
  352. // FIXME: Use memset?
  353. for (auto i = 0u; i < block_size; i++)
  354. above_row_at(i) = half_sample_value - 1;
  355. } else {
  356. // 2. Otherwise, aboveRow[ i ] is set equal to CurrFrame[ plane ][ y-1 ][ Min(maxX, x+i) ].
  357. for (auto i = 0u; i < block_size; i++)
  358. above_row_at(i) = frame_buffer_at(y - 1, min(max_x, x + i));
  359. }
  360. // The array aboveRow[ i ] for i = size..2*size-1 is specified by:
  361. if (have_above && not_on_right && tx_size == TXSize::TX_4x4) {
  362. // 1. If haveAbove is equal to 1 and notOnRight is equal to 1 and txSz is equal to 0,
  363. // aboveRow[ i ] is set equal to CurrFrame[ plane ][ y-1 ][ Min(maxX, x+i) ].
  364. for (auto i = block_size; i < block_size * 2; i++)
  365. above_row_at(i) = frame_buffer_at(y - 1, min(max_x, x + i));
  366. } else {
  367. // 2. Otherwise, aboveRow[ i ] is set equal to aboveRow[ size-1 ].
  368. for (auto i = block_size; i < block_size * 2; i++)
  369. above_row_at(i) = above_row_at(block_size - 1);
  370. }
  371. // The array aboveRow[ i ] for i = -1 is specified by:
  372. if (have_above && have_left) {
  373. // 1. If haveAbove is equal to 1 and haveLeft is equal to 1, aboveRow[ -1 ] is set equal to
  374. // CurrFrame[ plane ][ y-1 ][ Min(maxX, x-1) ].
  375. above_row_at(-1) = frame_buffer_at(y - 1, min(max_x, x - 1));
  376. } else if (have_above) {
  377. // 2. Otherwise if haveAbove is equal to 1, aboveRow[ -1] is set equal to (1<<(BitDepth-1)) + 1.
  378. above_row_at(-1) = half_sample_value + 1;
  379. } else {
  380. // 3. Otherwise, aboveRow[ -1 ] is set equal to (1<<(BitDepth-1)) - 1
  381. above_row_at(-1) = half_sample_value - 1;
  382. }
  383. // The array leftCol[ i ] for i = 0..size-1 is specified by:
  384. Array<Intermediate, maximum_block_dimensions> left_column;
  385. if (have_left) {
  386. // − If haveLeft is equal to 1, leftCol[ i ] is set equal to CurrFrame[ plane ][ Min(maxY, y+i) ][ x-1 ].
  387. for (auto i = 0u; i < block_size; i++)
  388. left_column[i] = frame_buffer_at(min(max_y, y + i), x - 1);
  389. } else {
  390. // − Otherwise, leftCol[ i ] is set equal to (1<<(BitDepth-1)) + 1.
  391. for (auto i = 0u; i < block_size; i++)
  392. left_column[i] = half_sample_value + 1;
  393. }
  394. // A 2D array named pred containing the intra predicted samples is constructed as follows:
  395. Array<Intermediate, maximum_block_size> predicted_samples;
  396. auto const predicted_sample_at = [&](u32 row, u32 column) -> Intermediate& {
  397. return predicted_samples[index_from_row_and_column(row, column, block_size)];
  398. };
  399. // FIXME: One of the two below should be a simple memcpy of 1D arrays.
  400. switch (mode) {
  401. case PredictionMode::VPred:
  402. // − If mode is equal to V_PRED, pred[ i ][ j ] is set equal to aboveRow[ j ] with j = 0..size-1 and i = 0..size-1
  403. // (each row of the block is filled with a copy of aboveRow).
  404. for (auto j = 0u; j < block_size; j++) {
  405. for (auto i = 0u; i < block_size; i++)
  406. predicted_sample_at(i, j) = above_row_at(j);
  407. }
  408. break;
  409. case PredictionMode::HPred:
  410. // − Otherwise if mode is equal to H_PRED, pred[ i ][ j ] is set equal to leftCol[ i ] with j = 0..size-1 and i =
  411. // 0..size-1 (each column of the block is filled with a copy of leftCol).
  412. for (auto j = 0u; j < block_size; j++) {
  413. for (auto i = 0u; i < block_size; i++)
  414. predicted_sample_at(i, j) = left_column[i];
  415. }
  416. break;
  417. case PredictionMode::D207Pred:
  418. // − Otherwise if mode is equal to D207_PRED, the following applies:
  419. // 1. pred[ size - 1 ][ j ] = leftCol[ size - 1] for j = 0..size-1
  420. for (auto j = 0u; j < block_size; j++)
  421. predicted_sample_at(block_size - 1, j) = left_column[block_size - 1];
  422. // 2. pred[ i ][ 0 ] = Round2( leftCol[ i ] + leftCol[ i + 1 ], 1 ) for i = 0..size-2
  423. for (auto i = 0u; i < block_size - 1u; i++)
  424. predicted_sample_at(i, 0) = round_2(left_column[i] + left_column[i + 1], 1);
  425. // 3. pred[ i ][ 1 ] = Round2( leftCol[ i ] + 2 * leftCol[ i + 1 ] + leftCol[ i + 2 ], 2 ) for i = 0..size-3
  426. for (auto i = 0u; i < block_size - 2u; i++)
  427. predicted_sample_at(i, 1) = round_2(left_column[i] + (2 * left_column[i + 1]) + left_column[i + 2], 2);
  428. // 4. pred[ size - 2 ][ 1 ] = Round2( leftCol[ size - 2 ] + 3 * leftCol[ size - 1 ], 2 )
  429. predicted_sample_at(block_size - 2, 1) = round_2(left_column[block_size - 2] + (3 * left_column[block_size - 1]), 2);
  430. // 5. pred[ i ][ j ] = pred[ i + 1 ][ j - 2 ] for i = (size-2)..0, for j = 2..size-1
  431. // NOTE – In the last step i iterates in reverse order.
  432. for (auto i = block_size - 2u;;) {
  433. for (auto j = 2u; j < block_size; j++)
  434. predicted_sample_at(i, j) = predicted_sample_at(i + 1, j - 2);
  435. if (i == 0)
  436. break;
  437. i--;
  438. }
  439. break;
  440. case PredictionMode::D45Pred:
  441. // Otherwise if mode is equal to D45_PRED,
  442. // for i = 0..size-1, for j = 0..size-1.
  443. for (auto i = 0u; i < block_size; i++) {
  444. for (auto j = 0; j < block_size; j++) {
  445. // pred[ i ][ j ] is set equal to (i + j + 2 < size * 2) ?
  446. if (i + j + 2 < block_size * 2)
  447. // Round2( aboveRow[ i + j ] + aboveRow[ i + j + 1 ] * 2 + aboveRow[ i + j + 2 ], 2 ) :
  448. predicted_sample_at(i, j) = round_2(above_row_at(i + j) + above_row_at(i + j + 1) * 2 + above_row_at(i + j + 2), 2);
  449. else
  450. // aboveRow[ 2 * size - 1 ]
  451. predicted_sample_at(i, j) = above_row_at(2 * block_size - 1);
  452. }
  453. }
  454. break;
  455. case PredictionMode::D63Pred:
  456. // Otherwise if mode is equal to D63_PRED,
  457. for (auto i = 0u; i < block_size; i++) {
  458. for (auto j = 0u; j < block_size; j++) {
  459. // i/2 + j
  460. auto row_index = (i / 2) + j;
  461. // pred[ i ][ j ] is set equal to (i & 1) ?
  462. if (i & 1)
  463. // Round2( aboveRow[ i/2 + j ] + aboveRow[ i/2 + j + 1 ] * 2 + aboveRow[ i/2 + j + 2 ], 2 ) :
  464. predicted_sample_at(i, j) = round_2(above_row_at(row_index) + above_row_at(row_index + 1) * 2 + above_row_at(row_index + 2), 2);
  465. else
  466. // Round2( aboveRow[ i/2 + j ] + aboveRow[ i/2 + j + 1 ], 1 ) for i = 0..size-1, for j = 0..size-1.
  467. predicted_sample_at(i, j) = round_2(above_row_at(row_index) + above_row_at(row_index + 1), 1);
  468. }
  469. }
  470. break;
  471. case PredictionMode::D117Pred:
  472. // Otherwise if mode is equal to D117_PRED, the following applies:
  473. // 1. pred[ 0 ][ j ] = Round2( aboveRow[ j - 1 ] + aboveRow[ j ], 1 ) for j = 0..size-1
  474. for (auto j = 0; j < block_size; j++)
  475. predicted_sample_at(0, j) = round_2(above_row_at(j - 1) + above_row_at(j), 1);
  476. // 2. pred[ 1 ][ 0 ] = Round2( leftCol[ 0 ] + 2 * aboveRow[ -1 ] + aboveRow[ 0 ], 2 )
  477. predicted_sample_at(1, 0) = round_2(left_column[0] + 2 * above_row_at(-1) + above_row_at(0), 2);
  478. // 3. pred[ 1 ][ j ] = Round2( aboveRow[ j - 2 ] + 2 * aboveRow[ j - 1 ] + aboveRow[ j ], 2 ) for j = 1..size-1
  479. for (auto j = 1; j < block_size; j++)
  480. predicted_sample_at(1, j) = round_2(above_row_at(j - 2) + 2 * above_row_at(j - 1) + above_row_at(j), 2);
  481. // 4. pred[ 2 ][ 0 ] = Round2( aboveRow[ -1 ] + 2 * leftCol[ 0 ] + leftCol[ 1 ], 2 )
  482. predicted_sample_at(2, 0) = round_2(above_row_at(-1) + 2 * left_column[0] + left_column[1], 2);
  483. // 5. pred[ i ][ 0 ] = Round2( leftCol[ i - 3 ] + 2 * leftCol[ i - 2 ] + leftCol[ i - 1 ], 2 ) for i = 3..size-1
  484. for (auto i = 3u; i < block_size; i++)
  485. predicted_sample_at(i, 0) = round_2(left_column[i - 3] + 2 * left_column[i - 2] + left_column[i - 1], 2);
  486. // 6. pred[ i ][ j ] = pred[ i - 2 ][ j - 1 ] for i = 2..size-1, for j = 1..size-1
  487. for (auto i = 2u; i < block_size; i++) {
  488. for (auto j = 1u; j < block_size; j++)
  489. predicted_sample_at(i, j) = predicted_sample_at(i - 2, j - 1);
  490. }
  491. break;
  492. case PredictionMode::D135Pred:
  493. // Otherwise if mode is equal to D135_PRED, the following applies:
  494. // 1. pred[ 0 ][ 0 ] = Round2( leftCol[ 0 ] + 2 * aboveRow[ -1 ] + aboveRow[ 0 ], 2 )
  495. predicted_sample_at(0, 0) = round_2(left_column[0] + 2 * above_row_at(-1) + above_row_at(0), 2);
  496. // 2. pred[ 0 ][ j ] = Round2( aboveRow[ j - 2 ] + 2 * aboveRow[ j - 1 ] + aboveRow[ j ], 2 ) for j = 1..size-1
  497. for (auto j = 1; j < block_size; j++)
  498. predicted_sample_at(0, j) = round_2(above_row_at(j - 2) + 2 * above_row_at(j - 1) + above_row_at(j), 2);
  499. // 3. pred[ 1 ][ 0 ] = Round2( aboveRow [ -1 ] + 2 * leftCol[ 0 ] + leftCol[ 1 ], 2 ) for i = 1..size-1
  500. predicted_sample_at(1, 0) = round_2(above_row_at(-1) + 2 * left_column[0] + left_column[1], 2);
  501. // 4. pred[ i ][ 0 ] = Round2( leftCol[ i - 2 ] + 2 * leftCol[ i - 1 ] + leftCol[ i ], 2 ) for i = 2..size-1
  502. for (auto i = 2u; i < block_size; i++)
  503. predicted_sample_at(i, 0) = round_2(left_column[i - 2] + 2 * left_column[i - 1] + left_column[i], 2);
  504. // 5. pred[ i ][ j ] = pred[ i - 1 ][ j - 1 ] for i = 1..size-1, for j = 1..size-1
  505. for (auto i = 1u; i < block_size; i++) {
  506. for (auto j = 1; j < block_size; j++)
  507. predicted_sample_at(i, j) = predicted_sample_at(i - 1, j - 1);
  508. }
  509. break;
  510. case PredictionMode::D153Pred:
  511. // Otherwise if mode is equal to D153_PRED, the following applies:
  512. // 1. pred[ 0 ][ 0 ] = Round2( leftCol[ 0 ] + aboveRow[ -1 ], 1 )
  513. predicted_sample_at(0, 0) = round_2(left_column[0] + above_row_at(-1), 1);
  514. // 2. pred[ i ][ 0 ] = Round2( leftCol[ i - 1] + leftCol[ i ], 1 ) for i = 1..size-1
  515. for (auto i = 1u; i < block_size; i++)
  516. predicted_sample_at(i, 0) = round_2(left_column[i - 1] + left_column[i], 1);
  517. // 3. pred[ 0 ][ 1 ] = Round2( leftCol[ 0 ] + 2 * aboveRow[ -1 ] + aboveRow[ 0 ], 2 )
  518. predicted_sample_at(0, 1) = round_2(left_column[0] + 2 * above_row_at(-1) + above_row_at(0), 2);
  519. // 4. pred[ 1 ][ 1 ] = Round2( aboveRow[ -1 ] + 2 * leftCol [ 0 ] + leftCol [ 1 ], 2 )
  520. predicted_sample_at(1, 1) = round_2(above_row_at(-1) + 2 * left_column[0] + left_column[1], 2);
  521. // 5. pred[ i ][ 1 ] = Round2( leftCol[ i - 2 ] + 2 * leftCol[ i - 1 ] + leftCol[ i ], 2 ) for i = 2..size-1
  522. for (auto i = 2u; i < block_size; i++)
  523. predicted_sample_at(i, 1) = round_2(left_column[i - 2] + 2 * left_column[i - 1] + left_column[i], 2);
  524. // 6. pred[ 0 ][ j ] = Round2( aboveRow[ j - 3 ] + 2 * aboveRow[ j - 2 ] + aboveRow[ j - 1 ], 2 ) for j = 2..size-1
  525. for (auto j = 2; j < block_size; j++)
  526. predicted_sample_at(0, j) = round_2(above_row_at(j - 3) + 2 * above_row_at(j - 2) + above_row_at(j - 1), 2);
  527. // 7. pred[ i ][ j ] = pred[ i - 1 ][ j - 2 ] for i = 1..size-1, for j = 2..size-1
  528. for (auto i = 1u; i < block_size; i++) {
  529. for (auto j = 2u; j < block_size; j++)
  530. predicted_sample_at(i, j) = predicted_sample_at(i - 1, j - 2);
  531. }
  532. break;
  533. case PredictionMode::TmPred:
  534. // Otherwise if mode is equal to TM_PRED,
  535. // pred[ i ][ j ] is set equal to Clip1( aboveRow[ j ] + leftCol[ i ] - aboveRow[ -1 ] )
  536. // for i = 0..size-1, for j = 0..size-1.
  537. for (auto i = 0u; i < block_size; i++) {
  538. for (auto j = 0u; j < block_size; j++)
  539. predicted_sample_at(i, j) = clip_1(block_context.frame_context.color_config.bit_depth, above_row_at(j) + left_column[i] - above_row_at(-1));
  540. }
  541. break;
  542. case PredictionMode::DcPred: {
  543. Intermediate average = 0;
  544. if (have_left && have_above) {
  545. // Otherwise if mode is equal to DC_PRED and haveLeft is equal to 1 and haveAbove is equal to 1,
  546. // The variable avg (the average of the samples in union of aboveRow and leftCol)
  547. // is specified as follows:
  548. // sum = 0
  549. // for ( k = 0; k < size; k++ ) {
  550. // sum += leftCol[ k ]
  551. // sum += aboveRow[ k ]
  552. // }
  553. // avg = (sum + size) >> (log2Size + 1)
  554. Intermediate sum = 0;
  555. for (auto k = 0u; k < block_size; k++) {
  556. sum += left_column[k];
  557. sum += above_row_at(k);
  558. }
  559. average = (sum + block_size) >> (log2_of_block_size + 1);
  560. } else if (have_left && !have_above) {
  561. // Otherwise if mode is equal to DC_PRED and haveLeft is equal to 1 and haveAbove is equal to 0,
  562. // The variable leftAvg is specified as follows:
  563. // sum = 0
  564. // for ( k = 0; k < size; k++ ) {
  565. // sum += leftCol[ k ]
  566. // }
  567. // leftAvg = (sum + (1 << (log2Size - 1) ) ) >> log2Size
  568. Intermediate sum = 0;
  569. for (auto k = 0u; k < block_size; k++)
  570. sum += left_column[k];
  571. average = (sum + (1 << (log2_of_block_size - 1))) >> log2_of_block_size;
  572. } else if (!have_left && have_above) {
  573. // Otherwise if mode is equal to DC_PRED and haveLeft is equal to 0 and haveAbove is equal to 1,
  574. // The variable aboveAvg is specified as follows:
  575. // sum = 0
  576. // for ( k = 0; k < size; k++ ) {
  577. // sum += aboveRow[ k ]
  578. // }
  579. // aboveAvg = (sum + (1 << (log2Size - 1) ) ) >> log2Size
  580. Intermediate sum = 0;
  581. for (auto k = 0u; k < block_size; k++)
  582. sum += above_row_at(k);
  583. average = (sum + (1 << (log2_of_block_size - 1))) >> log2_of_block_size;
  584. } else {
  585. // Otherwise (mode is DC_PRED),
  586. // pred[ i ][ j ] is set equal to 1<<(BitDepth - 1) with i = 0..size-1 and j = 0..size-1.
  587. average = 1 << (block_context.frame_context.color_config.bit_depth - 1);
  588. }
  589. // pred[ i ][ j ] is set equal to avg with i = 0..size-1 and j = 0..size-1.
  590. for (auto i = 0u; i < block_size; i++) {
  591. for (auto j = 0u; j < block_size; j++)
  592. predicted_sample_at(i, j) = average;
  593. }
  594. break;
  595. }
  596. default:
  597. dbgln("Unknown prediction mode {}", static_cast<u8>(mode));
  598. VERIFY_NOT_REACHED();
  599. }
  600. // The current frame is updated as follows:
  601. // − CurrFrame[ plane ][ y + i ][ x + j ] is set equal to pred[ i ][ j ] for i = 0..size-1 and j = 0..size-1.
  602. auto width_in_frame_buffer = min(static_cast<u32>(block_size), max_x - x + 1);
  603. auto height_in_frame_buffer = min(static_cast<u32>(block_size), max_y - y + 1);
  604. for (auto i = 0u; i < height_in_frame_buffer; i++) {
  605. for (auto j = 0u; j < width_in_frame_buffer; j++)
  606. frame_buffer_at(y + i, x + j) = predicted_sample_at(i, j);
  607. }
  608. return {};
  609. }
  610. MotionVector Decoder::select_motion_vector(u8 plane, BlockContext const& block_context, ReferenceIndex reference_index, u32 block_index)
  611. {
  612. // The inputs to this process are:
  613. // − a variable plane specifying which plane is being predicted,
  614. // − a variable refList specifying that we should select the motion vector from BlockMvs[ refList ],
  615. // − a variable blockIdx, specifying how much of the block has already been predicted in units of 4x4 samples.
  616. // The output of this process is a 2 element array called mv containing the motion vector for this block.
  617. // The purpose of this process is to find the motion vector for this block. Motion vectors are specified for each
  618. // luma block, but a chroma block may cover more than one luma block due to subsampling. In this case, an
  619. // average motion vector is constructed for the chroma block.
  620. // The functions round_mv_comp_q2 and round_mv_comp_q4 perform division with rounding to the nearest
  621. // integer and are specified as:
  622. auto round_mv_comp_q2 = [&](MotionVector in) {
  623. // return (value < 0 ? value - 1 : value + 1) / 2
  624. return MotionVector {
  625. (in.row() < 0 ? in.row() - 1 : in.row() + 1) >> 1,
  626. (in.column() < 0 ? in.column() - 1 : in.column() + 1) >> 1
  627. };
  628. };
  629. auto round_mv_comp_q4 = [&](MotionVector in) {
  630. // return (value < 0 ? value - 2 : value + 2) / 4
  631. return MotionVector {
  632. (in.row() < 0 ? in.row() - 2 : in.row() + 2) >> 2,
  633. (in.column() < 0 ? in.column() - 2 : in.column() + 2) >> 2
  634. };
  635. };
  636. auto vectors = block_context.sub_block_motion_vectors;
  637. // The motion vector array mv is derived as follows:
  638. // − If plane is equal to 0, or MiSize is greater than or equal to BLOCK_8X8, mv is set equal to
  639. // BlockMvs[ refList ][ blockIdx ].
  640. if (plane == 0 || block_context.size >= Block_8x8)
  641. return vectors[block_index][reference_index];
  642. // − Otherwise, if subsampling_x is equal to 0 and subsampling_y is equal to 0, mv is set equal to
  643. // BlockMvs[ refList ][ blockIdx ].
  644. if (!block_context.frame_context.color_config.subsampling_x && !block_context.frame_context.color_config.subsampling_y)
  645. return vectors[block_index][reference_index];
  646. // − Otherwise, if subsampling_x is equal to 0 and subsampling_y is equal to 1, mv[ comp ] is set equal to
  647. // round_mv_comp_q2( BlockMvs[ refList ][ blockIdx ][ comp ] + BlockMvs[ refList ][ blockIdx + 2 ][ comp ] )
  648. // for comp = 0..1.
  649. if (!block_context.frame_context.color_config.subsampling_x && block_context.frame_context.color_config.subsampling_y)
  650. return round_mv_comp_q2(vectors[block_index][reference_index] + vectors[block_index + 2][reference_index]);
  651. // − Otherwise, if subsampling_x is equal to 1 and subsampling_y is equal to 0, mv[ comp ] is set equal to
  652. // round_mv_comp_q2( BlockMvs[ refList ][ blockIdx ][ comp ] + BlockMvs[ refList ][ blockIdx + 1 ][ comp ] )
  653. // for comp = 0..1.
  654. if (block_context.frame_context.color_config.subsampling_x && !block_context.frame_context.color_config.subsampling_y)
  655. return round_mv_comp_q2(vectors[block_index][reference_index] + vectors[block_index + 1][reference_index]);
  656. // − Otherwise, (subsampling_x is equal to 1 and subsampling_y is equal to 1), mv[ comp ] is set equal to
  657. // round_mv_comp_q4( BlockMvs[ refList ][ 0 ][ comp ] + BlockMvs[ refList ][ 1 ][ comp ] +
  658. // BlockMvs[ refList ][ 2 ][ comp ] + BlockMvs[ refList ][ 3 ][ comp ] ) for comp = 0..1.
  659. VERIFY(block_context.frame_context.color_config.subsampling_x && block_context.frame_context.color_config.subsampling_y);
  660. return round_mv_comp_q4(vectors[0][reference_index] + vectors[1][reference_index]
  661. + vectors[2][reference_index] + vectors[3][reference_index]);
  662. }
  663. MotionVector Decoder::clamp_motion_vector(u8 plane, BlockContext const& block_context, u32 block_row, u32 block_column, MotionVector vector)
  664. {
  665. // FIXME: This function is named very similarly to Parser::clamp_mv. Rename one or the other?
  666. // The purpose of this process is to change the motion vector into the appropriate precision for the current plane
  667. // and to clamp motion vectors that go too far off the edge of the frame.
  668. // The variables sx and sy are set equal to the subsampling for the current plane as follows:
  669. // − If plane is equal to 0, sx is set equal to 0 and sy is set equal to 0.
  670. // − Otherwise, sx is set equal to subsampling_x and sy is set equal to subsampling_y.
  671. bool subsampling_x = plane > 0 ? block_context.frame_context.color_config.subsampling_x : false;
  672. bool subsampling_y = plane > 0 ? block_context.frame_context.color_config.subsampling_y : false;
  673. // The output array clampedMv is specified by the following steps:
  674. i32 blocks_high = num_8x8_blocks_high_lookup[block_context.size];
  675. // Casts must be done here to prevent subtraction underflow from wrapping the values.
  676. i32 mb_to_top_edge = -(static_cast<i32>(block_row * MI_SIZE) * 16) >> subsampling_y;
  677. i32 mb_to_bottom_edge = (((static_cast<i32>(block_context.frame_context.rows()) - blocks_high - static_cast<i32>(block_row)) * MI_SIZE) * 16) >> subsampling_y;
  678. i32 blocks_wide = num_8x8_blocks_wide_lookup[block_context.size];
  679. i32 mb_to_left_edge = -(static_cast<i32>(block_column * MI_SIZE) * 16) >> subsampling_x;
  680. i32 mb_to_right_edge = (((static_cast<i32>(block_context.frame_context.columns()) - blocks_wide - static_cast<i32>(block_column)) * MI_SIZE) * 16) >> subsampling_x;
  681. i32 subpel_left = (INTERP_EXTEND + ((blocks_wide * MI_SIZE) >> subsampling_x)) << SUBPEL_BITS;
  682. i32 subpel_right = subpel_left - SUBPEL_SHIFTS;
  683. i32 subpel_top = (INTERP_EXTEND + ((blocks_high * MI_SIZE) >> subsampling_y)) << SUBPEL_BITS;
  684. i32 subpel_bottom = subpel_top - SUBPEL_SHIFTS;
  685. return {
  686. clip_3(mb_to_top_edge - subpel_top, mb_to_bottom_edge + subpel_bottom, (2 * vector.row()) >> subsampling_y),
  687. clip_3(mb_to_left_edge - subpel_left, mb_to_right_edge + subpel_right, (2 * vector.column()) >> subsampling_x)
  688. };
  689. }
DecoderErrorOr<void> Decoder::predict_inter_block(u8 plane, BlockContext const& block_context, ReferenceIndex reference_index, u32 block_row, u32 block_column, u32 x, u32 y, u32 width, u32 height, u32 block_index, Span<u16> block_buffer)
{
    // Produces the inter-predicted samples for a single reference (reference_index) of one block,
    // writing the width*height result into block_buffer. This covers steps 2 through 5 of the inter
    // prediction process: motion vector selection (8.5.2.1), clamping (8.5.2.2), scaling (8.5.2.3),
    // and the two-pass sub-sample interpolation of the block inter prediction process (8.5.2.4).
    VERIFY(width <= maximum_block_dimensions && height <= maximum_block_dimensions);
    // 2. The motion vector selection process in section 8.5.2.1 is invoked with plane, refList, blockIdx as inputs
    // and the output being the motion vector mv.
    auto motion_vector = select_motion_vector(plane, block_context, reference_index, block_index);

    // 3. The motion vector clamping process in section 8.5.2.2 is invoked with plane, mv as inputs and the output
    // being the clamped motion vector clampedMv
    auto clamped_vector = clamp_motion_vector(plane, block_context, block_row, block_column, motion_vector);

    // 4. The motion vector scaling process in section 8.5.2.3 is invoked with plane, refList, x, y, clampedMv as
    // inputs and the output being the initial location startX, startY, and the step sizes stepX, stepY.

    // 8.5.2.3 Motion vector scaling process
    // The inputs to this process are:
    // − a variable plane specifying which plane is being predicted,
    // − a variable refList specifying that we should scale to match reference frame ref_frame[ refList ],
    // − variables x and y specifying the location of the top left sample in the CurrFrame[ plane ] array of the region
    // to be predicted,
    // − a variable clampedMv specifying the clamped motion vector.
    // The outputs of this process are the variables startX and startY giving the reference block location in units of
    // 1/16 th of a sample, and variables xStep and yStep giving the step size in units of 1/16 th of a sample.

    // This process is responsible for computing the sampling locations in the reference frame based on the motion
    // vector. The sampling locations are also adjusted to compensate for any difference in the size of the reference
    // frame compared to the current frame.

    // A variable refIdx specifying which reference frame is being used is set equal to
    // ref_frame_idx[ ref_frame[ refList ] - LAST_FRAME ].
    auto reference_frame_index = block_context.frame_context.reference_frame_indices[block_context.reference_frame_types[reference_index] - LastFrame];

    // It is a requirement of bitstream conformance that all the following conditions are satisfied:
    // − 2 * FrameWidth >= RefFrameWidth[ refIdx ]
    // − 2 * FrameHeight >= RefFrameHeight[ refIdx ]
    // − FrameWidth <= 16 * RefFrameWidth[ refIdx ]
    // − FrameHeight <= 16 * RefFrameHeight[ refIdx ]
    // Rather than aborting, conformance violations are surfaced as recoverable Corrupted errors here.
    if (m_parser->m_frame_store[reference_frame_index][plane].is_empty())
        return DecoderError::format(DecoderErrorCategory::Corrupted, "Attempted to use reference frame {} that has not been saved", reference_frame_index);
    auto ref_frame_size = m_parser->m_ref_frame_size[reference_frame_index];
    auto double_frame_size = block_context.frame_context.size().scaled_by(2);
    if (double_frame_size.width() < ref_frame_size.width() || double_frame_size.height() < ref_frame_size.height())
        return DecoderError::format(DecoderErrorCategory::Corrupted, "Inter frame size is too small relative to reference frame {}", reference_frame_index);
    if (!ref_frame_size.scaled_by(16).contains(block_context.frame_context.size()))
        return DecoderError::format(DecoderErrorCategory::Corrupted, "Inter frame size is too large relative to reference frame {}", reference_frame_index);

    // FIXME: Convert all the operations in this function to vector operations supported by
    // MotionVector.

    // A variable xScale is set equal to (RefFrameWidth[ refIdx ] << REF_SCALE_SHIFT) / FrameWidth.
    // A variable yScale is set equal to (RefFrameHeight[ refIdx ] << REF_SCALE_SHIFT) / FrameHeight.
    // (xScale and yScale specify the size of the reference frame relative to the current frame in units where 16 is
    // equivalent to the reference frame having the same size.)
    i32 x_scale = (ref_frame_size.width() << REF_SCALE_SHIFT) / block_context.frame_context.size().width();
    i32 y_scale = (ref_frame_size.height() << REF_SCALE_SHIFT) / block_context.frame_context.size().height();

    // The variable baseX is set equal to (x * xScale) >> REF_SCALE_SHIFT.
    // The variable baseY is set equal to (y * yScale) >> REF_SCALE_SHIFT.
    // (baseX and baseY specify the location of the block in the reference frame if a zero motion vector is used).
    i32 base_x = (x * x_scale) >> REF_SCALE_SHIFT;
    i32 base_y = (y * y_scale) >> REF_SCALE_SHIFT;

    // The variable lumaX is set equal to (plane > 0) ? x << subsampling_x : x.
    // The variable lumaY is set equal to (plane > 0) ? y << subsampling_y : y.
    // (lumaX and lumaY specify the location of the block to be predicted in the current frame in units of luma
    // samples.)
    bool subsampling_x = plane > 0 ? block_context.frame_context.color_config.subsampling_x : false;
    bool subsampling_y = plane > 0 ? block_context.frame_context.color_config.subsampling_y : false;
    i32 luma_x = x << subsampling_x;
    i32 luma_y = y << subsampling_y;

    // The variable fracX is set equal to ( (16 * lumaX * xScale) >> REF_SCALE_SHIFT) & SUBPEL_MASK.
    // The variable fracY is set equal to ( (16 * lumaY * yScale) >> REF_SCALE_SHIFT) & SUBPEL_MASK.
    i32 frac_x = ((16 * luma_x * x_scale) >> REF_SCALE_SHIFT) & SUBPEL_MASK;
    i32 frac_y = ((16 * luma_y * y_scale) >> REF_SCALE_SHIFT) & SUBPEL_MASK;

    // The variable dX is set equal to ( (clampedMv[ 1 ] * xScale) >> REF_SCALE_SHIFT) + fracX.
    // The variable dY is set equal to ( (clampedMv[ 0 ] * yScale) >> REF_SCALE_SHIFT) + fracY.
    // (dX and dY specify a scaled motion vector.)
    i32 scaled_vector_x = ((clamped_vector.column() * x_scale) >> REF_SCALE_SHIFT) + frac_x;
    i32 scaled_vector_y = ((clamped_vector.row() * y_scale) >> REF_SCALE_SHIFT) + frac_y;

    // The output variable stepX is set equal to (16 * xScale) >> REF_SCALE_SHIFT.
    // The output variable stepY is set equal to (16 * yScale) >> REF_SCALE_SHIFT.
    i32 scaled_step_x = (16 * x_scale) >> REF_SCALE_SHIFT;
    i32 scaled_step_y = (16 * y_scale) >> REF_SCALE_SHIFT;

    // The output variable startX is set equal to (baseX << SUBPEL_BITS) + dX.
    // The output variable startY is set equal to (baseY << SUBPEL_BITS) + dY.
    i32 offset_scaled_block_x = (base_x << SUBPEL_BITS) + scaled_vector_x;
    i32 offset_scaled_block_y = (base_y << SUBPEL_BITS) + scaled_vector_y;

    // 5. The block inter prediction process in section 8.5.2.4 is invoked with plane, refList, startX, startY, stepX,
    // stepY, w, h as inputs and the output is assigned to the 2D array preds[ refList ].

    // 8.5.2.4 Block inter prediction process
    // The inputs to this process are:
    // − a variable plane,
    // − a variable refList specifying that we should predict from ref_frame[ refList ],
    // − variables x and y giving the block location in units of 1/16 th of a sample,
    // − variables xStep and yStep giving the step size in units of 1/16 th of a sample. (These will be at most equal
    // to 80 due to the restrictions on scaling between reference frames.)
    static constexpr i32 MAX_SCALED_STEP = 80;
    VERIFY(scaled_step_x <= MAX_SCALED_STEP && scaled_step_y <= MAX_SCALED_STEP);
    // − variables w and h giving the width and height of the block in units of samples
    // The output from this process is the 2D array named pred containing inter predicted samples.

    // A variable ref specifying the reference frame contents is set equal to FrameStore[ refIdx ].
    auto& reference_frame_buffer = m_parser->m_frame_store[reference_frame_index][plane];
    auto reference_frame_width = m_parser->m_ref_frame_size[reference_frame_index].width() >> subsampling_x;
    auto reference_frame_buffer_at = [&](u32 row, u32 column) -> u16& {
        return reference_frame_buffer[row * reference_frame_width + column];
    };
    auto block_buffer_at = [&](u32 row, u32 column) -> u16& {
        return block_buffer[row * width + column];
    };

    // The variable lastX is set equal to ( (RefFrameWidth[ refIdx ] + subX) >> subX) - 1.
    // The variable lastY is set equal to ( (RefFrameHeight[ refIdx ] + subY) >> subY) - 1.
    // (lastX and lastY specify the coordinates of the bottom right sample of the reference plane.)
    i32 scaled_right = ((m_parser->m_ref_frame_size[reference_frame_index].width() + subsampling_x) >> subsampling_x) - 1;
    i32 scaled_bottom = ((m_parser->m_ref_frame_size[reference_frame_index].height() + subsampling_y) >> subsampling_y) - 1;

    // The variable intermediateHeight specifying the height required for the intermediate array is set equal to (((h -
    // 1) * yStep + 15) >> 4) + 8.
    static constexpr auto maximum_intermediate_height = (((maximum_block_dimensions - 1) * MAX_SCALED_STEP + 15) >> 4) + 8;
    auto intermediate_height = (((height - 1) * scaled_step_y + 15) >> 4) + 8;
    VERIFY(intermediate_height <= maximum_intermediate_height);
    // The sub-sample interpolation is effected via two one-dimensional convolutions. First a horizontal filter is used
    // to build up a temporary array, and then this array is vertically filtered to obtain the final prediction. The
    // fractional parts of the motion vectors determine the filtering process. If the fractional part is zero, then the
    // filtering is equivalent to a straight sample copy.
    // The filtering is applied as follows:
    // The array intermediate is specified as follows:
    // Note: Height is specified by `intermediate_height`, width is specified by `width`
    Array<u16, maximum_intermediate_height * maximum_block_dimensions> intermediate_buffer;
    auto intermediate_buffer_at = [&](u32 row, u32 column) -> u16& {
        return intermediate_buffer[row * width + column];
    };

    // First pass: horizontal 8-tap filter into the intermediate buffer. The subpel phase is selected by
    // the low 4 bits of the 1/16th-sample position; clip_3 clamps out-of-bounds taps to the reference
    // plane's edge samples.
    for (auto row = 0u; row < intermediate_height; row++) {
        for (auto column = 0u; column < width; column++) {
            auto samples_start = offset_scaled_block_x + static_cast<i32>(scaled_step_x * column);

            i32 accumulated_samples = 0;
            for (auto t = 0u; t < 8u; t++) {
                auto sample = reference_frame_buffer_at(
                    clip_3(0, scaled_bottom, (offset_scaled_block_y >> 4) + static_cast<i32>(row) - 3),
                    clip_3(0, scaled_right, (samples_start >> 4) + static_cast<i32>(t) - 3));
                accumulated_samples += subpel_filters[block_context.interpolation_filter][samples_start & 15][t] * sample;
            }
            intermediate_buffer_at(row, column) = clip_1(block_context.frame_context.color_config.bit_depth, round_2(accumulated_samples, 7));
        }
    }

    // Second pass: vertical 8-tap filter from the intermediate buffer into the output block.
    for (auto row = 0u; row < height; row++) {
        for (auto column = 0u; column < width; column++) {
            auto samples_start = (offset_scaled_block_y & 15) + static_cast<i32>(scaled_step_y * row);

            i32 accumulated_samples = 0;
            for (auto t = 0u; t < 8u; t++) {
                auto sample = intermediate_buffer_at((samples_start >> 4) + t, column);
                accumulated_samples += subpel_filters[block_context.interpolation_filter][samples_start & 15][t] * sample;
            }
            block_buffer_at(row, column) = clip_1(block_context.frame_context.color_config.bit_depth, round_2(accumulated_samples, 7));
        }
    }

    return {};
}
  836. DecoderErrorOr<void> Decoder::predict_inter(u8 plane, BlockContext const& block_context, u32 x, u32 y, u32 width, u32 height, u32 block_index)
  837. {
  838. // The inter prediction process is invoked for inter coded blocks. When MiSize is smaller than BLOCK_8X8, the
  839. // prediction is done with a granularity of 4x4 samples, otherwise the whole plane is predicted at the same time.
  840. // The inputs to this process are:
  841. // − a variable plane specifying which plane is being predicted,
  842. // − variables x and y specifying the location of the top left sample in the CurrFrame[ plane ] array of the region
  843. // to be predicted,
  844. // − variables w and h specifying the width and height of the region to be predicted,
  845. // − a variable blockIdx, specifying how much of the block has already been predicted in units of 4x4 samples.
  846. // The outputs of this process are inter predicted samples in the current frame CurrFrame.
  847. // The prediction arrays are formed by the following ordered steps:
  848. // 1. The variable refList is set equal to 0.
  849. // 2. through 5.
  850. Array<u16, maximum_block_size> predicted_buffer;
  851. auto predicted_span = predicted_buffer.span().trim(width * height);
  852. TRY(predict_inter_block(plane, block_context, ReferenceIndex::Primary, block_context.row, block_context.column, x, y, width, height, block_index, predicted_span));
  853. auto predicted_buffer_at = [&](Span<u16> buffer, u32 row, u32 column) -> u16& {
  854. return buffer[row * width + column];
  855. };
  856. // 6. If isCompound is equal to 1, then the variable refList is set equal to 1 and steps 2, 3, 4 and 5 are repeated
  857. // to form the prediction for the second reference.
  858. // The inter predicted samples are then derived as follows:
  859. auto& frame_buffer = get_output_buffer(plane);
  860. VERIFY(!frame_buffer.is_empty());
  861. auto frame_width = (block_context.frame_context.columns() * 8u) >> (plane > 0 ? block_context.frame_context.color_config.subsampling_x : false);
  862. auto frame_height = (block_context.frame_context.rows() * 8u) >> (plane > 0 ? block_context.frame_context.color_config.subsampling_y : false);
  863. auto frame_buffer_at = [&](u32 row, u32 column) -> u16& {
  864. return frame_buffer[row * frame_width + column];
  865. };
  866. auto width_in_frame_buffer = min(width, frame_width - x);
  867. auto height_in_frame_buffer = min(height, frame_height - y);
  868. // The variable isCompound is set equal to ref_frame[ 1 ] > NONE.
  869. // − If isCompound is equal to 0, CurrFrame[ plane ][ y + i ][ x + j ] is set equal to preds[ 0 ][ i ][ j ] for i = 0..h-1
  870. // and j = 0..w-1.
  871. if (!block_context.is_compound()) {
  872. for (auto i = 0u; i < height_in_frame_buffer; i++) {
  873. for (auto j = 0u; j < width_in_frame_buffer; j++)
  874. frame_buffer_at(y + i, x + j) = predicted_buffer_at(predicted_span, i, j);
  875. }
  876. return {};
  877. }
  878. // − Otherwise, CurrFrame[ plane ][ y + i ][ x + j ] is set equal to Round2( preds[ 0 ][ i ][ j ] + preds[ 1 ][ i ][ j ], 1 )
  879. // for i = 0..h-1 and j = 0..w-1.
  880. Array<u16, maximum_block_size> second_predicted_buffer;
  881. auto second_predicted_span = second_predicted_buffer.span().trim(width * height);
  882. TRY(predict_inter_block(plane, block_context, ReferenceIndex::Secondary, block_context.row, block_context.column, x, y, width, height, block_index, second_predicted_span));
  883. for (auto i = 0u; i < height_in_frame_buffer; i++) {
  884. for (auto j = 0u; j < width_in_frame_buffer; j++)
  885. frame_buffer_at(y + i, x + j) = round_2(predicted_buffer_at(predicted_span, i, j) + predicted_buffer_at(second_predicted_span, i, j), 1);
  886. }
  887. return {};
  888. }
// Returns the quantizer step size for DC coefficients at base index b. The row of the lookup table is
// selected from the bit depth via (BitDepth - 8) >> 1, i.e. 8-bit -> row 0, 10-bit -> row 1, 12-bit -> row 2.
inline u16 dc_q(u8 bit_depth, u8 b)
{
    // The function dc_q( b ) is specified as dc_qlookup[ (BitDepth-8) >> 1 ][ Clip3( 0, 255, b ) ] where dc_lookup is
    // defined as follows:
    constexpr u16 dc_qlookup[3][256] = {
        { 4, 8, 8, 9, 10, 11, 12, 12, 13, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 23, 24, 25, 26, 26, 27, 28, 29, 30, 31, 32, 32, 33, 34, 35, 36, 37, 38, 38, 39, 40, 41, 42, 43, 43, 44, 45, 46, 47, 48, 48, 49, 50, 51, 52, 53, 53, 54, 55, 56, 57, 57, 58, 59, 60, 61, 62, 62, 63, 64, 65, 66, 66, 67, 68, 69, 70, 70, 71, 72, 73, 74, 74, 75, 76, 77, 78, 78, 79, 80, 81, 81, 82, 83, 84, 85, 85, 87, 88, 90, 92, 93, 95, 96, 98, 99, 101, 102, 104, 105, 107, 108, 110, 111, 113, 114, 116, 117, 118, 120, 121, 123, 125, 127, 129, 131, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 161, 164, 166, 169, 172, 174, 177, 180, 182, 185, 187, 190, 192, 195, 199, 202, 205, 208, 211, 214, 217, 220, 223, 226, 230, 233, 237, 240, 243, 247, 250, 253, 257, 261, 265, 269, 272, 276, 280, 284, 288, 292, 296, 300, 304, 309, 313, 317, 322, 326, 330, 335, 340, 344, 349, 354, 359, 364, 369, 374, 379, 384, 389, 395, 400, 406, 411, 417, 423, 429, 435, 441, 447, 454, 461, 467, 475, 482, 489, 497, 505, 513, 522, 530, 539, 549, 559, 569, 579, 590, 602, 614, 626, 640, 654, 668, 684, 700, 717, 736, 755, 775, 796, 819, 843, 869, 896, 925, 955, 988, 1022, 1058, 1098, 1139, 1184, 1232, 1282, 1336 },
        { 4, 9, 10, 13, 15, 17, 20, 22, 25, 28, 31, 34, 37, 40, 43, 47, 50, 53, 57, 60, 64, 68, 71, 75, 78, 82, 86, 90, 93, 97, 101, 105, 109, 113, 116, 120, 124, 128, 132, 136, 140, 143, 147, 151, 155, 159, 163, 166, 170, 174, 178, 182, 185, 189, 193, 197, 200, 204, 208, 212, 215, 219, 223, 226, 230, 233, 237, 241, 244, 248, 251, 255, 259, 262, 266, 269, 273, 276, 280, 283, 287, 290, 293, 297, 300, 304, 307, 310, 314, 317, 321, 324, 327, 331, 334, 337, 343, 350, 356, 362, 369, 375, 381, 387, 394, 400, 406, 412, 418, 424, 430, 436, 442, 448, 454, 460, 466, 472, 478, 484, 490, 499, 507, 516, 525, 533, 542, 550, 559, 567, 576, 584, 592, 601, 609, 617, 625, 634, 644, 655, 666, 676, 687, 698, 708, 718, 729, 739, 749, 759, 770, 782, 795, 807, 819, 831, 844, 856, 868, 880, 891, 906, 920, 933, 947, 961, 975, 988, 1001, 1015, 1030, 1045, 1061, 1076, 1090, 1105, 1120, 1137, 1153, 1170, 1186, 1202, 1218, 1236, 1253, 1271, 1288, 1306, 1323, 1342, 1361, 1379, 1398, 1416, 1436, 1456, 1476, 1496, 1516, 1537, 1559, 1580, 1601, 1624, 1647, 1670, 1692, 1717, 1741, 1766, 1791, 1817, 1844, 1871, 1900, 1929, 1958, 1990, 2021, 2054, 2088, 2123, 2159, 2197, 2236, 2276, 2319, 2363, 2410, 2458, 2508, 2561, 2616, 2675, 2737, 2802, 2871, 2944, 3020, 3102, 3188, 3280, 3375, 3478, 3586, 3702, 3823, 3953, 4089, 4236, 4394, 4559, 4737, 4929, 5130, 5347 },
        { 4, 12, 18, 25, 33, 41, 50, 60, 70, 80, 91, 103, 115, 127, 140, 153, 166, 180, 194, 208, 222, 237, 251, 266, 281, 296, 312, 327, 343, 358, 374, 390, 405, 421, 437, 453, 469, 484, 500, 516, 532, 548, 564, 580, 596, 611, 627, 643, 659, 674, 690, 706, 721, 737, 752, 768, 783, 798, 814, 829, 844, 859, 874, 889, 904, 919, 934, 949, 964, 978, 993, 1008, 1022, 1037, 1051, 1065, 1080, 1094, 1108, 1122, 1136, 1151, 1165, 1179, 1192, 1206, 1220, 1234, 1248, 1261, 1275, 1288, 1302, 1315, 1329, 1342, 1368, 1393, 1419, 1444, 1469, 1494, 1519, 1544, 1569, 1594, 1618, 1643, 1668, 1692, 1717, 1741, 1765, 1789, 1814, 1838, 1862, 1885, 1909, 1933, 1957, 1992, 2027, 2061, 2096, 2130, 2165, 2199, 2233, 2267, 2300, 2334, 2367, 2400, 2434, 2467, 2499, 2532, 2575, 2618, 2661, 2704, 2746, 2788, 2830, 2872, 2913, 2954, 2995, 3036, 3076, 3127, 3177, 3226, 3275, 3324, 3373, 3421, 3469, 3517, 3565, 3621, 3677, 3733, 3788, 3843, 3897, 3951, 4005, 4058, 4119, 4181, 4241, 4301, 4361, 4420, 4479, 4546, 4612, 4677, 4742, 4807, 4871, 4942, 5013, 5083, 5153, 5222, 5291, 5367, 5442, 5517, 5591, 5665, 5745, 5825, 5905, 5984, 6063, 6149, 6234, 6319, 6404, 6495, 6587, 6678, 6769, 6867, 6966, 7064, 7163, 7269, 7376, 7483, 7599, 7715, 7832, 7958, 8085, 8214, 8352, 8492, 8635, 8788, 8945, 9104, 9275, 9450, 9639, 9832, 10031, 10245, 10465, 10702, 10946, 11210, 11482, 11776, 12081, 12409, 12750, 13118, 13501, 13913, 14343, 14807, 15290, 15812, 16356, 16943, 17575, 18237, 18949, 19718, 20521, 21387 }
    };
    // b is already a u8 (0..255), so the clip mirrors the spec's Clip3 but cannot change the value here.
    return dc_qlookup[(bit_depth - 8) >> 1][clip_3<u8>(0, 255, b)];
}
  900. inline u16 ac_q(u8 bit_depth, u8 b)
  901. {
  902. // The function ac_q( b ) is specified as ac_qlookup[ (BitDepth-8) >> 1 ][ Clip3( 0, 255, b ) ] where ac_lookup is
  903. // defined as follows:
  904. constexpr u16 ac_qlookup[3][256] = {
  905. { 4, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 155, 158, 161, 164, 167, 170, 173, 176, 179, 182, 185, 188, 191, 194, 197, 200, 203, 207, 211, 215, 219, 223, 227, 231, 235, 239, 243, 247, 251, 255, 260, 265, 270, 275, 280, 285, 290, 295, 300, 305, 311, 317, 323, 329, 335, 341, 347, 353, 359, 366, 373, 380, 387, 394, 401, 408, 416, 424, 432, 440, 448, 456, 465, 474, 483, 492, 501, 510, 520, 530, 540, 550, 560, 571, 582, 593, 604, 615, 627, 639, 651, 663, 676, 689, 702, 715, 729, 743, 757, 771, 786, 801, 816, 832, 848, 864, 881, 898, 915, 933, 951, 969, 988, 1007, 1026, 1046, 1066, 1087, 1108, 1129, 1151, 1173, 1196, 1219, 1243, 1267, 1292, 1317, 1343, 1369, 1396, 1423, 1451, 1479, 1508, 1537, 1567, 1597, 1628, 1660, 1692, 1725, 1759, 1793, 1828 },
  906. { 4, 9, 11, 13, 16, 18, 21, 24, 27, 30, 33, 37, 40, 44, 48, 51, 55, 59, 63, 67, 71, 75, 79, 83, 88, 92, 96, 100, 105, 109, 114, 118, 122, 127, 131, 136, 140, 145, 149, 154, 158, 163, 168, 172, 177, 181, 186, 190, 195, 199, 204, 208, 213, 217, 222, 226, 231, 235, 240, 244, 249, 253, 258, 262, 267, 271, 275, 280, 284, 289, 293, 297, 302, 306, 311, 315, 319, 324, 328, 332, 337, 341, 345, 349, 354, 358, 362, 367, 371, 375, 379, 384, 388, 392, 396, 401, 409, 417, 425, 433, 441, 449, 458, 466, 474, 482, 490, 498, 506, 514, 523, 531, 539, 547, 555, 563, 571, 579, 588, 596, 604, 616, 628, 640, 652, 664, 676, 688, 700, 713, 725, 737, 749, 761, 773, 785, 797, 809, 825, 841, 857, 873, 889, 905, 922, 938, 954, 970, 986, 1002, 1018, 1038, 1058, 1078, 1098, 1118, 1138, 1158, 1178, 1198, 1218, 1242, 1266, 1290, 1314, 1338, 1362, 1386, 1411, 1435, 1463, 1491, 1519, 1547, 1575, 1603, 1631, 1663, 1695, 1727, 1759, 1791, 1823, 1859, 1895, 1931, 1967, 2003, 2039, 2079, 2119, 2159, 2199, 2239, 2283, 2327, 2371, 2415, 2459, 2507, 2555, 2603, 2651, 2703, 2755, 2807, 2859, 2915, 2971, 3027, 3083, 3143, 3203, 3263, 3327, 3391, 3455, 3523, 3591, 3659, 3731, 3803, 3876, 3952, 4028, 4104, 4184, 4264, 4348, 4432, 4516, 4604, 4692, 4784, 4876, 4972, 5068, 5168, 5268, 5372, 5476, 5584, 5692, 5804, 5916, 6032, 6148, 6268, 6388, 6512, 6640, 6768, 6900, 7036, 7172, 7312 },
  907. { 4, 13, 19, 27, 35, 44, 54, 64, 75, 87, 99, 112, 126, 139, 154, 168, 183, 199, 214, 230, 247, 263, 280, 297, 314, 331, 349, 366, 384, 402, 420, 438, 456, 475, 493, 511, 530, 548, 567, 586, 604, 623, 642, 660, 679, 698, 716, 735, 753, 772, 791, 809, 828, 846, 865, 884, 902, 920, 939, 957, 976, 994, 1012, 1030, 1049, 1067, 1085, 1103, 1121, 1139, 1157, 1175, 1193, 1211, 1229, 1246, 1264, 1282, 1299, 1317, 1335, 1352, 1370, 1387, 1405, 1422, 1440, 1457, 1474, 1491, 1509, 1526, 1543, 1560, 1577, 1595, 1627, 1660, 1693, 1725, 1758, 1791, 1824, 1856, 1889, 1922, 1954, 1987, 2020, 2052, 2085, 2118, 2150, 2183, 2216, 2248, 2281, 2313, 2346, 2378, 2411, 2459, 2508, 2556, 2605, 2653, 2701, 2750, 2798, 2847, 2895, 2943, 2992, 3040, 3088, 3137, 3185, 3234, 3298, 3362, 3426, 3491, 3555, 3619, 3684, 3748, 3812, 3876, 3941, 4005, 4069, 4149, 4230, 4310, 4390, 4470, 4550, 4631, 4711, 4791, 4871, 4967, 5064, 5160, 5256, 5352, 5448, 5544, 5641, 5737, 5849, 5961, 6073, 6185, 6297, 6410, 6522, 6650, 6778, 6906, 7034, 7162, 7290, 7435, 7579, 7723, 7867, 8011, 8155, 8315, 8475, 8635, 8795, 8956, 9132, 9308, 9484, 9660, 9836, 10028, 10220, 10412, 10604, 10812, 11020, 11228, 11437, 11661, 11885, 12109, 12333, 12573, 12813, 13053, 13309, 13565, 13821, 14093, 14365, 14637, 14925, 15213, 15502, 15806, 16110, 16414, 16734, 17054, 17390, 17726, 18062, 18414, 18766, 19134, 19502, 19886, 20270, 20670, 21070, 21486, 21902, 22334, 22766, 23214, 23662, 24126, 24590, 25070, 25551, 26047, 26559, 27071, 27599, 28143, 28687, 29247 }
  908. };
  909. return ac_qlookup[(bit_depth - 8) >> 1][clip_3<u8>(0, 255, b)];
  910. }
  911. u8 Decoder::get_base_quantizer_index(BlockContext const& block_context)
  912. {
  913. // The function get_qindex( ) returns the quantizer index for the current block and is specified by the following:
  914. // − If seg_feature_active( SEG_LVL_ALT_Q ) is equal to 1 the following ordered steps apply:
  915. if (Parser::seg_feature_active(block_context, SEG_LVL_ALT_Q)) {
  916. // 1. Set the variable data equal to FeatureData[ segment_id ][ SEG_LVL_ALT_Q ].
  917. auto data = block_context.frame_context.segmentation_features[block_context.segment_id][SEG_LVL_ALT_Q].value;
  918. // 2. If segmentation_abs_or_delta_update is equal to 0, set data equal to base_q_idx + data
  919. if (!block_context.frame_context.should_use_absolute_segment_base_quantizer) {
  920. data += block_context.frame_context.base_quantizer_index;
  921. }
  922. // 3. Return Clip3( 0, 255, data ).
  923. return clip_3<u8>(0, 255, data);
  924. }
  925. // − Otherwise, return base_q_idx.
  926. return block_context.frame_context.base_quantizer_index;
  927. }
  928. u16 Decoder::get_dc_quantizer(BlockContext const& block_context, u8 plane)
  929. {
  930. // FIXME: The result of this function can be cached. This does not change per frame.
  931. // The function get_dc_quant( plane ) returns the quantizer value for the dc coefficient for a particular plane and
  932. // is derived as follows:
  933. // − If plane is equal to 0, return dc_q( get_qindex( ) + delta_q_y_dc ).
  934. // − Otherwise, return dc_q( get_qindex( ) + delta_q_uv_dc ).
  935. // Instead of if { return }, select the value to add and return.
  936. i8 offset = plane == 0 ? block_context.frame_context.y_dc_quantizer_index_delta : block_context.frame_context.uv_dc_quantizer_index_delta;
  937. return dc_q(block_context.frame_context.color_config.bit_depth, static_cast<u8>(get_base_quantizer_index(block_context) + offset));
  938. }
  939. u16 Decoder::get_ac_quantizer(BlockContext const& block_context, u8 plane)
  940. {
  941. // FIXME: The result of this function can be cached. This does not change per frame.
  942. // The function get_ac_quant( plane ) returns the quantizer value for the ac coefficient for a particular plane and
  943. // is derived as follows:
  944. // − If plane is equal to 0, return ac_q( get_qindex( ) ).
  945. // − Otherwise, return ac_q( get_qindex( ) + delta_q_uv_ac ).
  946. // Instead of if { return }, select the value to add and return.
  947. i8 offset = plane == 0 ? 0 : block_context.frame_context.uv_ac_quantizer_index_delta;
  948. return ac_q(block_context.frame_context.color_config.bit_depth, static_cast<u8>(get_base_quantizer_index(block_context) + offset));
  949. }
  950. DecoderErrorOr<void> Decoder::reconstruct(u8 plane, BlockContext const& block_context, u32 transform_block_x, u32 transform_block_y, TXSize transform_block_size, u8 transform_type)
  951. {
  952. // 8.6.2 Reconstruct process
  953. // The variable dqDenom is set equal to 2 if txSz is equal to TX_32X32, otherwise dqDenom is set equal to 1.
  954. Intermediate dq_denominator = transform_block_size == TX_32x32 ? 2 : 1;
  955. // The variable n (specifying the base 2 logarithm of the width of the transform block) is set equal to 2 + txSz.
  956. u8 log2_of_block_size = 2u + transform_block_size;
  957. // The variable n0 (specifying the width of the transform block) is set equal to 1 << n.
  958. auto block_size = 1u << log2_of_block_size;
  959. // 1. Dequant[ i ][ j ] is set equal to ( Tokens[ i * n0 + j ] * get_ac_quant( plane ) ) / dqDenom
  960. // for i = 0..(n0-1), for j = 0..(n0-1)
  961. Array<Intermediate, maximum_transform_size> dequantized;
  962. Intermediate ac_quant = get_ac_quantizer(block_context, plane);
  963. for (auto i = 0u; i < block_size; i++) {
  964. for (auto j = 0u; j < block_size; j++) {
  965. auto index = index_from_row_and_column(i, j, block_size);
  966. if (index == 0)
  967. continue;
  968. dequantized[index] = (m_parser->m_tokens[index] * ac_quant) / dq_denominator;
  969. }
  970. }
  971. // 2. Dequant[ 0 ][ 0 ] is set equal to ( Tokens[ 0 ] * get_dc_quant( plane ) ) / dqDenom
  972. dequantized[0] = (m_parser->m_tokens[0] * get_dc_quantizer(block_context, plane)) / dq_denominator;
  973. // It is a requirement of bitstream conformance that the values written into the Dequant array in steps 1 and 2
  974. // are representable by a signed integer with 8 + BitDepth bits.
  975. for (auto i = 0u; i < block_size * block_size; i++)
  976. VERIFY(check_intermediate_bounds(block_context.frame_context.color_config.bit_depth, dequantized[i]));
  977. // 3. Invoke the 2D inverse transform block process defined in section 8.7.2 with the variable n as input.
  978. // The inverse transform outputs are stored back to the Dequant buffer.
  979. TRY(inverse_transform_2d(block_context, dequantized, log2_of_block_size, transform_type));
  980. // 4. CurrFrame[ plane ][ y + i ][ x + j ] is set equal to Clip1( CurrFrame[ plane ][ y + i ][ x + j ] + Dequant[ i ][ j ] )
  981. // for i = 0..(n0-1) and j = 0..(n0-1).
  982. auto& current_buffer = get_output_buffer(plane);
  983. auto subsampling_x = (plane > 0 ? block_context.frame_context.color_config.subsampling_x : 0);
  984. auto subsampling_y = (plane > 0 ? block_context.frame_context.color_config.subsampling_y : 0);
  985. auto frame_width = (block_context.frame_context.columns() * 8) >> subsampling_x;
  986. auto frame_height = (block_context.frame_context.rows() * 8) >> subsampling_y;
  987. auto width_in_frame_buffer = min(block_size, frame_width - transform_block_x);
  988. auto height_in_frame_buffer = min(block_size, frame_height - transform_block_y);
  989. for (auto i = 0u; i < height_in_frame_buffer; i++) {
  990. for (auto j = 0u; j < width_in_frame_buffer; j++) {
  991. auto index = index_from_row_and_column(transform_block_y + i, transform_block_x + j, frame_width);
  992. auto dequantized_value = dequantized[index_from_row_and_column(i, j, block_size)];
  993. current_buffer[index] = clip_1(block_context.frame_context.color_config.bit_depth, current_buffer[index] + dequantized_value);
  994. }
  995. }
  996. return {};
  997. }
  998. inline DecoderErrorOr<void> Decoder::inverse_walsh_hadamard_transform(Span<Intermediate> data, u8 log2_of_block_size, u8 shift)
  999. {
  1000. (void)data;
  1001. (void)shift;
  1002. // The input to this process is a variable shift that specifies the amount of pre-scaling.
  1003. // This process does an in-place transform of the array T (of length 4) by the following ordered steps:
  1004. if (1 << log2_of_block_size != 4)
  1005. return DecoderError::corrupted("Block size was not 4"sv);
  1006. return DecoderError::not_implemented();
  1007. }
  1008. inline i32 Decoder::cos64(u8 angle)
  1009. {
  1010. const i32 cos64_lookup[33] = { 16384, 16364, 16305, 16207, 16069, 15893, 15679, 15426, 15137, 14811, 14449, 14053, 13623, 13160, 12665, 12140, 11585, 11003, 10394, 9760, 9102, 8423, 7723, 7005, 6270, 5520, 4756, 3981, 3196, 2404, 1606, 804, 0 };
  1011. // 1. Set a variable angle2 equal to angle & 127.
  1012. angle &= 127;
  1013. // 2. If angle2 is greater than or equal to 0 and less than or equal to 32, return cos64_lookup[ angle2 ].
  1014. if (angle <= 32)
  1015. return cos64_lookup[angle];
  1016. // 3. If angle2 is greater than 32 and less than or equal to 64, return cos64_lookup[ 64 - angle2 ] * -1.
  1017. if (angle <= 64)
  1018. return -cos64_lookup[64 - angle];
  1019. // 4. If angle2 is greater than 64 and less than or equal to 96, return cos64_lookup[ angle2 - 64 ] * -1.
  1020. if (angle <= 96)
  1021. return -cos64_lookup[angle - 64];
  1022. // 5. Otherwise (if angle2 is greater than 96 and less than 128), return cos64_lookup[ 128 - angle2 ].
  1023. return cos64_lookup[128 - angle];
  1024. }
  1025. inline i32 Decoder::sin64(u8 angle)
  1026. {
  1027. if (angle < 32)
  1028. angle += 128;
  1029. return cos64(angle - 32u);
  1030. }
  1031. template<typename T>
  1032. inline i32 Decoder::round_2(T value, u8 bits)
  1033. {
  1034. value = (value + static_cast<T>(1u << (bits - 1u))) >> bits;
  1035. return static_cast<i32>(value);
  1036. }
  1037. inline bool check_bounds(i64 value, u8 bits)
  1038. {
  1039. i64 const maximum = (1ll << (bits - 1ll)) - 1ll;
  1040. return value >= ~maximum && value <= maximum;
  1041. }
  1042. inline bool Decoder::check_intermediate_bounds(u8 bit_depth, Intermediate value)
  1043. {
  1044. i32 maximum = (1 << (8 + bit_depth - 1)) - 1;
  1045. return value >= ~maximum && value <= maximum;
  1046. }
  1047. // (8.7.1.1) The function B( a, b, angle, 0 ) performs a butterfly rotation.
  1048. inline void Decoder::butterfly_rotation_in_place(u8 bit_depth, Span<Intermediate> data, size_t index_a, size_t index_b, u8 angle, bool flip)
  1049. {
  1050. auto cos = cos64(angle);
  1051. auto sin = sin64(angle);
  1052. // 1. The variable x is set equal to T[ a ] * cos64( angle ) - T[ b ] * sin64( angle ).
  1053. i64 rotated_a = data[index_a] * cos - data[index_b] * sin;
  1054. // 2. The variable y is set equal to T[ a ] * sin64( angle ) + T[ b ] * cos64( angle ).
  1055. i64 rotated_b = data[index_a] * sin + data[index_b] * cos;
  1056. // 3. T[ a ] is set equal to Round2( x, 14 ).
  1057. data[index_a] = round_2(rotated_a, 14);
  1058. // 4. T[ b ] is set equal to Round2( y, 14 ).
  1059. data[index_b] = round_2(rotated_b, 14);
  1060. // The function B( a ,b, angle, 1 ) performs a butterfly rotation and flip specified by the following ordered steps:
  1061. // 1. The function B( a, b, angle, 0 ) is invoked.
  1062. // 2. The contents of T[ a ] and T[ b ] are exchanged.
  1063. if (flip)
  1064. swap(data[index_a], data[index_b]);
  1065. // It is a requirement of bitstream conformance that the values saved into the array T by this function are
  1066. // representable by a signed integer using 8 + BitDepth bits of precision.
  1067. VERIFY(check_intermediate_bounds(bit_depth, data[index_a]));
  1068. VERIFY(check_intermediate_bounds(bit_depth, data[index_b]));
  1069. }
  1070. // (8.7.1.1) The function H( a, b, 0 ) performs a Hadamard rotation.
  1071. inline void Decoder::hadamard_rotation_in_place(u8 bit_depth, Span<Intermediate> data, size_t index_a, size_t index_b, bool flip)
  1072. {
  1073. // The function H( a, b, 1 ) performs a Hadamard rotation with flipped indices and is specified as follows:
  1074. // 1. The function H( b, a, 0 ) is invoked.
  1075. if (flip)
  1076. swap(index_a, index_b);
  1077. // The function H( a, b, 0 ) performs a Hadamard rotation specified by the following ordered steps:
  1078. // 1. The variable x is set equal to T[ a ].
  1079. auto a_value = data[index_a];
  1080. // 2. The variable y is set equal to T[ b ].
  1081. auto b_value = data[index_b];
  1082. // 3. T[ a ] is set equal to x + y.
  1083. data[index_a] = a_value + b_value;
  1084. // 4. T[ b ] is set equal to x - y.
  1085. data[index_b] = a_value - b_value;
  1086. // It is a requirement of bitstream conformance that the values saved into the array T by this function are
  1087. // representable by a signed integer using 8 + BitDepth bits of precision.
  1088. VERIFY(check_intermediate_bounds(bit_depth, data[index_a]));
  1089. VERIFY(check_intermediate_bounds(bit_depth, data[index_b]));
  1090. }
  1091. inline DecoderErrorOr<void> Decoder::inverse_discrete_cosine_transform_array_permutation(Span<Intermediate> data, u8 log2_of_block_size)
  1092. {
  1093. u8 block_size = 1 << log2_of_block_size;
  1094. // This process performs an in-place permutation of the array T of length 2^n for 2 ≤ n ≤ 5 which is required before
  1095. // execution of the inverse DCT process.
  1096. if (log2_of_block_size < 2 || log2_of_block_size > 5)
  1097. return DecoderError::corrupted("Block size was out of range"sv);
  1098. // 1.1. A temporary array named copyT is set equal to T.
  1099. Array<Intermediate, maximum_transform_size> data_copy;
  1100. AK::TypedTransfer<Intermediate>::copy(data_copy.data(), data.data(), block_size);
  1101. // 1.2. T[ i ] is set equal to copyT[ brev( n, i ) ] for i = 0..((1<<n) - 1).
  1102. for (auto i = 0u; i < block_size; i++)
  1103. data[i] = data_copy[brev(log2_of_block_size, i)];
  1104. return {};
  1105. }
// 8.7.1.3 Inverse DCT process: in-place inverse DCT of the array T of length 1 << n, where n = log2_of_block_size.
// The numbered comments follow the ordered steps of the specification; the transform is defined recursively,
// with the half-length DCT applied first and the remaining butterfly/Hadamard stages layered on afterwards.
inline DecoderErrorOr<void> Decoder::inverse_discrete_cosine_transform(u8 bit_depth, Span<Intermediate> data, u8 log2_of_block_size)
{
    // 2.1. The variable n0 is set equal to 1<<n.
    u8 block_size = 1 << log2_of_block_size;
    // 2.2. The variable n1 is set equal to 1<<(n-1).
    u8 half_block_size = block_size >> 1;
    // 2.3 The variable n2 is set equal to 1<<(n-2).
    u8 quarter_block_size = half_block_size >> 1;
    // 2.4 The variable n3 is set equal to 1<<(n-3).
    // NOTE: for n == 2 this is 0; the loops below that use it are guarded by n >= 3 checks.
    u8 eighth_block_size = quarter_block_size >> 1;
    // 2.5 If n is equal to 2, invoke B( 0, 1, 16, 1 ), otherwise recursively invoke the inverse DCT defined in this
    // section with the variable n set equal to n - 1.
    if (log2_of_block_size == 2)
        butterfly_rotation_in_place(bit_depth, data, 0, 1, 16, true);
    else
        TRY(inverse_discrete_cosine_transform(bit_depth, data, log2_of_block_size - 1));
    // 2.6 Invoke B( n1+i, n0-1-i, 32-brev( 5, n1+i), 0 ) for i = 0..(n2-1).
    for (auto i = 0u; i < quarter_block_size; i++) {
        auto index = half_block_size + i;
        butterfly_rotation_in_place(bit_depth, data, index, block_size - 1 - i, 32 - brev(5, index), false);
    }
    // 2.7 If n is greater than or equal to 3:
    if (log2_of_block_size >= 3) {
        // a. Invoke H( n1+4*i+2*j, n1+1+4*i+2*j, j ) for i = 0..(n3-1), j = 0..1.
        for (auto i = 0u; i < eighth_block_size; i++) {
            for (auto j = 0u; j < 2; j++) {
                auto index = half_block_size + (4 * i) + (2 * j);
                hadamard_rotation_in_place(bit_depth, data, index, index + 1, j);
            }
        }
    }
    // 4. If n is equal to 5:
    if (log2_of_block_size == 5) {
        // a. Invoke B( n0-n+3-n2*j-4*i, n1+n-4+n2*j+4*i, 28-16*i+56*j, 1 ) for i = 0..1, j = 0..1.
        for (auto i = 0u; i < 2; i++) {
            for (auto j = 0u; j < 2; j++) {
                auto index_a = block_size - log2_of_block_size + 3 - (quarter_block_size * j) - (4 * i);
                auto index_b = half_block_size + log2_of_block_size - 4 + (quarter_block_size * j) + (4 * i);
                auto angle = 28 - (16 * i) + (56 * j);
                butterfly_rotation_in_place(bit_depth, data, index_a, index_b, angle, true);
            }
        }
        // b. Invoke H( n1+n3*j+i, n1+n2-5+n3*j-i, j&1 ) for i = 0..1, j = 0..3.
        for (auto i = 0u; i < 2; i++) {
            for (auto j = 0u; j < 4; j++) {
                auto index_a = half_block_size + (eighth_block_size * j) + i;
                auto index_b = half_block_size + quarter_block_size - 5 + (eighth_block_size * j) - i;
                hadamard_rotation_in_place(bit_depth, data, index_a, index_b, (j & 1) != 0);
            }
        }
    }
    // 5. If n is greater than or equal to 4:
    if (log2_of_block_size >= 4) {
        // a. Invoke B( n0-n+2-i-n2*j, n1+n-3+i+n2*j, 24+48*j, 1 ) for i = 0..(n==5), j = 0..1.
        // The i loop runs once for n == 4 and twice for n == 5.
        for (auto i = 0u; i <= (log2_of_block_size == 5); i++) {
            for (auto j = 0u; j < 2; j++) {
                auto index_a = block_size - log2_of_block_size + 2 - i - (quarter_block_size * j);
                auto index_b = half_block_size + log2_of_block_size - 3 + i + (quarter_block_size * j);
                butterfly_rotation_in_place(bit_depth, data, index_a, index_b, 24 + (48 * j), true);
            }
        }
        // b. Invoke H( n1+n2*j+i, n1+n2-1+n2*j-i, j&1 ) for i = 0..(2n-7), j = 0..1.
        // (i < 2n - 6 is the same bound as i = 0..(2n-7).)
        for (auto i = 0u; i < (2 * log2_of_block_size) - 6u; i++) {
            for (auto j = 0u; j < 2; j++) {
                auto index_a = half_block_size + (quarter_block_size * j) + i;
                auto index_b = half_block_size + quarter_block_size - 1 + (quarter_block_size * j) - i;
                hadamard_rotation_in_place(bit_depth, data, index_a, index_b, (j & 1) != 0);
            }
        }
    }
    // 6. If n is greater than or equal to 3:
    if (log2_of_block_size >= 3) {
        // a. Invoke B( n0-n3-1-i, n1+n3+i, 16, 1 ) for i = 0..(n3-1).
        for (auto i = 0u; i < eighth_block_size; i++) {
            auto index_a = block_size - eighth_block_size - 1 - i;
            auto index_b = half_block_size + eighth_block_size + i;
            butterfly_rotation_in_place(bit_depth, data, index_a, index_b, 16, true);
        }
    }
    // 7. Invoke H( i, n0-1-i, 0 ) for i = 0..(n1-1).
    for (auto i = 0u; i < half_block_size; i++)
        hadamard_rotation_in_place(bit_depth, data, i, block_size - 1 - i, false);
    return {};
}
  1191. inline void Decoder::inverse_asymmetric_discrete_sine_transform_input_array_permutation(Span<Intermediate> data, u8 log2_of_block_size)
  1192. {
  1193. // The variable n0 is set equal to 1<<n.
  1194. auto block_size = 1u << log2_of_block_size;
  1195. // The variable n1 is set equal to 1<<(n-1).
  1196. // We can iterate by 2 at a time instead of taking half block size.
  1197. // A temporary array named copyT is set equal to T.
  1198. Array<Intermediate, maximum_transform_size> data_copy;
  1199. AK::TypedTransfer<Intermediate>::copy(data_copy.data(), data.data(), block_size);
  1200. // The values at even locations T[ 2 * i ] are set equal to copyT[ n0 - 1 - 2 * i ] for i = 0..(n1-1).
  1201. // The values at odd locations T[ 2 * i + 1 ] are set equal to copyT[ 2 * i ] for i = 0..(n1-1).
  1202. for (auto i = 0u; i < block_size; i += 2) {
  1203. data[i] = data_copy[block_size - 1 - i];
  1204. data[i + 1] = data_copy[i];
  1205. }
  1206. }
  1207. inline void Decoder::inverse_asymmetric_discrete_sine_transform_output_array_permutation(Span<Intermediate> data, u8 log2_of_block_size)
  1208. {
  1209. auto block_size = 1u << log2_of_block_size;
  1210. // A temporary array named copyT is set equal to T.
  1211. Array<Intermediate, maximum_transform_size> data_copy;
  1212. AK::TypedTransfer<Intermediate>::copy(data_copy.data(), data.data(), block_size);
  1213. // The permutation depends on n as follows:
  1214. if (log2_of_block_size == 4) {
  1215. // − If n is equal to 4,
  1216. // T[ 8*a + 4*b + 2*c + d ] is set equal to copyT[ 8*(d^c) + 4*(c^b) + 2*(b^a) + a ] for a = 0..1
  1217. // and b = 0..1 and c = 0..1 and d = 0..1.
  1218. for (auto a = 0u; a < 2; a++)
  1219. for (auto b = 0u; b < 2; b++)
  1220. for (auto c = 0u; c < 2; c++)
  1221. for (auto d = 0u; d < 2; d++)
  1222. data[(8 * a) + (4 * b) + (2 * c) + d] = data_copy[8 * (d ^ c) + 4 * (c ^ b) + 2 * (b ^ a) + a];
  1223. } else {
  1224. VERIFY(log2_of_block_size == 3);
  1225. // − Otherwise (n is equal to 3),
  1226. // T[ 4*a + 2*b + c ] is set equal to copyT[ 4*(c^b) + 2*(b^a) + a ] for a = 0..1 and
  1227. // b = 0..1 and c = 0..1.
  1228. for (auto a = 0u; a < 2; a++)
  1229. for (auto b = 0u; b < 2; b++)
  1230. for (auto c = 0u; c < 2; c++)
  1231. data[4 * a + 2 * b + c] = data_copy[4 * (c ^ b) + 2 * (b ^ a) + a];
  1232. }
  1233. }
  1234. inline void Decoder::inverse_asymmetric_discrete_sine_transform_4(u8 bit_depth, Span<Intermediate> data)
  1235. {
  1236. VERIFY(data.size() == 4);
  1237. const i64 sinpi_1_9 = 5283;
  1238. const i64 sinpi_2_9 = 9929;
  1239. const i64 sinpi_3_9 = 13377;
  1240. const i64 sinpi_4_9 = 15212;
  1241. // Steps are derived from pseudocode in (8.7.1.6):
  1242. // s0 = SINPI_1_9 * T[ 0 ]
  1243. i64 s0 = sinpi_1_9 * data[0];
  1244. // s1 = SINPI_2_9 * T[ 0 ]
  1245. i64 s1 = sinpi_2_9 * data[0];
  1246. // s2 = SINPI_3_9 * T[ 1 ]
  1247. i64 s2 = sinpi_3_9 * data[1];
  1248. // s3 = SINPI_4_9 * T[ 2 ]
  1249. i64 s3 = sinpi_4_9 * data[2];
  1250. // s4 = SINPI_1_9 * T[ 2 ]
  1251. i64 s4 = sinpi_1_9 * data[2];
  1252. // s5 = SINPI_2_9 * T[ 3 ]
  1253. i64 s5 = sinpi_2_9 * data[3];
  1254. // s6 = SINPI_4_9 * T[ 3 ]
  1255. i64 s6 = sinpi_4_9 * data[3];
  1256. // v = T[ 0 ] - T[ 2 ] + T[ 3 ]
  1257. // s7 = SINPI_3_9 * v
  1258. i64 s7 = sinpi_3_9 * (data[0] - data[2] + data[3]);
  1259. // x0 = s0 + s3 + s5
  1260. auto x0 = s0 + s3 + s5;
  1261. // x1 = s1 - s4 - s6
  1262. auto x1 = s1 - s4 - s6;
  1263. // x2 = s7
  1264. auto x2 = s7;
  1265. // x3 = s2
  1266. auto x3 = s2;
  1267. // s0 = x0 + x3
  1268. s0 = x0 + x3;
  1269. // s1 = x1 + x3
  1270. s1 = x1 + x3;
  1271. // s2 = x2
  1272. s2 = x2;
  1273. // s3 = x0 + x1 - x3
  1274. s3 = x0 + x1 - x3;
  1275. // T[ 0 ] = Round2( s0, 14 )
  1276. data[0] = round_2(s0, 14);
  1277. // T[ 1 ] = Round2( s1, 14 )
  1278. data[1] = round_2(s1, 14);
  1279. // T[ 2 ] = Round2( s2, 14 )
  1280. data[2] = round_2(s2, 14);
  1281. // T[ 3 ] = Round2( s3, 14 )
  1282. data[3] = round_2(s3, 14);
  1283. // (8.7.1.1) The inverse asymmetric discrete sine transforms also make use of an intermediate array named S.
  1284. // The values in this array require higher precision to avoid overflow. Using signed integers with 24 +
  1285. // BitDepth bits of precision is enough to avoid overflow.
  1286. const u8 bits = 24 + bit_depth;
  1287. VERIFY(check_bounds(data[0], bits));
  1288. VERIFY(check_bounds(data[1], bits));
  1289. VERIFY(check_bounds(data[2], bits));
  1290. VERIFY(check_bounds(data[3], bits));
  1291. }
  1292. // The function SB( a, b, angle, 0 ) performs a butterfly rotation.
  1293. // Spec defines the source as array T, and the destination array as S.
  1294. template<typename S, typename D>
  1295. inline void Decoder::butterfly_rotation(Span<S> source, Span<D> destination, size_t index_a, size_t index_b, u8 angle, bool flip)
  1296. {
  1297. // The function SB( a, b, angle, 0 ) performs a butterfly rotation according to the following ordered steps:
  1298. auto cos = cos64(angle);
  1299. auto sin = sin64(angle);
  1300. // Expand to the destination buffer's precision.
  1301. D a = source[index_a];
  1302. D b = source[index_b];
  1303. // 1. S[ a ] is set equal to T[ a ] * cos64( angle ) - T[ b ] * sin64( angle ).
  1304. destination[index_a] = a * cos - b * sin;
  1305. // 2. S[ b ] is set equal to T[ a ] * sin64( angle ) + T[ b ] * cos64( angle ).
  1306. destination[index_b] = a * sin + b * cos;
  1307. // The function SB( a, b, angle, 1 ) performs a butterfly rotation and flip according to the following ordered steps:
  1308. // 1. The function SB( a, b, angle, 0 ) is invoked.
  1309. // 2. The contents of S[ a ] and S[ b ] are exchanged.
  1310. if (flip)
  1311. swap(destination[index_a], destination[index_b]);
  1312. }
  1313. // The function SH( a, b ) performs a Hadamard rotation and rounding.
  1314. // Spec defines the source array as S, and the destination array as T.
  1315. template<typename S, typename D>
  1316. inline void Decoder::hadamard_rotation(Span<S> source, Span<D> destination, size_t index_a, size_t index_b)
  1317. {
  1318. // Keep the source buffer's precision until rounding.
  1319. S a = source[index_a];
  1320. S b = source[index_b];
  1321. // 1. T[ a ] is set equal to Round2( S[ a ] + S[ b ], 14 ).
  1322. destination[index_a] = round_2(a + b, 14);
  1323. // 2. T[ b ] is set equal to Round2( S[ a ] - S[ b ], 14 ).
  1324. destination[index_b] = round_2(a - b, 14);
  1325. }
// In-place inverse ADST of the length-8 array T. The SB/SH stages round-trip through a 64-bit
// scratch array S (high_precision_temp) because their intermediate products exceed Intermediate's range.
inline DecoderErrorOr<void> Decoder::inverse_asymmetric_discrete_sine_transform_8(u8 bit_depth, Span<Intermediate> data)
{
    VERIFY(data.size() == 8);
    // This process does an in-place transform of the array T using:
    // A higher precision array S for intermediate results.
    Array<i64, 8> high_precision_temp;
    // The following ordered steps apply:
    // 1. Invoke the ADST input array permutation process specified in section 8.7.1.4 with the input variable n set
    // equal to 3.
    inverse_asymmetric_discrete_sine_transform_input_array_permutation(data, 3);
    // 2. Invoke SB( 2*i, 1+2*i, 30-8*i, 1 ) for i = 0..3.
    for (auto i = 0u; i < 4; i++)
        butterfly_rotation(data, high_precision_temp.span(), 2 * i, 1 + (2 * i), 30 - (8 * i), true);
    // (8.7.1.1) NOTE - The values in array S require higher precision to avoid overflow. Using signed integers with
    // 24 + BitDepth bits of precision is enough to avoid overflow.
    const u8 bits = 24 + bit_depth;
    for (auto i = 0u; i < 8; i++)
        VERIFY(check_bounds(high_precision_temp[i], bits));
    // 3. Invoke SH( i, 4+i ) for i = 0..3.
    for (auto i = 0u; i < 4; i++)
        hadamard_rotation(high_precision_temp.span(), data, i, 4 + i);
    // 4. Invoke SB( 4+3*i, 5+i, 24-16*i, 1 ) for i = 0..1.
    for (auto i = 0u; i < 2; i++)
        butterfly_rotation(data, high_precision_temp.span(), 4 + (3 * i), 5 + i, 24 - (16 * i), true);
    // Check again that we haven't exceeded the integer bounds.
    for (auto i = 0u; i < 8; i++)
        VERIFY(check_bounds(high_precision_temp[i], bits));
    // 5. Invoke SH( 4+i, 6+i ) for i = 0..1.
    for (auto i = 0u; i < 2; i++)
        hadamard_rotation(high_precision_temp.span(), data, 4 + i, 6 + i);
    // 6. Invoke H( i, 2+i, 0 ) for i = 0..1.
    for (auto i = 0u; i < 2; i++)
        hadamard_rotation_in_place(bit_depth, data, i, 2 + i, false);
    // 7. Invoke B( 2+4*i, 3+4*i, 16, 1 ) for i = 0..1.
    for (auto i = 0u; i < 2; i++)
        butterfly_rotation_in_place(bit_depth, data, 2 + (4 * i), 3 + (4 * i), 16, true);
    // 8. Invoke the ADST output array permutation process specified in section 8.7.1.5 with the input variable n
    // set equal to 3.
    inverse_asymmetric_discrete_sine_transform_output_array_permutation(data, 3);
    // 9. Set T[ 1+2*i ] equal to -T[ 1+2*i ] for i = 0..3.
    // (Negate the odd output positions.)
    for (auto i = 0u; i < 4; i++) {
        auto index = 1 + (2 * i);
        data[index] = -data[index];
    }
    return {};
}
// (8.7.1.8) Inverse ADST16 process: in-place inverse asymmetric discrete sine
// transform of the 16-element array T (data), following the spec's ordered steps.
// bit_depth is used only to size the overflow checks and the rotation rounding.
inline DecoderErrorOr<void> Decoder::inverse_asymmetric_discrete_sine_transform_16(u8 bit_depth, Span<Intermediate> data)
{
    VERIFY(data.size() == 16);
    // This process does an in-place transform of the array T using:
    // A higher precision array S for intermediate results.
    Array<i64, 16> high_precision_temp;
    // The following ordered steps apply:
    // 1. Invoke the ADST input array permutation process specified in section 8.7.1.4 with the input variable n set
    // equal to 4.
    inverse_asymmetric_discrete_sine_transform_input_array_permutation(data, 4);
    // 2. Invoke SB( 2*i, 1+2*i, 31-4*i, 1 ) for i = 0..7.
    for (auto i = 0u; i < 8; i++)
        butterfly_rotation(data, high_precision_temp.span(), 2 * i, 1 + (2 * i), 31 - (4 * i), true);
    // (8.7.1.1) The inverse asymmetric discrete sine transforms also make use of an intermediate array named S.
    // The values in this array require higher precision to avoid overflow. Using signed integers with 24 +
    // BitDepth bits of precision is enough to avoid overflow.
    const u8 bits = 24 + bit_depth;
    for (auto i = 0u; i < 16; i++)
        VERIFY(check_bounds(data[i], bits));
    // 3. Invoke SH( i, 8+i ) for i = 0..7.
    for (auto i = 0u; i < 8; i++)
        hadamard_rotation(high_precision_temp.span(), data, i, 8 + i);
    // 4. Invoke SB( 8+2*i, 9+2*i, 28-16*i, 1 ) for i = 0..3.
    // NOTE(review): the code passes 128 + (28 - 16*i) rather than the comment's 28-16*i,
    // which would be negative for i >= 2. Presumably this biases the angle into the
    // non-negative range expected by the cos/sin lookup — confirm against spec
    // section 8.7.1.8 and the cos128/sin128 definitions in 8.7.1.1.
    for (auto i = 0u; i < 4; i++)
        butterfly_rotation(data, high_precision_temp.span(), 8 + (2 * i), 9 + (2 * i), 128 + 28 - (16 * i), true);
    // Check again that we haven't exceeded the integer bounds.
    for (auto i = 0u; i < 16; i++)
        VERIFY(check_bounds(data[i], bits));
    // 5. Invoke SH( 8+i, 12+i ) for i = 0..3.
    for (auto i = 0u; i < 4; i++)
        hadamard_rotation(high_precision_temp.span(), data, 8 + i, 12 + i);
    // 6. Invoke H( i, 4+i, 0 ) for i = 0..3.
    for (auto i = 0u; i < 4; i++)
        hadamard_rotation_in_place(bit_depth, data, i, 4 + i, false);
    // 7. Invoke SB( 4+8*i+3*j, 5+8*i+j, 24-16*j, 1 ) for i = 0..1, for j = 0..1.
    for (auto i = 0u; i < 2; i++)
        for (auto j = 0u; j < 2; j++)
            butterfly_rotation(data, high_precision_temp.span(), 4 + (8 * i) + (3 * j), 5 + (8 * i) + j, 24 - (16 * j), true);
    // Check again that we haven't exceeded the integer bounds.
    for (auto i = 0u; i < 16; i++)
        VERIFY(check_bounds(data[i], bits));
    // 8. Invoke SH( 4+8*j+i, 6+8*j+i ) for i = 0..1, j = 0..1.
    for (auto i = 0u; i < 2; i++)
        for (auto j = 0u; j < 2; j++)
            hadamard_rotation(high_precision_temp.span(), data, 4 + (8 * j) + i, 6 + (8 * j) + i);
    // 9. Invoke H( 8*j+i, 2+8*j+i, 0 ) for i = 0..1, for j = 0..1.
    for (auto i = 0u; i < 2; i++)
        for (auto j = 0u; j < 2; j++)
            hadamard_rotation_in_place(bit_depth, data, (8 * j) + i, 2 + (8 * j) + i, false);
    // 10. Invoke B( 2+4*j+8*i, 3+4*j+8*i, 48+64*(i^j), 0 ) for i = 0..1, for j = 0..1.
    for (auto i = 0u; i < 2; i++)
        for (auto j = 0u; j < 2; j++)
            butterfly_rotation_in_place(bit_depth, data, 2 + (4 * j) + (8 * i), 3 + (4 * j) + (8 * i), 48 + (64 * (i ^ j)), false);
    // 11. Invoke the ADST output array permutation process specified in section 8.7.1.5 with the input variable n
    // set equal to 4.
    inverse_asymmetric_discrete_sine_transform_output_array_permutation(data, 4);
    // 12. Set T[ 1+12*j+2*i ] equal to -T[ 1+12*j+2*i ] for i = 0..1, for j = 0..1.
    // (Negates the four odd-position outputs selected by the spec's index formula.)
    for (auto i = 0u; i < 2; i++) {
        for (auto j = 0u; j < 2; j++) {
            auto index = 1 + (12 * j) + (2 * i);
            data[index] = -data[index];
        }
    }
    return {};
}
  1437. inline DecoderErrorOr<void> Decoder::inverse_asymmetric_discrete_sine_transform(u8 bit_depth, Span<Intermediate> data, u8 log2_of_block_size)
  1438. {
  1439. // 8.7.1.9 Inverse ADST Process
  1440. // This process performs an in-place inverse ADST process on the array T of size 2^n for 2 ≤ n ≤ 4.
  1441. if (log2_of_block_size < 2 || log2_of_block_size > 4)
  1442. return DecoderError::corrupted("Block size was out of range"sv);
  1443. // The process to invoke depends on n as follows:
  1444. if (log2_of_block_size == 2) {
  1445. // − If n is equal to 2, invoke the Inverse ADST4 process specified in section 8.7.1.6.
  1446. inverse_asymmetric_discrete_sine_transform_4(bit_depth, data);
  1447. return {};
  1448. }
  1449. if (log2_of_block_size == 3) {
  1450. // − Otherwise if n is equal to 3, invoke the Inverse ADST8 process specified in section 8.7.1.7.
  1451. return inverse_asymmetric_discrete_sine_transform_8(bit_depth, data);
  1452. }
  1453. // − Otherwise (n is equal to 4), invoke the Inverse ADST16 process specified in section 8.7.1.8.
  1454. return inverse_asymmetric_discrete_sine_transform_16(bit_depth, data);
  1455. }
  1456. DecoderErrorOr<void> Decoder::inverse_transform_2d(BlockContext const& block_context, Span<Intermediate> dequantized, u8 log2_of_block_size, u8 transform_type)
  1457. {
  1458. // This process performs a 2D inverse transform for an array of size 2^n by 2^n stored in the 2D array Dequant.
  1459. // The input to this process is a variable n (log2_of_block_size) that specifies the base 2 logarithm of the width of the transform.
  1460. // 1. Set the variable n0 (block_size) equal to 1 << n.
  1461. auto block_size = 1u << log2_of_block_size;
  1462. Array<Intermediate, maximum_transform_size> row_array;
  1463. Span<Intermediate> row = row_array.span().trim(block_size);
  1464. // 2. The row transforms with i = 0..(n0-1) are applied as follows:
  1465. for (auto i = 0u; i < block_size; i++) {
  1466. // 1. Set T[ j ] equal to Dequant[ i ][ j ] for j = 0..(n0-1).
  1467. for (auto j = 0u; j < block_size; j++)
  1468. row[j] = dequantized[index_from_row_and_column(i, j, block_size)];
  1469. // 2. If Lossless is equal to 1, invoke the Inverse WHT process as specified in section 8.7.1.10 with shift equal
  1470. // to 2.
  1471. if (block_context.frame_context.is_lossless()) {
  1472. TRY(inverse_walsh_hadamard_transform(row, log2_of_block_size, 2));
  1473. continue;
  1474. }
  1475. switch (transform_type) {
  1476. case DCT_DCT:
  1477. case ADST_DCT:
  1478. // Otherwise, if TxType is equal to DCT_DCT or TxType is equal to ADST_DCT, apply an inverse DCT as
  1479. // follows:
  1480. // 1. Invoke the inverse DCT permutation process as specified in section 8.7.1.2 with the input variable n.
  1481. TRY(inverse_discrete_cosine_transform_array_permutation(row, log2_of_block_size));
  1482. // 2. Invoke the inverse DCT process as specified in section 8.7.1.3 with the input variable n.
  1483. TRY(inverse_discrete_cosine_transform(block_context.frame_context.color_config.bit_depth, row, log2_of_block_size));
  1484. break;
  1485. case DCT_ADST:
  1486. case ADST_ADST:
  1487. // 4. Otherwise (TxType is equal to DCT_ADST or TxType is equal to ADST_ADST), invoke the inverse ADST
  1488. // process as specified in section 8.7.1.9 with input variable n.
  1489. TRY(inverse_asymmetric_discrete_sine_transform(block_context.frame_context.color_config.bit_depth, row, log2_of_block_size));
  1490. break;
  1491. default:
  1492. return DecoderError::corrupted("Unknown tx_type"sv);
  1493. }
  1494. // 5. Set Dequant[ i ][ j ] equal to T[ j ] for j = 0..(n0-1).
  1495. for (auto j = 0u; j < block_size; j++)
  1496. dequantized[index_from_row_and_column(i, j, block_size)] = row[j];
  1497. }
  1498. Array<Intermediate, maximum_transform_size> column_array;
  1499. auto column = column_array.span().trim(block_size);
  1500. // 3. The column transforms with j = 0..(n0-1) are applied as follows:
  1501. for (auto j = 0u; j < block_size; j++) {
  1502. // 1. Set T[ i ] equal to Dequant[ i ][ j ] for i = 0..(n0-1).
  1503. for (auto i = 0u; i < block_size; i++)
  1504. column[i] = dequantized[index_from_row_and_column(i, j, block_size)];
  1505. // 2. If Lossless is equal to 1, invoke the Inverse WHT process as specified in section 8.7.1.10 with shift equal
  1506. // to 0.
  1507. if (block_context.frame_context.is_lossless()) {
  1508. TRY(inverse_walsh_hadamard_transform(column, log2_of_block_size, 2));
  1509. continue;
  1510. }
  1511. switch (transform_type) {
  1512. case DCT_DCT:
  1513. case DCT_ADST:
  1514. // Otherwise, if TxType is equal to DCT_DCT or TxType is equal to DCT_ADST, apply an inverse DCT as
  1515. // follows:
  1516. // 1. Invoke the inverse DCT permutation process as specified in section 8.7.1.2 with the input variable n.
  1517. TRY(inverse_discrete_cosine_transform_array_permutation(column, log2_of_block_size));
  1518. // 2. Invoke the inverse DCT process as specified in section 8.7.1.3 with the input variable n.
  1519. TRY(inverse_discrete_cosine_transform(block_context.frame_context.color_config.bit_depth, column, log2_of_block_size));
  1520. break;
  1521. case ADST_DCT:
  1522. case ADST_ADST:
  1523. // 4. Otherwise (TxType is equal to ADST_DCT or TxType is equal to ADST_ADST), invoke the inverse ADST
  1524. // process as specified in section 8.7.1.9 with input variable n.
  1525. TRY(inverse_asymmetric_discrete_sine_transform(block_context.frame_context.color_config.bit_depth, column, log2_of_block_size));
  1526. break;
  1527. default:
  1528. VERIFY_NOT_REACHED();
  1529. }
  1530. // 5. If Lossless is equal to 1, set Dequant[ i ][ j ] equal to T[ i ] for i = 0..(n0-1).
  1531. for (auto i = 0u; i < block_size; i++)
  1532. dequantized[index_from_row_and_column(i, j, block_size)] = column[i];
  1533. // 6. Otherwise (Lossless is equal to 0), set Dequant[ i ][ j ] equal to Round2( T[ i ], Min( 6, n + 2 ) )
  1534. // for i = 0..(n0-1).
  1535. if (!block_context.frame_context.is_lossless()) {
  1536. for (auto i = 0u; i < block_size; i++) {
  1537. auto index = index_from_row_and_column(i, j, block_size);
  1538. dequantized[index] = round_2(dequantized[index], min(6, log2_of_block_size + 2));
  1539. }
  1540. }
  1541. }
  1542. return {};
  1543. }
  1544. DecoderErrorOr<void> Decoder::update_reference_frames(FrameContext const& frame_context)
  1545. {
  1546. // This process is invoked as the final step in decoding a frame.
  1547. // The inputs to this process are the samples in the current frame CurrFrame[ plane ][ x ][ y ].
  1548. // The output from this process is an updated set of reference frames and previous motion vectors.
  1549. // The following ordered steps apply:
  1550. // 1. For each value of i from 0 to NUM_REF_FRAMES - 1, the following applies if bit i of refresh_frame_flags
  1551. // is equal to 1 (i.e. if (refresh_frame_flags>>i)&1 is equal to 1):
  1552. for (u8 i = 0; i < NUM_REF_FRAMES; i++) {
  1553. if (frame_context.should_update_reference_frame_at_index(i)) {
  1554. // − RefFrameWidth[ i ] is set equal to FrameWidth.
  1555. // − RefFrameHeight[ i ] is set equal to FrameHeight.
  1556. m_parser->m_ref_frame_size[i] = frame_context.size();
  1557. // − RefSubsamplingX[ i ] is set equal to subsampling_x.
  1558. m_parser->m_ref_subsampling_x[i] = frame_context.color_config.subsampling_x;
  1559. // − RefSubsamplingY[ i ] is set equal to subsampling_y.
  1560. m_parser->m_ref_subsampling_y[i] = frame_context.color_config.subsampling_y;
  1561. // − RefBitDepth[ i ] is set equal to BitDepth.
  1562. m_parser->m_ref_bit_depth[i] = frame_context.color_config.bit_depth;
  1563. // − FrameStore[ i ][ 0 ][ y ][ x ] is set equal to CurrFrame[ 0 ][ y ][ x ] for x = 0..FrameWidth-1, for y =
  1564. // 0..FrameHeight-1.
  1565. // − FrameStore[ i ][ plane ][ y ][ x ] is set equal to CurrFrame[ plane ][ y ][ x ] for plane = 1..2, for x =
  1566. // 0..((FrameWidth+subsampling_x) >> subsampling_x)-1, for y = 0..((FrameHeight+subsampling_y) >>
  1567. // subsampling_y)-1.
  1568. // FIXME: Frame width is not equal to the buffer's stride. If we store the stride of the buffer with the reference
  1569. // frame, we can just copy the framebuffer data instead. Alternatively, we should crop the output framebuffer.
  1570. for (auto plane = 0u; plane < 3; plane++) {
  1571. auto width = frame_context.size().width();
  1572. auto height = frame_context.size().height();
  1573. auto stride = frame_context.columns() * 8;
  1574. if (plane > 0) {
  1575. width = (width + frame_context.color_config.subsampling_x) >> frame_context.color_config.subsampling_x;
  1576. height = (height + frame_context.color_config.subsampling_y) >> frame_context.color_config.subsampling_y;
  1577. stride >>= frame_context.color_config.subsampling_x;
  1578. }
  1579. auto original_buffer = get_output_buffer(plane);
  1580. auto& frame_store_buffer = m_parser->m_frame_store[i][plane];
  1581. frame_store_buffer.resize_and_keep_capacity(width * height);
  1582. for (auto x = 0u; x < width; x++) {
  1583. for (auto y = 0u; y < height; y++) {
  1584. auto sample = original_buffer[index_from_row_and_column(y, x, stride)];
  1585. frame_store_buffer[index_from_row_and_column(y, x, width)] = sample;
  1586. }
  1587. }
  1588. }
  1589. }
  1590. }
  1591. // 2. If show_existing_frame is equal to 0, the following applies:
  1592. if (!frame_context.shows_existing_frame()) {
  1593. DECODER_TRY_ALLOC(m_parser->m_previous_block_contexts.try_resize_to_match_other_vector2d(frame_context.block_contexts()));
  1594. // − PrevRefFrames[ row ][ col ][ list ] is set equal to RefFrames[ row ][ col ][ list ] for row = 0..MiRows-1,
  1595. // for col = 0..MiCols-1, for list = 0..1.
  1596. // − PrevMvs[ row ][ col ][ list ][ comp ] is set equal to Mvs[ row ][ col ][ list ][ comp ] for row = 0..MiRows-1,
  1597. // for col = 0..MiCols-1, for list = 0..1, for comp = 0..1.
  1598. // And from decode_frame():
  1599. // - If all of the following conditions are true, PrevSegmentIds[ row ][ col ] is set equal to
  1600. // SegmentIds[ row ][ col ] for row = 0..MiRows-1, for col = 0..MiCols-1:
  1601. // − show_existing_frame is equal to 0,
  1602. // − segmentation_enabled is equal to 1,
  1603. // − segmentation_update_map is equal to 1.
  1604. bool keep_segment_ids = !frame_context.shows_existing_frame() && frame_context.segmentation_enabled && frame_context.use_full_segment_id_tree;
  1605. frame_context.block_contexts().copy_to(m_parser->m_previous_block_contexts, [keep_segment_ids](FrameBlockContext context) {
  1606. auto persistent_context = PersistentBlockContext(context);
  1607. if (!keep_segment_ids)
  1608. persistent_context.segment_id = 0;
  1609. return persistent_context;
  1610. });
  1611. }
  1612. return {};
  1613. }
  1614. }