FlacWriter.cpp 29 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674
  1. /*
  2. * Copyright (c) 2023, kleines Filmröllchen <filmroellchen@serenityos.org>
  3. *
  4. * SPDX-License-Identifier: BSD-2-Clause
  5. */
  6. #include "FlacWriter.h"
  7. #include <AK/BitStream.h>
  8. #include <AK/DisjointChunks.h>
  9. #include <AK/Endian.h>
  10. #include <AK/IntegralMath.h>
  11. #include <AK/MemoryStream.h>
  12. #include <AK/Statistics.h>
  13. #include <LibCrypto/Checksum/ChecksummingStream.h>
  14. namespace Audio {
  15. ErrorOr<NonnullOwnPtr<FlacWriter>> FlacWriter::create(NonnullOwnPtr<SeekableStream> stream, u32 sample_rate, u8 num_channels, u16 bits_per_sample)
  16. {
  17. auto writer = TRY(AK::adopt_nonnull_own_or_enomem(new (nothrow) FlacWriter(move(stream))));
  18. TRY(writer->set_bits_per_sample(bits_per_sample));
  19. TRY(writer->set_sample_rate(sample_rate));
  20. TRY(writer->set_num_channels(num_channels));
  21. return writer;
  22. }
// The writer takes exclusive ownership of the output stream.
// No data is written until the header format is finalized (see finalize_header_format()).
FlacWriter::FlacWriter(NonnullOwnPtr<SeekableStream> stream)
    : m_stream(move(stream))
{
}
// Best-effort finalization on destruction: flush pending samples and patch the
// header. Errors are deliberately discarded since a destructor cannot report them;
// callers who care about failures must call finalize() themselves.
FlacWriter::~FlacWriter()
{
    if (m_state != WriteState::FullyFinalized)
        (void)finalize();
}
// Finishes the file: writes the header if necessary, flushes any buffered
// (possibly partial) final frame, then seeks back into the STREAMINFO block to
// fill in the fields that are only known after encoding (min/max frame size and
// total sample count). Afterwards the stream is closed and the writer is unusable.
ErrorOr<void> FlacWriter::finalize()
{
    if (m_state == WriteState::FullyFinalized)
        return Error::from_string_view("File is already finalized"sv);

    if (m_state == WriteState::HeaderUnwritten)
        TRY(finalize_header_format());

    if (!m_sample_buffer.is_empty())
        TRY(write_frame());

    {
        // Skip to the minimum frame size field inside STREAMINFO:
        // 1 byte metadata block header + 3 bytes size + 2*2 bytes min/max block size
        TRY(m_stream->seek(m_streaminfo_start_index + 8, AK::SeekMode::SetPosition));
        BigEndianOutputBitStream bit_stream { MaybeOwned<Stream> { *m_stream } };
        // The fields below mirror the exact bit layout emitted in write_header(),
        // starting at the minimum frame size field.
        TRY(bit_stream.write_bits(m_min_frame_size, 24));
        TRY(bit_stream.write_bits(m_max_frame_size, 24));
        TRY(bit_stream.write_bits(m_sample_rate, 20));
        TRY(bit_stream.write_bits(m_num_channels - 1u, 3));
        TRY(bit_stream.write_bits(m_bits_per_sample - 1u, 5));
        TRY(bit_stream.write_bits(m_sample_count, 36));
        TRY(bit_stream.align_to_byte_boundary());
    }

    // TODO: Write the audio data MD5 to the header.

    m_stream->close();
    m_state = WriteState::FullyFinalized;
    return {};
}
  57. ErrorOr<void> FlacWriter::finalize_header_format()
  58. {
  59. if (m_state != WriteState::HeaderUnwritten)
  60. return Error::from_string_view("Header format is already finalized"sv);
  61. TRY(write_header());
  62. m_state = WriteState::FormatFinalized;
  63. return {};
  64. }
  65. ErrorOr<void> FlacWriter::set_num_channels(u8 num_channels)
  66. {
  67. if (m_state != WriteState::HeaderUnwritten)
  68. return Error::from_string_view("Header format is already finalized"sv);
  69. if (num_channels > 8)
  70. return Error::from_string_view("FLAC doesn't support more than 8 channels"sv);
  71. m_num_channels = num_channels;
  72. return {};
  73. }
  74. ErrorOr<void> FlacWriter::set_sample_rate(u32 sample_rate)
  75. {
  76. if (m_state != WriteState::HeaderUnwritten)
  77. return Error::from_string_view("Header format is already finalized"sv);
  78. m_sample_rate = sample_rate;
  79. return {};
  80. }
  81. ErrorOr<void> FlacWriter::set_bits_per_sample(u16 bits_per_sample)
  82. {
  83. if (m_state != WriteState::HeaderUnwritten)
  84. return Error::from_string_view("Header format is already finalized"sv);
  85. if (bits_per_sample < 8 || bits_per_sample > 32)
  86. return Error::from_string_view("FLAC only supports bits per sample between 8 and 32"sv);
  87. m_bits_per_sample = bits_per_sample;
  88. return {};
  89. }
// Writes the "fLaC" stream magic and the mandatory STREAMINFO metadata block.
// Frame sizes, total sample count, and the MD5 checksum are not known yet and are
// left zeroed; finalize() seeks back and patches them (except the MD5, see TODO).
ErrorOr<void> FlacWriter::write_header()
{
    TRY(m_stream->write_until_depleted(flac_magic.bytes()));
    // Remember where STREAMINFO starts so finalize() can seek back to patch it.
    m_streaminfo_start_index = TRY(m_stream->tell());

    ByteBuffer data;
    // STREAMINFO is always exactly 34 bytes long.
    TRY(data.try_resize(34));
    BigEndianOutputBitStream header_stream { TRY(try_make<FixedMemoryStream>(data.bytes())) };

    // Duplication on purpose: we only ever emit fixed-size blocks, so the minimum
    // and maximum block size fields are identical.
    // Minimum block size.
    TRY(header_stream.write_bits(block_size, 16));
    // Maximum block size.
    TRY(header_stream.write_bits(block_size, 16));
    // Leave the frame sizes as unknown for now.
    TRY(header_stream.write_bits(0u, 24));
    TRY(header_stream.write_bits(0u, 24));

    TRY(header_stream.write_bits(m_sample_rate, 20));
    TRY(header_stream.write_bits(m_num_channels - 1u, 3));
    TRY(header_stream.write_bits(m_bits_per_sample - 1u, 5));
    // Leave the sample count as unknown for now.
    TRY(header_stream.write_bits(0u, 36));

    // TODO: Calculate the MD5 signature of all of the audio data.
    auto md5 = TRY(ByteBuffer::create_zeroed(128u / 8u));
    TRY(header_stream.write_until_depleted(md5));

    FlacRawMetadataBlock streaminfo_block = {
        .is_last_block = true,
        .type = FlacMetadataBlockType::STREAMINFO,
        .length = static_cast<u32>(data.size()),
        .data = data,
    };
    TRY(m_stream->write_value(streaminfo_block));
    return {};
}
  123. ErrorOr<void> FlacRawMetadataBlock::write_to_stream(Stream& stream) const
  124. {
  125. BigEndianOutputBitStream bit_stream { MaybeOwned<Stream> { stream } };
  126. TRY(bit_stream.write_bits(static_cast<u8>(is_last_block), 1));
  127. TRY(bit_stream.write_bits(to_underlying(type), 7));
  128. TRY(bit_stream.write_bits(length, 24));
  129. VERIFY(data.size() == length);
  130. TRY(bit_stream.write_until_depleted(data));
  131. return {};
  132. }
  133. // If the given sample count is uncommon, this function will return one of the uncommon marker block sizes.
  134. // The caller has to handle and add these later manually.
  135. static BlockSizeCategory to_common_block_size(u16 sample_count)
  136. {
  137. switch (sample_count) {
  138. case 192:
  139. return BlockSizeCategory::S192;
  140. case 576:
  141. return BlockSizeCategory::S576;
  142. case 1152:
  143. return BlockSizeCategory::S1152;
  144. case 2304:
  145. return BlockSizeCategory::S2304;
  146. case 4608:
  147. return BlockSizeCategory::S4608;
  148. case 256:
  149. return BlockSizeCategory::S256;
  150. case 512:
  151. return BlockSizeCategory::S512;
  152. case 1024:
  153. return BlockSizeCategory::S1024;
  154. case 2048:
  155. return BlockSizeCategory::S2048;
  156. case 4096:
  157. return BlockSizeCategory::S4096;
  158. case 8192:
  159. return BlockSizeCategory::S8192;
  160. case 16384:
  161. return BlockSizeCategory::S16384;
  162. case 32768:
  163. return BlockSizeCategory::S32768;
  164. }
  165. if (sample_count - 1 <= 0xff)
  166. return BlockSizeCategory::Uncommon8Bits;
  167. // Data type guarantees that 16-bit storage is possible.
  168. return BlockSizeCategory::Uncommon16Bits;
  169. }
  170. static ByteBuffer to_utf8(u64 value)
  171. {
  172. ByteBuffer buffer;
  173. if (value < 0x7f) {
  174. buffer.append(static_cast<u8>(value));
  175. } else if (value < 0x7ff) {
  176. buffer.append(static_cast<u8>(0b110'00000 | (value >> 6)));
  177. buffer.append(static_cast<u8>(0b10'000000 | (value & 0b111111)));
  178. } else if (value < 0xffff) {
  179. buffer.append(static_cast<u8>(0b1110'0000 | (value >> 12)));
  180. buffer.append(static_cast<u8>(0b10'000000 | ((value >> 6) & 0b111111)));
  181. buffer.append(static_cast<u8>(0b10'000000 | ((value >> 0) & 0b111111)));
  182. } else if (value < 0x1f'ffff) {
  183. buffer.append(static_cast<u8>(0b11110'000 | (value >> 18)));
  184. buffer.append(static_cast<u8>(0b10'000000 | ((value >> 12) & 0b111111)));
  185. buffer.append(static_cast<u8>(0b10'000000 | ((value >> 6) & 0b111111)));
  186. buffer.append(static_cast<u8>(0b10'000000 | ((value >> 0) & 0b111111)));
  187. } else if (value < 0x3ff'ffff) {
  188. buffer.append(static_cast<u8>(0b111110'00 | (value >> 24)));
  189. buffer.append(static_cast<u8>(0b10'000000 | ((value >> 18) & 0b111111)));
  190. buffer.append(static_cast<u8>(0b10'000000 | ((value >> 12) & 0b111111)));
  191. buffer.append(static_cast<u8>(0b10'000000 | ((value >> 6) & 0b111111)));
  192. buffer.append(static_cast<u8>(0b10'000000 | ((value >> 0) & 0b111111)));
  193. } else if (value < 0x7fff'ffff) {
  194. buffer.append(static_cast<u8>(0b1111110'0 | (value >> 30)));
  195. buffer.append(static_cast<u8>(0b10'000000 | ((value >> 24) & 0b111111)));
  196. buffer.append(static_cast<u8>(0b10'000000 | ((value >> 18) & 0b111111)));
  197. buffer.append(static_cast<u8>(0b10'000000 | ((value >> 12) & 0b111111)));
  198. buffer.append(static_cast<u8>(0b10'000000 | ((value >> 6) & 0b111111)));
  199. buffer.append(static_cast<u8>(0b10'000000 | ((value >> 0) & 0b111111)));
  200. } else if (value < 0xf'ffff'ffff) {
  201. buffer.append(static_cast<u8>(0b11111110));
  202. buffer.append(static_cast<u8>(0b10'000000 | ((value >> 30) & 0b111111)));
  203. buffer.append(static_cast<u8>(0b10'000000 | ((value >> 24) & 0b111111)));
  204. buffer.append(static_cast<u8>(0b10'000000 | ((value >> 18) & 0b111111)));
  205. buffer.append(static_cast<u8>(0b10'000000 | ((value >> 12) & 0b111111)));
  206. buffer.append(static_cast<u8>(0b10'000000 | ((value >> 6) & 0b111111)));
  207. buffer.append(static_cast<u8>(0b10'000000 | ((value >> 0) & 0b111111)));
  208. } else {
  209. // Anything larger is illegal even in expanded UTF-8, but FLAC only passes 32-bit values anyways.
  210. VERIFY_NOT_REACHED();
  211. }
  212. return buffer;
  213. }
// Serializes the frame header, including the optional explicit block size fields
// and the trailing CRC-8 which covers every header byte written before it
// (hence all writes are routed through the checksumming wrapper).
ErrorOr<void> FlacFrameHeader::write_to_stream(Stream& stream) const
{
    Crypto::Checksum::ChecksummingStream<FlacFrameHeaderCRC> checksumming_stream { MaybeOwned<Stream> { stream } };
    BigEndianOutputBitStream bit_stream { MaybeOwned<Stream> { checksumming_stream } };
    // 14-bit frame sync code followed by a mandatory zero bit.
    TRY(bit_stream.write_bits(0b11111111111110u, 14));
    TRY(bit_stream.write_bits(0u, 1));
    TRY(bit_stream.write_bits(to_underlying(blocking_strategy), 1));

    auto common_block_size = to_common_block_size(sample_count);
    TRY(bit_stream.write_bits(to_underlying(common_block_size), 4));

    // We always store sample rate in the file header.
    TRY(bit_stream.write_bits(0u, 4));
    TRY(bit_stream.write_bits(to_underlying(channels), 4));
    // We always store bit depth in the file header.
    TRY(bit_stream.write_bits(0u, 3));
    // Reserved zero bit.
    TRY(bit_stream.write_bits(0u, 1));

    // Frame (or first-sample) index in FLAC's extended UTF-8-like coding.
    auto coded_number = to_utf8(sample_or_frame_index);
    TRY(bit_stream.write_until_depleted(coded_number));

    // Uncommon block sizes are stored explicitly as (sample count - 1)
    // right after the coded number.
    if (common_block_size == BlockSizeCategory::Uncommon8Bits)
        TRY(bit_stream.write_value(static_cast<u8>(sample_count - 1)));
    if (common_block_size == BlockSizeCategory::Uncommon16Bits)
        TRY(bit_stream.write_value(BigEndian<u16>(static_cast<u16>(sample_count - 1))));

    // Ensure that the checksum is calculated correctly.
    TRY(bit_stream.align_to_byte_boundary());
    auto checksum = checksumming_stream.digest();
    TRY(bit_stream.write_value(checksum));
    return {};
}
  242. ErrorOr<void> FlacWriter::write_samples(ReadonlySpan<Sample> samples)
  243. {
  244. if (m_state == WriteState::FullyFinalized)
  245. return Error::from_string_view("File is already finalized"sv);
  246. auto remaining_samples = samples;
  247. while (remaining_samples.size() > 0) {
  248. if (m_sample_buffer.size() == block_size) {
  249. TRY(write_frame());
  250. m_sample_buffer.clear();
  251. }
  252. auto amount_to_copy = min(remaining_samples.size(), m_sample_buffer.capacity() - m_sample_buffer.size());
  253. auto current_buffer_size = m_sample_buffer.size();
  254. TRY(m_sample_buffer.try_resize_and_keep_capacity(current_buffer_size + amount_to_copy));
  255. remaining_samples.copy_trimmed_to(m_sample_buffer.span().slice(current_buffer_size));
  256. remaining_samples = remaining_samples.slice(amount_to_copy);
  257. }
  258. // Ensure that the buffer is flushed if possible.
  259. if (m_sample_buffer.size() == block_size) {
  260. TRY(write_frame());
  261. m_sample_buffer.clear();
  262. }
  263. return {};
  264. }
// Encodes the currently buffered samples as one frame.
// For stereo this also decides between independent channels and mid/side coding,
// using the sample standard deviation as a proxy for expected residual size.
ErrorOr<void> FlacWriter::write_frame()
{
    auto frame_samples = move(m_sample_buffer);
    // De-interleave and integer-quantize subframes.
    float sample_rescale = static_cast<float>(1 << (m_bits_per_sample - 1));
    auto subframe_samples = Vector<Vector<i64, block_size>>();
    TRY(subframe_samples.try_resize_and_keep_capacity(m_num_channels));
    for (auto const& sample : frame_samples) {
        TRY(subframe_samples[0].try_append(static_cast<i64>(sample.left * sample_rescale)));
        // FIXME: We don't have proper data for any channels past 2.
        for (auto i = 1; i < m_num_channels; ++i)
            TRY(subframe_samples[i].try_append(static_cast<i64>(sample.right * sample_rescale)));
    }

    auto channel_type = static_cast<FlacFrameChannelType>(m_num_channels - 1);

    if (channel_type == FlacFrameChannelType::Stereo) {
        auto const& left_channel = subframe_samples[0];
        auto const& right_channel = subframe_samples[1];
        Vector<i64, block_size> mid_channel;
        Vector<i64, block_size> side_channel;
        TRY(mid_channel.try_ensure_capacity(left_channel.size()));
        TRY(side_channel.try_ensure_capacity(left_channel.size()));
        // mid = rounded-down average, side = difference; this is FLAC's mid/side
        // channel assignment.
        for (auto i = 0u; i < left_channel.size(); ++i) {
            auto mid = (left_channel[i] + right_channel[i]) / 2;
            auto side = left_channel[i] - right_channel[i];
            mid_channel.unchecked_append(mid);
            side_channel.unchecked_append(side);
        }

        // Compare the spread of the plain channel pair against the decorrelated pair
        // and keep whichever promises smaller residuals.
        AK::Statistics<i64, AK::DisjointSpans<i64>> normal_costs {
            AK::DisjointSpans<i64> { { subframe_samples[0], subframe_samples[1] } }
        };
        AK::Statistics<i64, AK::DisjointSpans<i64>> correlated_costs {
            AK::DisjointSpans<i64> { { mid_channel, side_channel } }
        };

        if (correlated_costs.standard_deviation() < normal_costs.standard_deviation()) {
            dbgln_if(FLAC_ENCODER_DEBUG, "Using channel coupling since sample stddev {} is better than {}", correlated_costs.standard_deviation(), normal_costs.standard_deviation());
            channel_type = FlacFrameChannelType::MidSideStereo;
            subframe_samples[0] = move(mid_channel);
            subframe_samples[1] = move(side_channel);
        }
    }

    return write_frame_for(subframe_samples, channel_type);
}
// Writes one frame from the given per-channel subframe sample vectors, and
// maintains the running statistics (min/max frame size, sample count) that
// finalize() later patches into STREAMINFO.
ErrorOr<void> FlacWriter::write_frame_for(ReadonlySpan<Vector<i64, block_size>> subblock, FlacFrameChannelType channel_type)
{
    auto sample_count = subblock.first().size();

    FlacFrameHeader header {
        .sample_rate = m_sample_rate,
        .sample_count = static_cast<u16>(sample_count),
        .sample_or_frame_index = static_cast<u32>(m_current_frame),
        .blocking_strategy = BlockingStrategy::Fixed,
        // FIXME: We should brute-force channel coupling for stereo.
        .channels = channel_type,
        .bit_depth = static_cast<u8>(m_bits_per_sample),
        // Calculated for us during header write.
        .checksum = 0,
    };

    // The frame CRC-16 covers everything written from the first header byte up to
    // (but excluding) the checksum itself, so all frame writes go through this wrapper.
    auto frame_stream = Crypto::Checksum::ChecksummingStream<IBMCRC> { MaybeOwned<Stream> { *m_stream } };

    auto frame_start_offset = TRY(m_stream->tell());
    TRY(frame_stream.write_value(header));

    BigEndianOutputBitStream bit_stream { MaybeOwned<Stream> { frame_stream } };
    for (auto i = 0u; i < subblock.size(); ++i) {
        auto const& subframe = subblock[i];
        auto bits_per_sample = m_bits_per_sample;
        // Side channels need an extra bit per sample.
        if ((i == 1 && (channel_type == FlacFrameChannelType::LeftSideStereo || channel_type == FlacFrameChannelType::MidSideStereo))
            || (i == 0 && channel_type == FlacFrameChannelType::RightSideStereo)) {
            bits_per_sample++;
        }
        TRY(write_subframe(subframe.span(), bit_stream, bits_per_sample));
    }
    // Subframes are bit-packed; pad the frame to a whole byte before the CRC.
    TRY(bit_stream.align_to_byte_boundary());

    auto frame_crc = frame_stream.digest();
    dbgln_if(FLAC_ENCODER_DEBUG, "Frame {:4} CRC: {:04x}", m_current_frame, frame_crc);
    TRY(frame_stream.write_value<AK::BigEndian<u16>>(frame_crc));

    auto frame_end_offset = TRY(m_stream->tell());
    auto frame_size = frame_end_offset - frame_start_offset;
    m_max_frame_size = max(m_max_frame_size, frame_size);
    m_min_frame_size = min(m_min_frame_size, frame_size);

    m_current_frame++;
    m_sample_count += sample_count;

    return {};
}
  347. ErrorOr<void> FlacWriter::write_subframe(ReadonlySpan<i64> subframe, BigEndianOutputBitStream& bit_stream, u8 bits_per_sample)
  348. {
  349. // The current subframe encoding strategy is as follows:
  350. // - Check if the subframe is constant; use constant encoding in this case.
  351. // - Try all fixed predictors and record the resulting residuals.
  352. // - Estimate their encoding cost by taking the sum of all absolute logarithmic residuals,
  353. // which is an accurate estimate of the final encoded size of the residuals.
  354. // - Accurately estimate the encoding cost of a verbatim subframe.
  355. // - Select the encoding strategy with the lowest cost out of this selection.
  356. auto constant_value = subframe[0];
  357. auto is_constant = true;
  358. for (auto const sample : subframe) {
  359. if (sample != constant_value) {
  360. is_constant = false;
  361. break;
  362. }
  363. }
  364. if (is_constant) {
  365. dbgln_if(FLAC_ENCODER_DEBUG, "Encoding constant frame with value {}", constant_value);
  366. TRY(bit_stream.write_bits(1u, 0));
  367. TRY(bit_stream.write_bits(to_underlying(FlacSubframeType::Constant), 6));
  368. TRY(bit_stream.write_bits(1u, 0));
  369. TRY(bit_stream.write_bits(bit_cast<u64>(constant_value), bits_per_sample));
  370. return {};
  371. }
  372. auto verbatim_cost_bits = subframe.size() * bits_per_sample;
  373. Optional<FlacLPCEncodedSubframe> best_lpc_subframe;
  374. auto current_min_cost = verbatim_cost_bits;
  375. for (auto order : { FlacFixedLPC::Zero, FlacFixedLPC::One, FlacFixedLPC::Two, FlacFixedLPC::Three, FlacFixedLPC::Four }) {
  376. // Too many warm-up samples would be required; the lower-level encoding procedures assume that this was checked.
  377. if (to_underlying(order) > subframe.size())
  378. continue;
  379. auto encode_result = TRY(encode_fixed_lpc(order, subframe, current_min_cost, bits_per_sample));
  380. if (encode_result.has_value() && encode_result.value().residual_cost_bits < current_min_cost) {
  381. current_min_cost = encode_result.value().residual_cost_bits;
  382. best_lpc_subframe = encode_result.release_value();
  383. }
  384. }
  385. // No LPC encoding was better than verbatim.
  386. if (!best_lpc_subframe.has_value()) {
  387. dbgln_if(FLAC_ENCODER_DEBUG, "Best subframe type was Verbatim; encoding {} samples at {} bps = {} bits", subframe.size(), m_bits_per_sample, verbatim_cost_bits);
  388. TRY(write_verbatim_subframe(subframe, bit_stream, bits_per_sample));
  389. } else {
  390. dbgln_if(FLAC_ENCODER_DEBUG, "Best subframe type was Fixed LPC order {} (estimated cost {} bits); encoding {} samples", to_underlying(best_lpc_subframe->coefficients.get<FlacFixedLPC>()), best_lpc_subframe->residual_cost_bits, subframe.size());
  391. TRY(write_lpc_subframe(best_lpc_subframe.release_value(), bit_stream, bits_per_sample));
  392. }
  393. return {};
  394. }
// Tries to encode the subframe with the given fixed LPC order.
// Returns an empty Optional when this order cannot beat current_min_cost (early
// bail-out) or when a residual would fall outside the 32-bit range.
// On success, the returned subframe carries its total estimated bit cost and the
// optimal single-partition Rice parameter for reuse during the actual encoding.
ErrorOr<Optional<FlacLPCEncodedSubframe>> FlacWriter::encode_fixed_lpc(FlacFixedLPC order, ReadonlySpan<i64> subframe, size_t current_min_cost, u8 bits_per_sample)
{
    FlacLPCEncodedSubframe lpc {
        .warm_up_samples = Vector<i64> { subframe.trim(to_underlying(order)) },
        .coefficients = order,
        .residuals {},
        // Warm-up sample cost.
        .residual_cost_bits = to_underlying(order) * bits_per_sample,
        .single_partition_optimal_order {},
    };
    TRY(lpc.residuals.try_ensure_capacity(subframe.size() - to_underlying(order)));

    Vector<i64> predicted;
    TRY(predicted.try_resize_and_keep_capacity(subframe.size()));
    // Seed the prediction buffer with the warm-up samples; the predictor leaves them untouched.
    lpc.warm_up_samples.span().copy_trimmed_to(predicted);

    // NOTE: Although we can't interrupt the prediction if the corresponding residuals would become too bad,
    // we don't need to branch on the order in every loop during prediction, meaning this shouldn't cost us much.
    predict_fixed_lpc(order, subframe, predicted);

    // There isn’t really a way of computing an LPC’s cost without performing most of the calculations, including a Rice parameter search.
    // This is nevertheless optimized in multiple ways, so that we always bail out once we are sure no improvements can be made.
    auto extra_residual_cost = NumericLimits<size_t>::max();
    // Keep track of when we want to estimate costs again. We don't do this for every new residual since it's an expensive procedure.
    // The likelihood for misprediction is pretty high for large orders; start with a later index for them.
    auto next_cost_estimation_index = min(subframe.size() - 1, first_residual_estimation * (to_underlying(order) + 1));
    for (auto i = to_underlying(order); i < subframe.size(); ++i) {
        auto residual = subframe[i] - predicted[i];
        if (!AK::is_within_range<i32>(residual)) {
            dbgln_if(FLAC_ENCODER_DEBUG, "Bailing from Fixed LPC order {} due to residual overflow ({} is outside the 32-bit range)", to_underlying(order), residual);
            return Optional<FlacLPCEncodedSubframe> {};
        }
        // Capacity was ensured above, so the infallible append cannot reallocate.
        lpc.residuals.append(residual);

        if (i >= next_cost_estimation_index) {
            // Find best exponential Golomb order.
            // Storing this in the LPC data allows us to automatically reuse the computation during LPC encoding.
            // FIXME: Use more than one partition to improve compression.
            // FIXME: Investigate whether this can be estimated “good enough” to improve performance at the cost of compression strength.
            // Especially at larger sample counts, it is unlikely that we will find a different optimal order.
            // Therefore, use a zig-zag search around the previous optimal order.
            extra_residual_cost = NumericLimits<size_t>::max();
            auto start_order = lpc.single_partition_optimal_order;
            size_t useless_parameters = 0;
            size_t steps = 0;
            constexpr auto max_rice_parameter = AK::exp2(4) - 1;
            // Walk outwards from the previous optimum, alternating below/above it.
            for (auto offset = 0; start_order + offset < max_rice_parameter || start_order - offset >= 0; ++offset) {
                for (auto factor : { -1, 1 }) {
                    auto k = start_order + factor * offset;
                    if (k >= max_rice_parameter || k < 0)
                        continue;

                    auto order_cost = count_exp_golomb_bits_in(k, lpc.residuals);
                    if (order_cost < extra_residual_cost) {
                        extra_residual_cost = order_cost;
                        lpc.single_partition_optimal_order = k;
                    } else {
                        useless_parameters++;
                    }
                    steps++;
                    // Don’t do 0 twice.
                    if (offset == 0)
                        break;
                }
                // If we found enough useless parameters, we probably won't find useful ones anymore.
                // The only exception is the first ever parameter search, where we search everything.
                if (useless_parameters >= useless_parameter_threshold && start_order != 0)
                    break;
            }

            // Min cost exceeded; bail out.
            if (lpc.residual_cost_bits + extra_residual_cost > current_min_cost) {
                dbgln_if(FLAC_ENCODER_DEBUG, " Bailing from Fixed LPC order {} at sample index {} and cost {} (best {})", to_underlying(order), i, lpc.residual_cost_bits + extra_residual_cost, current_min_cost);
                return Optional<FlacLPCEncodedSubframe> {};
            }

            // Figure out when to next estimate costs.
            auto estimated_bits_per_residual = static_cast<double>(extra_residual_cost) / static_cast<double>(i);
            auto estimated_residuals_for_min_cost = static_cast<double>(current_min_cost) / estimated_bits_per_residual;
            auto unchecked_next_cost_estimation_index = AK::round_to<size_t>(estimated_residuals_for_min_cost * (1 - residual_cost_margin));
            // Check either at the estimated residual, or the next residual if that is in the past, or the last residual.
            next_cost_estimation_index = min(subframe.size() - 1, max(unchecked_next_cost_estimation_index, i + min_residual_estimation_step));
            dbgln_if(FLAC_ENCODER_DEBUG, " {} {:4} Estimate cost/residual {:.1f} (param {:2} after {:2} steps), will hit at {:6.1f}, jumping to {:4} (sanitized to {:4})", to_underlying(order), i, estimated_bits_per_residual, lpc.single_partition_optimal_order, steps, estimated_residuals_for_min_cost, unchecked_next_cost_estimation_index, next_cost_estimation_index);
        }
    }

    lpc.residual_cost_bits += extra_residual_cost;
    return lpc;
}
  476. void predict_fixed_lpc(FlacFixedLPC order, ReadonlySpan<i64> samples, Span<i64> predicted_output)
  477. {
  478. switch (order) {
  479. case FlacFixedLPC::Zero:
  480. // s_0(t) = 0
  481. for (auto i = to_underlying(order); i < predicted_output.size(); ++i)
  482. predicted_output[i] += 0;
  483. break;
  484. case FlacFixedLPC::One:
  485. // s_1(t) = s(t-1)
  486. for (auto i = to_underlying(order); i < predicted_output.size(); ++i)
  487. predicted_output[i] += samples[i - 1];
  488. break;
  489. case FlacFixedLPC::Two:
  490. // s_2(t) = 2s(t-1) - s(t-2)
  491. for (auto i = to_underlying(order); i < predicted_output.size(); ++i)
  492. predicted_output[i] += 2 * samples[i - 1] - samples[i - 2];
  493. break;
  494. case FlacFixedLPC::Three:
  495. // s_3(t) = 3s(t-1) - 3s(t-2) + s(t-3)
  496. for (auto i = to_underlying(order); i < predicted_output.size(); ++i)
  497. predicted_output[i] += 3 * samples[i - 1] - 3 * samples[i - 2] + samples[i - 3];
  498. break;
  499. case FlacFixedLPC::Four:
  500. // s_4(t) = 4s(t-1) - 6s(t-2) + 4s(t-3) - s(t-4)
  501. for (auto i = to_underlying(order); i < predicted_output.size(); ++i)
  502. predicted_output[i] += 4 * samples[i - 1] - 6 * samples[i - 2] + 4 * samples[i - 3] - samples[i - 4];
  503. break;
  504. default:
  505. VERIFY_NOT_REACHED();
  506. }
  507. }
  508. // https://www.ietf.org/archive/id/draft-ietf-cellar-flac-08.html#name-verbatim-subframe
  509. ErrorOr<void> FlacWriter::write_verbatim_subframe(ReadonlySpan<i64> subframe, BigEndianOutputBitStream& bit_stream, u8 bits_per_sample)
  510. {
  511. TRY(bit_stream.write_bits(0u, 1));
  512. TRY(bit_stream.write_bits(to_underlying(FlacSubframeType::Verbatim), 6));
  513. TRY(bit_stream.write_bits(0u, 1));
  514. for (auto const& sample : subframe)
  515. TRY(bit_stream.write_bits(bit_cast<u64>(sample), bits_per_sample));
  516. return {};
  517. }
  518. // https://www.ietf.org/archive/id/draft-ietf-cellar-flac-08.html#name-fixed-predictor-subframe
  519. ErrorOr<void> FlacWriter::write_lpc_subframe(FlacLPCEncodedSubframe lpc_subframe, BigEndianOutputBitStream& bit_stream, u8 bits_per_sample)
  520. {
  521. // Reserved.
  522. TRY(bit_stream.write_bits(0u, 1));
  523. // 9.2.1 Subframe header (https://www.ietf.org/archive/id/draft-ietf-cellar-flac-08.html#name-subframe-header)
  524. u8 encoded_type;
  525. if (lpc_subframe.coefficients.has<FlacFixedLPC>())
  526. encoded_type = to_underlying(lpc_subframe.coefficients.get<FlacFixedLPC>()) + to_underlying(FlacSubframeType::Fixed);
  527. else
  528. encoded_type = lpc_subframe.coefficients.get<Vector<i64>>().size() - 1 + to_underlying(FlacSubframeType::LPC);
  529. TRY(bit_stream.write_bits(encoded_type, 6));
  530. // No wasted bits per sample (unnecessary for the vast majority of data).
  531. TRY(bit_stream.write_bits(0u, 1));
  532. for (auto const& warm_up_sample : lpc_subframe.warm_up_samples)
  533. TRY(bit_stream.write_bits(bit_cast<u64>(warm_up_sample), bits_per_sample));
  534. // 4-bit Rice parameters.
  535. TRY(bit_stream.write_bits(0b00u, 2));
  536. // Only one partition (2^0 = 1).
  537. TRY(bit_stream.write_bits(0b0000u, 4));
  538. TRY(write_rice_partition(lpc_subframe.single_partition_optimal_order, lpc_subframe.residuals, bit_stream));
  539. return {};
  540. }
  541. ErrorOr<void> FlacWriter::write_rice_partition(u8 k, ReadonlySpan<i64> residuals, BigEndianOutputBitStream& bit_stream)
  542. {
  543. TRY(bit_stream.write_bits(k, 4));
  544. for (auto const& residual : residuals)
  545. TRY(encode_unsigned_exp_golomb(k, static_cast<i32>(residual), bit_stream));
  546. return {};
  547. }
  548. u32 signed_to_rice(i32 x)
  549. {
  550. // Implements (x < 0 ? -1 : 0) + 2 * abs(x) in about half as many instructions.
  551. // The reference encoder’s implementation is known to be the fastest on -O2/3 clang and gcc:
  552. // x << 1 = multiply by 2.
  553. // For negative numbers, x >> 31 will create an all-ones XOR mask, meaning that the number will be inverted.
  554. // In two's complement this is -value - 1, exactly what we need.
  555. // For positive numbers, x >> 31 == 0.
  556. return static_cast<u32>((x << 1) ^ (x >> 31));
  557. }
  558. // Adopted from https://github.com/xiph/flac/blob/28e4f0528c76b296c561e922ba67d43751990599/src/libFLAC/bitwriter.c#L727
  559. ErrorOr<void> encode_unsigned_exp_golomb(u8 k, i32 value, BigEndianOutputBitStream& bit_stream)
  560. {
  561. auto zigzag_encoded = signed_to_rice(value);
  562. auto msbs = zigzag_encoded >> k;
  563. auto pattern = 1u << k;
  564. pattern |= zigzag_encoded & ((1 << k) - 1);
  565. TRY(bit_stream.write_bits(0u, msbs));
  566. TRY(bit_stream.write_bits(pattern, k + 1));
  567. return {};
  568. }
  569. // Adopted from count_rice_bits_in_partition():
  570. // https://github.com/xiph/flac/blob/28e4f0528c76b296c561e922ba67d43751990599/src/libFLAC/stream_encoder.c#L4299
  571. size_t count_exp_golomb_bits_in(u8 k, ReadonlySpan<i64> residuals)
  572. {
  573. // Exponential Golomb order size (4).
  574. // One unary stop bit and the entire exponential Golomb parameter for every residual.
  575. size_t partition_bits = 4 + (1 + k) * residuals.size();
  576. // Bit magic to compute the amount of leading unary bits.
  577. for (auto const& residual : residuals)
  578. partition_bits += (static_cast<u32>((residual << 1) ^ (residual >> 31)) >> k);
  579. return partition_bits;
  580. }
  581. }