MemoryStream.cpp

/*
 * Copyright (c) 2021, kleines Filmröllchen <filmroellchen@serenityos.org>.
 * Copyright (c) 2022, Tim Schumacher <timschumi@gmx.de>.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/ByteBuffer.h>
#include <AK/FixedArray.h>
#include <AK/MemMem.h>
#include <AK/MemoryStream.h>

namespace AK {

FixedMemoryStream::FixedMemoryStream(Bytes bytes, Mode mode)
    : m_bytes(bytes)
    , m_writing_enabled(mode == Mode::ReadWrite)
{
}

FixedMemoryStream::FixedMemoryStream(ReadonlyBytes bytes)
    : m_bytes({ const_cast<u8*>(bytes.data()), bytes.size() })
    , m_writing_enabled(false)
{
}

bool FixedMemoryStream::is_eof() const
{
    return m_offset >= m_bytes.size();
}

bool FixedMemoryStream::is_open() const
{
    return true;
}

void FixedMemoryStream::close()
{
    // FIXME: It doesn't make sense to close a memory stream. Therefore, we don't do anything here. Is that fine?
}

ErrorOr<void> FixedMemoryStream::truncate(size_t)
{
    return Error::from_errno(EBADF);
}

ErrorOr<Bytes> FixedMemoryStream::read_some(Bytes bytes)
{
    auto to_read = min(remaining(), bytes.size());
    if (to_read == 0)
        return Bytes {};

    m_bytes.slice(m_offset, to_read).copy_to(bytes);
    m_offset += to_read;
    return bytes.trim(to_read);
}

ErrorOr<void> FixedMemoryStream::read_until_filled(AK::Bytes bytes)
{
    if (remaining() < bytes.size())
        return Error::from_string_view_or_print_error_and_return_errno("Can't read past the end of the stream memory"sv, EINVAL);

    m_bytes.slice(m_offset).copy_trimmed_to(bytes);
    m_offset += bytes.size();

    return {};
}

ErrorOr<size_t> FixedMemoryStream::seek(i64 offset, SeekMode seek_mode)
{
    switch (seek_mode) {
    case SeekMode::SetPosition:
        if (offset > static_cast<i64>(m_bytes.size()))
            return Error::from_string_view_or_print_error_and_return_errno("Offset past the end of the stream memory"sv, EINVAL);

        m_offset = offset;
        break;
    case SeekMode::FromCurrentPosition:
        if (offset + static_cast<i64>(m_offset) > static_cast<i64>(m_bytes.size()))
            return Error::from_string_view_or_print_error_and_return_errno("Offset past the end of the stream memory"sv, EINVAL);

        m_offset += offset;
        break;
    case SeekMode::FromEndPosition:
        if (-offset > static_cast<i64>(m_bytes.size()))
            return Error::from_string_view_or_print_error_and_return_errno("Offset past the start of the stream memory"sv, EINVAL);

        m_offset = m_bytes.size() + offset;
        break;
    }
    return m_offset;
}

ErrorOr<size_t> FixedMemoryStream::write_some(ReadonlyBytes bytes)
{
    // MemoryStream isn't based on file descriptors, but since most other
    // Stream implementations are, the interface specifies EBADF as the
    // "we don't support this particular operation" error code.
    if (!m_writing_enabled)
        return Error::from_errno(EBADF);

    // FIXME: Can this not error?
    auto const nwritten = bytes.copy_trimmed_to(m_bytes.slice(m_offset));
    m_offset += nwritten;
    return nwritten;
}

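// A minimal sketch of the read-only path above, assuming a caller constructs the stream
// from ReadonlyBytes:
//
//     FixedMemoryStream stream { "hello"sv.bytes() };
//     auto result = stream.write_some("x"sv.bytes());
//     // result.is_error() is true and result.error().code() == EBADF, because the
//     // ReadonlyBytes constructor leaves m_writing_enabled set to false.
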
ErrorOr<void> FixedMemoryStream::write_until_depleted(ReadonlyBytes bytes)
{
    if (remaining() < bytes.size())
        return Error::from_string_view_or_print_error_and_return_errno("Write of entire buffer ends past the memory area"sv, EINVAL);

    TRY(write_some(bytes));
    return {};
}

size_t FixedMemoryStream::offset() const
{
    return m_offset;
}

size_t FixedMemoryStream::remaining() const
{
    return m_bytes.size() - m_offset;
}

ErrorOr<Bytes> AllocatingMemoryStream::read_some(Bytes bytes)
{
    size_t read_bytes = 0;

    while (read_bytes < bytes.size()) {
        VERIFY(m_write_offset >= m_read_offset);

        auto range = TRY(next_read_range());
        if (range.size() == 0)
            break;

        auto copied_bytes = range.copy_trimmed_to(bytes.slice(read_bytes));

        read_bytes += copied_bytes;
        m_read_offset += copied_bytes;
    }

    cleanup_unused_chunks();

    return bytes.trim(read_bytes);
}

ErrorOr<size_t> AllocatingMemoryStream::write_some(ReadonlyBytes bytes)
{
    size_t written_bytes = 0;

    while (written_bytes < bytes.size()) {
        VERIFY(m_write_offset >= m_read_offset);

        auto range = TRY(next_write_range());

        auto copied_bytes = bytes.slice(written_bytes).copy_trimmed_to(range);

        written_bytes += copied_bytes;
        m_write_offset += copied_bytes;
    }

    return written_bytes;
}

ErrorOr<void> AllocatingMemoryStream::discard(size_t count)
{
    VERIFY(m_write_offset >= m_read_offset);

    if (count > used_buffer_size())
        return Error::from_string_view_or_print_error_and_return_errno("Number of discarded bytes is higher than the number of allocated bytes"sv, EINVAL);

    m_read_offset += count;

    cleanup_unused_chunks();
    return {};
}

bool AllocatingMemoryStream::is_eof() const
{
    return used_buffer_size() == 0;
}

bool AllocatingMemoryStream::is_open() const
{
    return true;
}

void AllocatingMemoryStream::close()
{
}

size_t AllocatingMemoryStream::used_buffer_size() const
{
    return m_write_offset - m_read_offset;
}

ErrorOr<Optional<size_t>> AllocatingMemoryStream::offset_of(ReadonlyBytes needle) const
{
    VERIFY(m_write_offset >= m_read_offset);

    if (m_chunks.size() == 0)
        return Optional<size_t> {};

    // Ensure that we don't have fully consumed chunks at the beginning of the stream. The trimming
    // below relies on this invariant, which `cleanup_unused_chunks()` maintains at all times.
    VERIFY(m_read_offset < CHUNK_SIZE);

    auto empty_chunks_at_end = ((m_chunks.size() * CHUNK_SIZE - m_write_offset) / CHUNK_SIZE);
    auto chunk_count = m_chunks.size() - empty_chunks_at_end;

    auto search_spans = TRY(FixedArray<ReadonlyBytes>::create(chunk_count));

    for (size_t i = 0; i < chunk_count; i++) {
        search_spans[i] = m_chunks[i].span();
    }

    auto used_size_of_last_chunk = m_write_offset % CHUNK_SIZE;

    // A modulo result of zero only means "the last chunk is empty" while the write offset is still
    // within the first chunk. Once the write offset is at or past CHUNK_SIZE, it already points to
    // the beginning of the next chunk, and a result of zero means "use the last chunk in full".
    if (m_write_offset >= CHUNK_SIZE && used_size_of_last_chunk == 0)
        used_size_of_last_chunk = CHUNK_SIZE;

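    // Worked example (illustrative): with exactly two fully written chunks, m_write_offset == 2 * CHUNK_SIZE,
    // so empty_chunks_at_end == m_chunks.size() - 2 and chunk_count == 2. The modulo above yields 0 even
    // though the second chunk is completely full, so used_size_of_last_chunk is corrected to CHUNK_SIZE
    // and the trim below keeps that chunk whole.
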
    // Trimming is done first to ensure that we don't unintentionally shift around if the first and last chunks are the same.
    search_spans[chunk_count - 1] = search_spans[chunk_count - 1].trim(used_size_of_last_chunk);
    search_spans[0] = search_spans[0].slice(m_read_offset);

    return AK::memmem(search_spans.begin(), search_spans.end(), needle);
}

// Returns the largest contiguous readable range within the current chunk, or an empty span if no unread data remains.
ErrorOr<ReadonlyBytes> AllocatingMemoryStream::next_read_range()
{
    VERIFY(m_write_offset >= m_read_offset);

    size_t const chunk_index = m_read_offset / CHUNK_SIZE;
    size_t const chunk_offset = m_read_offset % CHUNK_SIZE;
    size_t const read_size = min(CHUNK_SIZE - m_read_offset % CHUNK_SIZE, m_write_offset - m_read_offset);

    if (read_size == 0)
        return ReadonlyBytes { static_cast<u8*>(nullptr), 0 };

    VERIFY(chunk_index < m_chunks.size());

    return ReadonlyBytes { m_chunks[chunk_index].data() + chunk_offset, read_size };
}

// Returns the contiguous writable range starting at the write offset, appending a fresh chunk if the
// offset has reached the end of the currently allocated storage.
ErrorOr<Bytes> AllocatingMemoryStream::next_write_range()
{
    VERIFY(m_write_offset >= m_read_offset);

    size_t const chunk_index = m_write_offset / CHUNK_SIZE;
    size_t const chunk_offset = m_write_offset % CHUNK_SIZE;
    size_t const write_size = CHUNK_SIZE - m_write_offset % CHUNK_SIZE;

    if (chunk_index >= m_chunks.size())
        TRY(m_chunks.try_append(TRY(Chunk::create_uninitialized(CHUNK_SIZE))));

    VERIFY(chunk_index < m_chunks.size());

    return Bytes { m_chunks[chunk_index].data() + chunk_offset, write_size };
}

// Drops chunks that have been read in full from the front of the stream and rebases both offsets accordingly.
void AllocatingMemoryStream::cleanup_unused_chunks()
{
    VERIFY(m_write_offset >= m_read_offset);

    auto const chunks_to_remove = m_read_offset / CHUNK_SIZE;

    m_chunks.remove(0, chunks_to_remove);

    m_read_offset -= CHUNK_SIZE * chunks_to_remove;
    m_write_offset -= CHUNK_SIZE * chunks_to_remove;
}

}
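
// A minimal usage sketch for AllocatingMemoryStream, assuming the caller sits in a context where
// TRY() can propagate errors (e.g. a function returning ErrorOr<void>):
//
//     AllocatingMemoryStream stream;
//     TRY(stream.write_until_depleted("hello world"sv.bytes()));
//
//     auto position = TRY(stream.offset_of(" "sv.bytes())); // Optional<size_t> holding 5
//
//     Array<u8, 5> buffer;
//     TRY(stream.read_until_filled(buffer.span()));         // reads "hello" and advances the read offset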