LibCore: Add AllocatingMemoryStream::offset_of
This commit is contained in:
parent
ecc202c59d
commit
156b6e83cd
Notes:
sideshowbarker
2024-07-17 10:16:43 +09:00
Author: https://github.com/timschumi Commit: https://github.com/SerenityOS/serenity/commit/156b6e83cd Pull-request: https://github.com/SerenityOS/serenity/pull/16993
3 changed files with 76 additions and 0 deletions
|
@ -523,6 +523,53 @@ TEST_CASE(allocating_memory_stream_empty)
|
|||
auto read_bytes = MUST(stream.read(array));
|
||||
EXPECT_EQ(read_bytes.size(), 0ul);
|
||||
}
|
||||
|
||||
{
|
||||
auto offset = MUST(stream.offset_of("test"sv.bytes()));
|
||||
EXPECT(!offset.has_value());
|
||||
}
|
||||
}
|
||||
|
||||
TEST_CASE(allocating_memory_stream_offset_of)
{
    Core::Stream::AllocatingMemoryStream stream;
    MUST(stream.write_entire_buffer("Well Hello Friends! :^)"sv.bytes()));

    // A needle somewhere in the middle of the written data.
    {
        auto result = MUST(stream.offset_of(" "sv.bytes()));
        EXPECT(result.has_value());
        EXPECT_EQ(result.value(), 4ul);
    }

    // A needle right at the start of the written data.
    {
        auto result = MUST(stream.offset_of("W"sv.bytes()));
        EXPECT(result.has_value());
        EXPECT_EQ(result.value(), 0ul);
    }

    // A needle at the very last written byte.
    {
        auto result = MUST(stream.offset_of(")"sv.bytes()));
        EXPECT(result.has_value());
        EXPECT_EQ(result.value(), 22ul);
    }

    // A needle that does not occur in the written data at all.
    {
        auto result = MUST(stream.offset_of("-"sv.bytes()));
        EXPECT(!result.has_value());
    }

    // Discarding moves the read head: already-read bytes become unsearchable,
    // and reported offsets stay relative to the new read head.
    MUST(stream.discard(1));

    {
        auto result = MUST(stream.offset_of("W"sv.bytes()));
        EXPECT(!result.has_value());
    }

    {
        auto result = MUST(stream.offset_of("e"sv.bytes()));
        EXPECT(result.has_value());
        EXPECT_EQ(result.value(), 0ul);
    }
}
|
||||
|
||||
TEST_CASE(allocating_memory_stream_10kb)
|
||||
|
|
|
@ -5,6 +5,8 @@
|
|||
* SPDX-License-Identifier: BSD-2-Clause
|
||||
*/
|
||||
|
||||
#include <AK/FixedArray.h>
|
||||
#include <AK/MemMem.h>
|
||||
#include <LibCore/MemoryStream.h>
|
||||
|
||||
namespace Core::Stream {
|
||||
|
@ -198,6 +200,31 @@ size_t AllocatingMemoryStream::used_buffer_size() const
|
|||
return m_write_offset - m_read_offset;
|
||||
}
|
||||
|
||||
// Returns the offset, relative to the current read head, of the first occurrence
// of `needle` within the not-yet-read data, or an empty Optional if the needle
// is not present. Does not move the read head.
ErrorOr<Optional<size_t>> AllocatingMemoryStream::offset_of(ReadonlyBytes needle) const
{
    VERIFY(m_write_offset >= m_read_offset);

    // No chunks means no buffered data, so nothing can match.
    if (m_chunks.size() == 0)
        return Optional<size_t> {};

    // Ensure that we don't have to trim away more than one block.
    VERIFY(m_read_offset < chunk_size);
    VERIFY(m_chunks.size() * chunk_size - m_write_offset < chunk_size);

    auto chunk_count = m_chunks.size();
    auto search_spans = TRY(FixedArray<ReadonlyBytes>::try_create(chunk_count));

    for (size_t i = 0; i < chunk_count; i++) {
        search_spans[i] = m_chunks[i].span();
    }

    // Number of bytes of the last chunk that actually hold written data.
    // (Previously this trimmed to the number of *unused* trailing bytes,
    // `chunk_count * chunk_size - m_write_offset`, which made the search scan
    // uninitialized memory past the write head.)
    auto const last_chunk_used = m_write_offset - (chunk_count - 1) * chunk_size;

    // Trimming is done first to ensure that we don't unintentionally shift around if the first and last chunks are the same.
    search_spans[chunk_count - 1] = search_spans[chunk_count - 1].trim(last_chunk_used);
    search_spans[0] = search_spans[0].slice(m_read_offset);

    return AK::memmem(search_spans.begin(), search_spans.end(), needle);
}
|
||||
|
||||
ErrorOr<ReadonlyBytes> AllocatingMemoryStream::next_read_range()
|
||||
{
|
||||
VERIFY(m_write_offset >= m_read_offset);
|
||||
|
|
|
@ -60,6 +60,8 @@ public:
|
|||
|
||||
size_t used_buffer_size() const;
|
||||
|
||||
ErrorOr<Optional<size_t>> offset_of(ReadonlyBytes needle) const;
|
||||
|
||||
private:
|
||||
// Note: We set the inline buffer capacity to zero to make moving chunks as efficient as possible.
|
||||
using Chunk = AK::Detail::ByteBuffer<0>;
|
||||
|
|
Loading…
Add table
Reference in a new issue