AK: Fix all quadratic-time append-loops over ByteBuffer

Author: Ben Wiederhake, 2023-01-09 15:31:27 +01:00 (committed by Jelle Raaijmakers)
Commit: e147d0b572 (parent: 0ebcc99e12)
Notes: sideshowbarker, 2024-07-17 05:03:11 +09:00
2 changed files with 14 additions and 0 deletions

@@ -289,6 +289,12 @@ private:
     NEVER_INLINE ErrorOr<void> try_ensure_capacity_slowpath(size_t new_capacity)
     {
+        // When we are asked to raise the capacity by very small amounts,
+        // the caller is perhaps appending very little data in many calls.
+        // To avoid copying the entire ByteBuffer every single time,
+        // we raise the capacity exponentially, by a factor of roughly 1.5.
+        // This is most noticeable in Lagom, where kmalloc_good_size is just a no-op.
+        new_capacity = max(new_capacity, (capacity() * 3) / 2);
         new_capacity = kmalloc_good_size(new_capacity);
         auto* new_buffer = (u8*)kmalloc(new_capacity);
         if (!new_buffer)
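
For context on why the factor-of-roughly-1.5 growth matters, here is a minimal, self-contained sketch in plain standard C++. It is not AK code: the simulate_copies helper and the two policy lambdas are made up for illustration, and kmalloc_good_size rounding is deliberately ignored. It counts how many bytes get copied across one million single-byte appends when capacity grows only to the exact size requested, versus when it never grows by less than the 1.5x floor introduced above.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Simulate an append loop: every time the requested size exceeds the current
// capacity, the whole buffer is copied into a fresh allocation (as the slow
// path above does). Returns the total number of bytes copied.
template<typename GrowthPolicy>
static size_t simulate_copies(size_t appends, GrowthPolicy grow)
{
    size_t size = 0;
    size_t capacity = 0;
    size_t bytes_copied = 0;
    for (size_t i = 0; i < appends; ++i) {
        if (size + 1 > capacity) {
            bytes_copied += size; // reallocating copies the existing contents
            capacity = grow(capacity, size + 1);
        }
        ++size;
    }
    return bytes_copied;
}

int main()
{
    size_t const appends = 1000000;

    // Old behavior: capacity grows only to exactly what was asked for,
    // so every single append reallocates and the loop is quadratic overall.
    auto exact = [](size_t, size_t needed) { return needed; };

    // New behavior: never grow by less than a factor of roughly 1.5,
    // mirroring new_capacity = max(new_capacity, (capacity() * 3) / 2).
    auto exponential = [](size_t capacity, size_t needed) {
        return std::max(needed, capacity * 3 / 2);
    };

    std::printf("exact growth:      %zu bytes copied\n", simulate_copies(appends, exact));
    std::printf("1.5x growth floor: %zu bytes copied\n", simulate_copies(appends, exponential));
    return 0;
}

With exact growth the copy count is on the order of n^2 / 2; with the 1.5x floor it stays within a small constant factor of n, which is the quadratic-to-linear change the commit title refers to.
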

@@ -33,6 +33,14 @@ TEST_CASE(equality_operator)
     EXPECT_EQ(d == d, true);
 }
 
+BENCHMARK_CASE(append)
+{
+    ByteBuffer bb;
+    for (size_t i = 0; i < 1000000; ++i) {
+        bb.append(static_cast<u8>(i));
+    }
+}
+
 /*
  * FIXME: These `negative_*` tests should cause precisely one compilation error
  * each, and always for the specified reason. Currently we do not have a harness