@@ -1,212 +1,180 @@
 /*
  * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2021, Gunnar Beutner <gbeutner@serenityos.org>
  *
  * SPDX-License-Identifier: BSD-2-Clause
  */

 #pragma once

-#include <AK/NonnullRefPtr.h>
-#include <AK/RefCounted.h>
-#include <AK/RefPtr.h>
 #include <AK/Span.h>
 #include <AK/Types.h>
 #include <AK/kmalloc.h>

 namespace AK {

+namespace Detail {
+
-class ByteBufferImpl : public RefCounted<ByteBufferImpl> {
+template<size_t inline_capacity>
+class ByteBuffer {
 public:
-    static NonnullRefPtr<ByteBufferImpl> create_uninitialized(size_t size);
-    static NonnullRefPtr<ByteBufferImpl> create_zeroed(size_t);
-    static NonnullRefPtr<ByteBufferImpl> copy(const void*, size_t);
+    ByteBuffer() = default;

-    void operator delete(void* ptr)
+    ~ByteBuffer()
     {
-        kfree(ptr);
+        clear();
     }

-    ByteBufferImpl() = delete;
-
-    u8& operator[](size_t i)
+    ByteBuffer(ByteBuffer const& other)
     {
-        VERIFY(i < m_size);
-        return m_data[i];
+        grow(other.size());
+        VERIFY(m_size == other.size());
+        VERIFY(!m_is_null);
+        __builtin_memcpy(data(), other.data(), other.size());
     }
-    const u8& operator[](size_t i) const
+
+    ByteBuffer(ByteBuffer&& other)
     {
-        VERIFY(i < m_size);
-        return m_data[i];
+        move_from(move(other));
     }
-    bool is_empty() const { return !m_size; }
-    size_t size() const { return m_size; }
-
-    u8* data() { return m_data; }
-    const u8* data() const { return m_data; }
-
-    Bytes bytes() { return { data(), size() }; }
-    ReadonlyBytes bytes() const { return { data(), size() }; }
-
-    Span<u8> span() { return { data(), size() }; }
-    Span<const u8> span() const { return { data(), size() }; }
-
-    u8* offset_pointer(int offset) { return m_data + offset; }
-    const u8* offset_pointer(int offset) const { return m_data + offset; }
-
-    void* end_pointer() { return m_data + m_size; }
-    const void* end_pointer() const { return m_data + m_size; }
-
-    // NOTE: trim() does not reallocate.
-    void trim(size_t size)
+    ByteBuffer& operator=(ByteBuffer&& other)
     {
-        VERIFY(size <= m_size);
-        m_size = size;
+        if (this != &other) {
+            if (!is_inline())
+                kfree(m_outline_buffer);
+            move_from(move(other));
+        }
+        return *this;
     }

-    void zero_fill();
-
-private:
-    explicit ByteBufferImpl(size_t);
-
-    size_t m_size { 0 };
-    u8 m_data[];
-};

+    ByteBuffer& operator=(ByteBuffer const& other)
+    {
+        if (this != &other) {
+            if (m_size > other.size())
+                internal_trim(other.size(), true);
+            else
+                grow(other.size());
+            __builtin_memcpy(data(), other.data(), other.size());
+        }
+        return *this;
+    }

-class ByteBuffer {
-public:
-    ByteBuffer() = default;
-    ByteBuffer(const ByteBuffer& other)
-        : m_impl(other.m_impl)
+    [[nodiscard]] static ByteBuffer create_uninitialized(size_t size)
     {
+        return ByteBuffer(size);
     }
-    ByteBuffer(ByteBuffer&& other)
-        : m_impl(move(other.m_impl))
+
+    [[nodiscard]] static ByteBuffer create_zeroed(size_t size)
     {
+        auto buffer = create_uninitialized(size);
+        buffer.zero_fill();
+        VERIFY(size == 0 || (buffer[0] == 0 && buffer[size - 1] == 0));
+        return buffer;
     }
-    ByteBuffer& operator=(ByteBuffer&& other)
+
+    [[nodiscard]] static ByteBuffer copy(void const* data, size_t size)
     {
-        if (this != &other)
-            m_impl = move(other.m_impl);
-        return *this;
+        auto buffer = create_uninitialized(size);
+        __builtin_memcpy(buffer.data(), data, size);
+        return buffer;
     }
-    ByteBuffer& operator=(const ByteBuffer& other)
+
+    [[nodiscard]] static ByteBuffer copy(ReadonlyBytes bytes)
     {
-        if (this != &other)
-            m_impl = other.m_impl;
-        return *this;
+        return copy(bytes.data(), bytes.size());
     }

-    [[nodiscard]] static ByteBuffer create_uninitialized(size_t size) { return ByteBuffer(ByteBufferImpl::create_uninitialized(size)); }
-    [[nodiscard]] static ByteBuffer create_zeroed(size_t size) { return ByteBuffer(ByteBufferImpl::create_zeroed(size)); }
-    [[nodiscard]] static ByteBuffer copy(const void* data, size_t size) { return ByteBuffer(ByteBufferImpl::copy(data, size)); }
-    [[nodiscard]] static ByteBuffer copy(ReadonlyBytes bytes) { return ByteBuffer(ByteBufferImpl::copy(bytes.data(), bytes.size())); }
+    template<size_t other_inline_capacity>
+    bool operator==(ByteBuffer<other_inline_capacity> const& other) const
+    {
+        if (size() != other.size())
+            return false;
+
+        // So they both have data, and the same length.
+        return !__builtin_memcmp(data(), other.data(), size());
+    }

-    ~ByteBuffer() { clear(); }
-    void clear() { m_impl = nullptr; }
+    bool operator!=(ByteBuffer const& other) const { return !(*this == other); }

     operator bool() const { return !is_null(); }
     bool operator!() const { return is_null(); }
-    [[nodiscard]] bool is_null() const { return m_impl == nullptr; }
-
-    // Disable default implementations that would use surprising integer promotion.
-    bool operator==(const ByteBuffer& other) const;
-    bool operator!=(const ByteBuffer& other) const { return !(*this == other); }
-    bool operator<=(const ByteBuffer& other) const = delete;
-    bool operator>=(const ByteBuffer& other) const = delete;
-    bool operator<(const ByteBuffer& other) const = delete;
-    bool operator>(const ByteBuffer& other) const = delete;
+    [[nodiscard]] bool is_null() const { return m_is_null; }

     [[nodiscard]] u8& operator[](size_t i)
     {
-        VERIFY(m_impl);
-        return (*m_impl)[i];
+        VERIFY(i < m_size);
+        return data()[i];
     }
-    [[nodiscard]] u8 operator[](size_t i) const
+
+    [[nodiscard]] u8 const& operator[](size_t i) const
     {
-        VERIFY(m_impl);
-        return (*m_impl)[i];
+        VERIFY(i < m_size);
+        return data()[i];
     }
-    [[nodiscard]] bool is_empty() const { return !m_impl || m_impl->is_empty(); }
-    [[nodiscard]] size_t size() const { return m_impl ? m_impl->size() : 0; }

-    [[nodiscard]] u8* data() { return m_impl ? m_impl->data() : nullptr; }
-    [[nodiscard]] const u8* data() const { return m_impl ? m_impl->data() : nullptr; }
+    [[nodiscard]] bool is_empty() const { return !m_size; }
+    [[nodiscard]] size_t size() const { return m_size; }

-    [[nodiscard]] Bytes bytes()
-    {
-        if (m_impl) {
-            return m_impl->bytes();
-        }
-        return {};
-    }
-    [[nodiscard]] ReadonlyBytes bytes() const
-    {
-        if (m_impl) {
-            return m_impl->bytes();
-        }
-        return {};
-    }
+    [[nodiscard]] u8* data() { return is_inline() ? m_inline_buffer : m_outline_buffer; }
+    [[nodiscard]] u8 const* data() const { return is_inline() ? m_inline_buffer : m_outline_buffer; }

-    [[nodiscard]] Span<u8> span()
-    {
-        if (m_impl) {
-            return m_impl->span();
-        }
-        return {};
-    }
-    [[nodiscard]] Span<const u8> span() const
-    {
-        if (m_impl) {
-            return m_impl->span();
-        }
-        return {};
-    }
+    [[nodiscard]] Bytes bytes() { return { data(), size() }; }
+    [[nodiscard]] ReadonlyBytes bytes() const { return { data(), size() }; }

-    [[nodiscard]] u8* offset_pointer(int offset) { return m_impl ? m_impl->offset_pointer(offset) : nullptr; }
-    [[nodiscard]] const u8* offset_pointer(int offset) const { return m_impl ? m_impl->offset_pointer(offset) : nullptr; }
+    [[nodiscard]] AK::Span<u8> span() { return { data(), size() }; }
+    [[nodiscard]] AK::Span<const u8> span() const { return { data(), size() }; }

-    [[nodiscard]] void* end_pointer() { return m_impl ? m_impl->end_pointer() : nullptr; }
-    [[nodiscard]] const void* end_pointer() const { return m_impl ? m_impl->end_pointer() : nullptr; }
+    [[nodiscard]] u8* offset_pointer(int offset) { return data() + offset; }
+    [[nodiscard]] u8 const* offset_pointer(int offset) const { return data() + offset; }

-    [[nodiscard]] ByteBuffer isolated_copy() const
-    {
-        if (!m_impl)
-            return {};
-        return copy(m_impl->data(), m_impl->size());
-    }
+    [[nodiscard]] void* end_pointer() { return data() + m_size; }
+    [[nodiscard]] void const* end_pointer() const { return data() + m_size; }

-    // NOTE: trim() does not reallocate.
     void trim(size_t size)
     {
-        if (m_impl)
-            m_impl->trim(size);
+        internal_trim(size, false);
     }

     [[nodiscard]] ByteBuffer slice(size_t offset, size_t size) const
     {
-        if (is_null())
-            return {};
-
-        if (offset == 0 && size == this->size())
-            return *this;
-
         // I cannot hand you a slice I don't have
         VERIFY(offset + size <= this->size());

         return copy(offset_pointer(offset), size);
     }

-    void grow(size_t size)
+    void clear()
+    {
+        if (!is_inline())
+            kfree(m_outline_buffer);
+        m_size = 0;
+    }
+
+    void grow(size_t new_size)
     {
-        if (m_impl && size < m_impl->size())
+        m_is_null = false;
+        if (new_size <= m_size)
             return;
-        auto new_impl = ByteBufferImpl::create_uninitialized(size);
-        if (m_impl)
-            __builtin_memcpy(new_impl->data(), m_impl->data(), m_impl->size());
-        m_impl = new_impl;
+        if (new_size <= capacity()) {
+            m_size = new_size;
+            return;
+        }
+        u8* new_buffer;
+        auto new_capacity = kmalloc_good_size(new_size);
+        if (!is_inline()) {
+            new_buffer = (u8*)krealloc(m_outline_buffer, new_capacity);
+            VERIFY(new_buffer);
+        } else {
+            new_buffer = (u8*)kmalloc(new_capacity);
+            VERIFY(new_buffer);
+            __builtin_memcpy(new_buffer, data(), m_size);
+        }
+        m_outline_buffer = new_buffer;
+        m_outline_capacity = new_capacity;
+        m_size = new_size;
     }

-    void append(const void* data, size_t data_size)
+    void append(void const* data, size_t data_size)
     {
         if (data_size == 0)
             return;
@@ -216,12 +184,12 @@ public:
         __builtin_memcpy(this->data() + old_size, data, data_size);
     }

-    void operator+=(const ByteBuffer& other)
+    void operator+=(ByteBuffer const& other)
     {
         append(other.data(), other.size());
     }

-    void overwrite(size_t offset, const void* data, size_t data_size)
+    void overwrite(size_t offset, void const* data, size_t data_size)
     {
         // make sure we're not told to write past the end
         VERIFY(offset + data_size <= size());
@@ -230,53 +198,59 @@ public:

     void zero_fill()
     {
-        m_impl->zero_fill();
+        __builtin_memset(data(), 0, m_size);
     }

     operator Bytes() { return bytes(); }
     operator ReadonlyBytes() const { return bytes(); }

 private:
-    explicit ByteBuffer(RefPtr<ByteBufferImpl>&& impl)
-        : m_impl(move(impl))
+    ByteBuffer(size_t size)
     {
+        grow(size);
+        VERIFY(!m_is_null);
+        VERIFY(m_size == size);
     }

-    RefPtr<ByteBufferImpl> m_impl;
-};
-
-inline ByteBufferImpl::ByteBufferImpl(size_t size)
-    : m_size(size)
-{
-}
+    void move_from(ByteBuffer&& other)
+    {
+        m_is_null = other.m_is_null;
+        m_size = other.m_size;
+        if (other.m_size > inline_capacity) {
+            m_outline_buffer = other.m_outline_buffer;
+            m_outline_capacity = other.m_outline_capacity;
+        } else
+            __builtin_memcpy(m_inline_buffer, other.m_inline_buffer, other.m_size);
+        other.m_is_null = true;
+        other.m_size = 0;
+    }

-inline void ByteBufferImpl::zero_fill()
-{
-    __builtin_memset(m_data, 0, m_size);
-}
+    void internal_trim(size_t size, bool may_discard_existing_data)
+    {
+        VERIFY(size <= m_size);
+        if (!is_inline() && size <= inline_capacity) {
+            // m_inline_buffer and m_outline_buffer are part of a union, so save the pointer
+            auto outline_buffer = m_outline_buffer;
+            if (!may_discard_existing_data)
+                __builtin_memcpy(m_inline_buffer, outline_buffer, size);
+            kfree(outline_buffer);
+        }
+        m_size = size;
+    }

-inline NonnullRefPtr<ByteBufferImpl> ByteBufferImpl::create_uninitialized(size_t size)
-{
-    auto* buffer = kmalloc(sizeof(ByteBufferImpl) + size);
-    VERIFY(buffer);
-    return ::adopt_ref(*new (buffer) ByteBufferImpl(size));
-}
+    bool is_inline() const { return m_size <= inline_capacity; }
+    size_t capacity() const { return is_inline() ? inline_capacity : m_outline_capacity; }

-inline NonnullRefPtr<ByteBufferImpl> ByteBufferImpl::create_zeroed(size_t size)
-{
-    auto buffer = create_uninitialized(size);
-    if (size != 0)
-        __builtin_memset(buffer->data(), 0, size);
-    return buffer;
-}
+    size_t m_size { 0 };
+    bool m_is_null { true };
+    union {
+        u8 m_inline_buffer[inline_capacity];
+        struct {
+            u8* m_outline_buffer;
+            size_t m_outline_capacity;
+        };
+    };
+};

-inline NonnullRefPtr<ByteBufferImpl> ByteBufferImpl::copy(const void* data, size_t size)
-{
-    auto buffer = create_uninitialized(size);
-    __builtin_memcpy(buffer->data(), data, size);
-    return buffer;
 }
-
 }
-
-using AK::ByteBuffer;
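
A minimal usage sketch of the rewritten class (illustrative, not part of the diff): it instantiates Detail::ByteBuffer<32> directly, since the public-facing replacement for the removed `using AK::ByteBuffer;` alias is not shown in these hunks, and it assumes the userland shims behind AK/kmalloc.h and AK/Assertions.h.

// Sketch only: exercises the inline/outline behavior introduced above.
#include <AK/Assertions.h>
#include <AK/ByteBuffer.h>

int main()
{
    // copy() routes through create_uninitialized(); 5 bytes fit within the
    // 32-byte inline_capacity, so the bytes land in m_inline_buffer.
    auto buffer = AK::Detail::ByteBuffer<32>::copy("hello", 5);

    // Growing past inline_capacity allocates an outline buffer sized by
    // kmalloc_good_size() and copies the existing bytes into it.
    buffer.grow(64);

    // Shrinking back below inline_capacity makes internal_trim() copy the
    // bytes back into inline storage and kfree() the outline buffer.
    buffer.trim(5);

    // m_is_null distinguishes a never-initialized buffer from an empty one,
    // now that there is no RefPtr to compare against nullptr.
    AK::Detail::ByteBuffer<32> null_buffer;
    VERIFY(null_buffer.is_null() && !buffer.is_null());
}

Note the layout trade-off: the union overlaps the outline pointer and capacity with the inline byte array, so small buffers avoid heap allocation entirely while costing only m_size and m_is_null on top of the storage itself.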