Everywhere: Rename ASSERT => VERIFY
(...and ASSERT_NOT_REACHED => VERIFY_NOT_REACHED) Since all of these checks are done in release builds as well, let's rename them to VERIFY to prevent confusion, as everyone is used to assertions being compiled out in release. We can introduce a new ASSERT macro that is specifically for debug checks, but I'm doing this wholesale conversion first since we've accumulated thousands of these already, and it's not immediately obvious which ones are suitable for ASSERT.
This commit is contained in:
    parent b33a6a443e
    commit 5d180d1f99
Notes (sideshowbarker, 2024-07-18 21:58:46 +09:00):
    Author: https://github.com/awesomekling
    Commit: https://github.com/SerenityOS/serenity/commit/5d180d1f996
725 changed files with 3448 additions and 3448 deletions
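For orientation, here is a minimal sketch of the distinction the commit message draws. This is illustrative only and not the actual contents of AK/Assertions.h: VERIFY stays active in every build type, while a debug-only ASSERT (the kind the message says could be reintroduced later) would compile to nothing in release builds.

    #include <stdlib.h>

    // Always-on check: enforced in debug and release builds alike.
    #define VERIFY(expr)     \
        do {                 \
            if (!(expr))     \
                abort();     \
        } while (0)
    #define VERIFY_NOT_REACHED() VERIFY(false)

    // Hypothetical debug-only ASSERT that could be layered on top later;
    // it disappears entirely when NDEBUG is defined.
    #ifdef NDEBUG
    #    define ASSERT(expr) ((void)0)
    #else
    #    define ASSERT(expr) VERIFY(expr)
    #endif

With that split, checks that must never be skipped (bounds checks, unreachable branches) keep their VERIFY cost in release builds, and only cheap sanity checks would move to the debug-only ASSERT.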
|
@ -43,12 +43,12 @@ struct Array {
|
||||||
|
|
||||||
constexpr const T& at(size_t index) const
|
constexpr const T& at(size_t index) const
|
||||||
{
|
{
|
||||||
ASSERT(index < size());
|
VERIFY(index < size());
|
||||||
return (*this)[index];
|
return (*this)[index];
|
||||||
}
|
}
|
||||||
constexpr T& at(size_t index)
|
constexpr T& at(size_t index)
|
||||||
{
|
{
|
||||||
ASSERT(index < size());
|
VERIFY(index < size());
|
||||||
return (*this)[index];
|
return (*this)[index];
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -31,9 +31,9 @@
|
||||||
#else
|
#else
|
||||||
# include <assert.h>
|
# include <assert.h>
|
||||||
# ifndef __serenity__
|
# ifndef __serenity__
|
||||||
# define ASSERT assert
|
# define VERIFY assert
|
||||||
# define ASSERT_NOT_REACHED() assert(false)
|
# define VERIFY_NOT_REACHED() assert(false)
|
||||||
# define RELEASE_ASSERT assert
|
# define RELEASE_ASSERT assert
|
||||||
# define TODO ASSERT_NOT_REACHED
|
# define TODO VERIFY_NOT_REACHED
|
||||||
# endif
|
# endif
|
||||||
#endif
|
#endif
|
||||||
|
|
30
AK/Bitmap.h
30
AK/Bitmap.h
|
@ -85,12 +85,12 @@ public:
|
||||||
size_t size_in_bytes() const { return ceil_div(m_size, static_cast<size_t>(8)); }
|
size_t size_in_bytes() const { return ceil_div(m_size, static_cast<size_t>(8)); }
|
||||||
bool get(size_t index) const
|
bool get(size_t index) const
|
||||||
{
|
{
|
||||||
ASSERT(index < m_size);
|
VERIFY(index < m_size);
|
||||||
return 0 != (m_data[index / 8] & (1u << (index % 8)));
|
return 0 != (m_data[index / 8] & (1u << (index % 8)));
|
||||||
}
|
}
|
||||||
void set(size_t index, bool value) const
|
void set(size_t index, bool value) const
|
||||||
{
|
{
|
||||||
ASSERT(index < m_size);
|
VERIFY(index < m_size);
|
||||||
if (value)
|
if (value)
|
||||||
m_data[index / 8] |= static_cast<u8>((1u << (index % 8)));
|
m_data[index / 8] |= static_cast<u8>((1u << (index % 8)));
|
||||||
else
|
else
|
||||||
|
@ -104,8 +104,8 @@ public:
|
||||||
|
|
||||||
size_t count_in_range(size_t start, size_t len, bool value) const
|
size_t count_in_range(size_t start, size_t len, bool value) const
|
||||||
{
|
{
|
||||||
ASSERT(start < m_size);
|
VERIFY(start < m_size);
|
||||||
ASSERT(start + len <= m_size);
|
VERIFY(start + len <= m_size);
|
||||||
if (len == 0)
|
if (len == 0)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
@ -153,8 +153,8 @@ public:
|
||||||
|
|
||||||
void grow(size_t size, bool default_value)
|
void grow(size_t size, bool default_value)
|
||||||
{
|
{
|
||||||
ASSERT(m_owned);
|
VERIFY(m_owned);
|
||||||
ASSERT(size > m_size);
|
VERIFY(size > m_size);
|
||||||
|
|
||||||
auto previous_size_bytes = size_in_bytes();
|
auto previous_size_bytes = size_in_bytes();
|
||||||
auto previous_size = m_size;
|
auto previous_size = m_size;
|
||||||
|
@ -176,8 +176,8 @@ public:
|
||||||
template<bool VALUE>
|
template<bool VALUE>
|
||||||
void set_range(size_t start, size_t len)
|
void set_range(size_t start, size_t len)
|
||||||
{
|
{
|
||||||
ASSERT(start < m_size);
|
VERIFY(start < m_size);
|
||||||
ASSERT(start + len <= m_size);
|
VERIFY(start + len <= m_size);
|
||||||
if (len == 0)
|
if (len == 0)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
@ -228,7 +228,7 @@ public:
|
||||||
template<bool VALUE>
|
template<bool VALUE>
|
||||||
Optional<size_t> find_one_anywhere(size_t hint = 0) const
|
Optional<size_t> find_one_anywhere(size_t hint = 0) const
|
||||||
{
|
{
|
||||||
ASSERT(hint < m_size);
|
VERIFY(hint < m_size);
|
||||||
const u8* end = &m_data[m_size / 8];
|
const u8* end = &m_data[m_size / 8];
|
||||||
|
|
||||||
for (;;) {
|
for (;;) {
|
||||||
|
@ -249,7 +249,7 @@ public:
|
||||||
byte = m_data[i];
|
byte = m_data[i];
|
||||||
if constexpr (!VALUE)
|
if constexpr (!VALUE)
|
||||||
byte = ~byte;
|
byte = ~byte;
|
||||||
ASSERT(byte != 0);
|
VERIFY(byte != 0);
|
||||||
return i * 8 + __builtin_ffs(byte) - 1;
|
return i * 8 + __builtin_ffs(byte) - 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -264,7 +264,7 @@ public:
|
||||||
u8 byte = VALUE ? 0x00 : 0xff;
|
u8 byte = VALUE ? 0x00 : 0xff;
|
||||||
size_t i = (const u8*)ptr32 - &m_data[0];
|
size_t i = (const u8*)ptr32 - &m_data[0];
|
||||||
size_t byte_count = m_size / 8;
|
size_t byte_count = m_size / 8;
|
||||||
ASSERT(i <= byte_count);
|
VERIFY(i <= byte_count);
|
||||||
while (i < byte_count && m_data[i] == byte)
|
while (i < byte_count && m_data[i] == byte)
|
||||||
i++;
|
i++;
|
||||||
if (i == byte_count) {
|
if (i == byte_count) {
|
||||||
|
@ -279,7 +279,7 @@ public:
|
||||||
byte = m_data[i];
|
byte = m_data[i];
|
||||||
if constexpr (!VALUE)
|
if constexpr (!VALUE)
|
||||||
byte = ~byte;
|
byte = ~byte;
|
||||||
ASSERT(byte != 0);
|
VERIFY(byte != 0);
|
||||||
return i * 8 + __builtin_ffs(byte) - 1;
|
return i * 8 + __builtin_ffs(byte) - 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -288,7 +288,7 @@ public:
|
||||||
val32 = *ptr32;
|
val32 = *ptr32;
|
||||||
if constexpr (!VALUE)
|
if constexpr (!VALUE)
|
||||||
val32 = ~val32;
|
val32 = ~val32;
|
||||||
ASSERT(val32 != 0);
|
VERIFY(val32 != 0);
|
||||||
return ((const u8*)ptr32 - &m_data[0]) * 8 + __builtin_ffsl(val32) - 1;
|
return ((const u8*)ptr32 - &m_data[0]) * 8 + __builtin_ffsl(val32) - 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -317,7 +317,7 @@ public:
|
||||||
byte = m_data[i];
|
byte = m_data[i];
|
||||||
if constexpr (!VALUE)
|
if constexpr (!VALUE)
|
||||||
byte = ~byte;
|
byte = ~byte;
|
||||||
ASSERT(byte != 0);
|
VERIFY(byte != 0);
|
||||||
return i * 8 + __builtin_ffs(byte) - 1;
|
return i * 8 + __builtin_ffs(byte) - 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -509,7 +509,7 @@ public:
|
||||||
: m_size(size)
|
: m_size(size)
|
||||||
, m_owned(true)
|
, m_owned(true)
|
||||||
{
|
{
|
||||||
ASSERT(m_size != 0);
|
VERIFY(m_size != 0);
|
||||||
m_data = reinterpret_cast<u8*>(kmalloc(size_in_bytes()));
|
m_data = reinterpret_cast<u8*>(kmalloc(size_in_bytes()));
|
||||||
fill(default_value);
|
fill(default_value);
|
||||||
}
|
}
|
||||||
|
|
|
@ -54,12 +54,12 @@ public:
|
||||||
|
|
||||||
u8& operator[](size_t i)
|
u8& operator[](size_t i)
|
||||||
{
|
{
|
||||||
ASSERT(i < m_size);
|
VERIFY(i < m_size);
|
||||||
return m_data[i];
|
return m_data[i];
|
||||||
}
|
}
|
||||||
const u8& operator[](size_t i) const
|
const u8& operator[](size_t i) const
|
||||||
{
|
{
|
||||||
ASSERT(i < m_size);
|
VERIFY(i < m_size);
|
||||||
return m_data[i];
|
return m_data[i];
|
||||||
}
|
}
|
||||||
bool is_empty() const { return !m_size; }
|
bool is_empty() const { return !m_size; }
|
||||||
|
@ -83,7 +83,7 @@ public:
|
||||||
// NOTE: trim() does not reallocate.
|
// NOTE: trim() does not reallocate.
|
||||||
void trim(size_t size)
|
void trim(size_t size)
|
||||||
{
|
{
|
||||||
ASSERT(size <= m_size);
|
VERIFY(size <= m_size);
|
||||||
m_size = size;
|
m_size = size;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -145,12 +145,12 @@ public:
|
||||||
|
|
||||||
u8& operator[](size_t i)
|
u8& operator[](size_t i)
|
||||||
{
|
{
|
||||||
ASSERT(m_impl);
|
VERIFY(m_impl);
|
||||||
return (*m_impl)[i];
|
return (*m_impl)[i];
|
||||||
}
|
}
|
||||||
u8 operator[](size_t i) const
|
u8 operator[](size_t i) const
|
||||||
{
|
{
|
||||||
ASSERT(m_impl);
|
VERIFY(m_impl);
|
||||||
return (*m_impl)[i];
|
return (*m_impl)[i];
|
||||||
}
|
}
|
||||||
bool is_empty() const { return !m_impl || m_impl->is_empty(); }
|
bool is_empty() const { return !m_impl || m_impl->is_empty(); }
|
||||||
|
@ -215,7 +215,7 @@ public:
|
||||||
return {};
|
return {};
|
||||||
|
|
||||||
// I cannot hand you a slice I don't have
|
// I cannot hand you a slice I don't have
|
||||||
ASSERT(offset + size <= this->size());
|
VERIFY(offset + size <= this->size());
|
||||||
|
|
||||||
return copy(offset_pointer(offset), size);
|
return copy(offset_pointer(offset), size);
|
||||||
}
|
}
|
||||||
|
@ -232,7 +232,7 @@ public:
|
||||||
{
|
{
|
||||||
if (data_size == 0)
|
if (data_size == 0)
|
||||||
return;
|
return;
|
||||||
ASSERT(data != nullptr);
|
VERIFY(data != nullptr);
|
||||||
int old_size = size();
|
int old_size = size();
|
||||||
grow(size() + data_size);
|
grow(size() + data_size);
|
||||||
__builtin_memcpy(this->data() + old_size, data, data_size);
|
__builtin_memcpy(this->data() + old_size, data, data_size);
|
||||||
|
@ -246,7 +246,7 @@ public:
|
||||||
void overwrite(size_t offset, const void* data, size_t data_size)
|
void overwrite(size_t offset, const void* data, size_t data_size)
|
||||||
{
|
{
|
||||||
// make sure we're not told to write past the end
|
// make sure we're not told to write past the end
|
||||||
ASSERT(offset + data_size <= size());
|
VERIFY(offset + data_size <= size());
|
||||||
__builtin_memcpy(this->data() + offset, data, data_size);
|
__builtin_memcpy(this->data() + offset, data, data_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -285,7 +285,7 @@ inline ByteBufferImpl::ByteBufferImpl(const void* data, size_t size)
|
||||||
|
|
||||||
inline void ByteBufferImpl::grow(size_t size)
|
inline void ByteBufferImpl::grow(size_t size)
|
||||||
{
|
{
|
||||||
ASSERT(size > m_size);
|
VERIFY(size > m_size);
|
||||||
if (size == 0) {
|
if (size == 0) {
|
||||||
if (m_data)
|
if (m_data)
|
||||||
kfree(m_data);
|
kfree(m_data);
|
||||||
|
|
|
@ -156,13 +156,13 @@ public:
|
||||||
|
|
||||||
ALWAYS_INLINE constexpr bool operator!() const
|
ALWAYS_INLINE constexpr bool operator!() const
|
||||||
{
|
{
|
||||||
ASSERT(!m_overflow);
|
VERIFY(!m_overflow);
|
||||||
return !m_value;
|
return !m_value;
|
||||||
}
|
}
|
||||||
|
|
||||||
ALWAYS_INLINE constexpr T value() const
|
ALWAYS_INLINE constexpr T value() const
|
||||||
{
|
{
|
||||||
ASSERT(!m_overflow);
|
VERIFY(!m_overflow);
|
||||||
return m_value;
|
return m_value;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -42,7 +42,7 @@
|
||||||
#ifndef DBGLN_NO_COMPILETIME_FORMAT_CHECK
|
#ifndef DBGLN_NO_COMPILETIME_FORMAT_CHECK
|
||||||
namespace AK::Format::Detail {
|
namespace AK::Format::Detail {
|
||||||
|
|
||||||
// We have to define a local "purely constexpr" Array that doesn't lead back to us (via e.g. ASSERT)
|
// We have to define a local "purely constexpr" Array that doesn't lead back to us (via e.g. VERIFY)
|
||||||
template<typename T, size_t Size>
|
template<typename T, size_t Size>
|
||||||
struct Array {
|
struct Array {
|
||||||
constexpr static size_t size() { return Size; }
|
constexpr static size_t size() { return Size; }
|
||||||
|
|
|
@ -50,7 +50,7 @@ public:
|
||||||
|
|
||||||
T dequeue_end()
|
T dequeue_end()
|
||||||
{
|
{
|
||||||
ASSERT(!this->is_empty());
|
VERIFY(!this->is_empty());
|
||||||
auto& slot = this->elements()[(this->m_head + this->m_size - 1) % Capacity];
|
auto& slot = this->elements()[(this->m_head + this->m_size - 1) % Capacity];
|
||||||
T value = move(slot);
|
T value = move(slot);
|
||||||
slot.~T();
|
slot.~T();
|
||||||
|
|
|
@ -55,7 +55,7 @@ public:
|
||||||
}
|
}
|
||||||
|
|
||||||
const auto nwritten = write(bytes);
|
const auto nwritten = write(bytes);
|
||||||
ASSERT(nwritten == bytes.size());
|
VERIFY(nwritten == bytes.size());
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -123,7 +123,7 @@ public:
|
||||||
|
|
||||||
Bytes reserve_contigous_space(size_t count)
|
Bytes reserve_contigous_space(size_t count)
|
||||||
{
|
{
|
||||||
ASSERT(count <= remaining_contigous_space());
|
VERIFY(count <= remaining_contigous_space());
|
||||||
|
|
||||||
Bytes bytes { m_queue.m_storage + (m_queue.head_index() + m_queue.size()) % Capacity, count };
|
Bytes bytes { m_queue.m_storage + (m_queue.head_index() + m_queue.size()) % Capacity, count };
|
||||||
|
|
||||||
|
|
|
@ -76,7 +76,7 @@ public:
|
||||||
|
|
||||||
T dequeue()
|
T dequeue()
|
||||||
{
|
{
|
||||||
ASSERT(!is_empty());
|
VERIFY(!is_empty());
|
||||||
auto& slot = elements()[m_head];
|
auto& slot = elements()[m_head];
|
||||||
T value = move(slot);
|
T value = move(slot);
|
||||||
slot.~T();
|
slot.~T();
|
||||||
|
|
|
@ -91,22 +91,22 @@ public:
|
||||||
|
|
||||||
T& first()
|
T& first()
|
||||||
{
|
{
|
||||||
ASSERT(m_head);
|
VERIFY(m_head);
|
||||||
return m_head->value;
|
return m_head->value;
|
||||||
}
|
}
|
||||||
const T& first() const
|
const T& first() const
|
||||||
{
|
{
|
||||||
ASSERT(m_head);
|
VERIFY(m_head);
|
||||||
return m_head->value;
|
return m_head->value;
|
||||||
}
|
}
|
||||||
T& last()
|
T& last()
|
||||||
{
|
{
|
||||||
ASSERT(m_head);
|
VERIFY(m_head);
|
||||||
return m_tail->value;
|
return m_tail->value;
|
||||||
}
|
}
|
||||||
const T& last() const
|
const T& last() const
|
||||||
{
|
{
|
||||||
ASSERT(m_head);
|
VERIFY(m_head);
|
||||||
return m_tail->value;
|
return m_tail->value;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -117,13 +117,13 @@ public:
|
||||||
requires { T(value); }, "Conversion operator is missing.");
|
requires { T(value); }, "Conversion operator is missing.");
|
||||||
auto* node = new Node(forward<U>(value));
|
auto* node = new Node(forward<U>(value));
|
||||||
if (!m_head) {
|
if (!m_head) {
|
||||||
ASSERT(!m_tail);
|
VERIFY(!m_tail);
|
||||||
m_head = node;
|
m_head = node;
|
||||||
m_tail = node;
|
m_tail = node;
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
ASSERT(m_tail);
|
VERIFY(m_tail);
|
||||||
ASSERT(!node->next);
|
VERIFY(!node->next);
|
||||||
m_tail->next = node;
|
m_tail->next = node;
|
||||||
node->prev = m_tail;
|
node->prev = m_tail;
|
||||||
m_tail = node;
|
m_tail = node;
|
||||||
|
@ -135,13 +135,13 @@ public:
|
||||||
static_assert(IsSame<T, U>::value);
|
static_assert(IsSame<T, U>::value);
|
||||||
auto* node = new Node(forward<U>(value));
|
auto* node = new Node(forward<U>(value));
|
||||||
if (!m_head) {
|
if (!m_head) {
|
||||||
ASSERT(!m_tail);
|
VERIFY(!m_tail);
|
||||||
m_head = node;
|
m_head = node;
|
||||||
m_tail = node;
|
m_tail = node;
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
ASSERT(m_tail);
|
VERIFY(m_tail);
|
||||||
ASSERT(!node->prev);
|
VERIFY(!node->prev);
|
||||||
m_head->prev = node;
|
m_head->prev = node;
|
||||||
node->next = m_head;
|
node->next = m_head;
|
||||||
m_head = node;
|
m_head = node;
|
||||||
|
@ -174,20 +174,20 @@ public:
|
||||||
|
|
||||||
void remove(Iterator it)
|
void remove(Iterator it)
|
||||||
{
|
{
|
||||||
ASSERT(it.m_node);
|
VERIFY(it.m_node);
|
||||||
auto* node = it.m_node;
|
auto* node = it.m_node;
|
||||||
if (node->prev) {
|
if (node->prev) {
|
||||||
ASSERT(node != m_head);
|
VERIFY(node != m_head);
|
||||||
node->prev->next = node->next;
|
node->prev->next = node->next;
|
||||||
} else {
|
} else {
|
||||||
ASSERT(node == m_head);
|
VERIFY(node == m_head);
|
||||||
m_head = node->next;
|
m_head = node->next;
|
||||||
}
|
}
|
||||||
if (node->next) {
|
if (node->next) {
|
||||||
ASSERT(node != m_tail);
|
VERIFY(node != m_tail);
|
||||||
node->next->prev = node->prev;
|
node->next->prev = node->prev;
|
||||||
} else {
|
} else {
|
||||||
ASSERT(node == m_tail);
|
VERIFY(node == m_tail);
|
||||||
m_tail = node->prev;
|
m_tail = node->prev;
|
||||||
}
|
}
|
||||||
delete node;
|
delete node;
|
||||||
|
|
|
@ -38,8 +38,8 @@ struct FlyStringImplTraits : public AK::Traits<StringImpl*> {
|
||||||
static unsigned hash(const StringImpl* s) { return s ? s->hash() : 0; }
|
static unsigned hash(const StringImpl* s) { return s ? s->hash() : 0; }
|
||||||
static bool equals(const StringImpl* a, const StringImpl* b)
|
static bool equals(const StringImpl* a, const StringImpl* b)
|
||||||
{
|
{
|
||||||
ASSERT(a);
|
VERIFY(a);
|
||||||
ASSERT(b);
|
VERIFY(b);
|
||||||
return *a == *b;
|
return *a == *b;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
@ -70,7 +70,7 @@ FlyString::FlyString(const String& string)
|
||||||
string.impl()->set_fly({}, true);
|
string.impl()->set_fly({}, true);
|
||||||
m_impl = string.impl();
|
m_impl = string.impl();
|
||||||
} else {
|
} else {
|
||||||
ASSERT((*it)->is_fly());
|
VERIFY((*it)->is_fly());
|
||||||
m_impl = *it;
|
m_impl = *it;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -47,7 +47,7 @@ constexpr size_t use_next_index = NumericLimits<size_t>::max();
|
||||||
// 65 bytes. Choosing a larger power of two won't hurt and is a bit of mitigation against out-of-bounds accesses.
|
// 65 bytes. Choosing a larger power of two won't hurt and is a bit of mitigation against out-of-bounds accesses.
|
||||||
inline size_t convert_unsigned_to_string(u64 value, Array<u8, 128>& buffer, u8 base, bool upper_case)
|
inline size_t convert_unsigned_to_string(u64 value, Array<u8, 128>& buffer, u8 base, bool upper_case)
|
||||||
{
|
{
|
||||||
ASSERT(base >= 2 && base <= 16);
|
VERIFY(base >= 2 && base <= 16);
|
||||||
|
|
||||||
static constexpr const char* lowercase_lookup = "0123456789abcdef";
|
static constexpr const char* lowercase_lookup = "0123456789abcdef";
|
||||||
static constexpr const char* uppercase_lookup = "0123456789ABCDEF";
|
static constexpr const char* uppercase_lookup = "0123456789ABCDEF";
|
||||||
|
@ -80,7 +80,7 @@ void vformat_impl(TypeErasedFormatParams& params, FormatBuilder& builder, Format
|
||||||
|
|
||||||
FormatParser::FormatSpecifier specifier;
|
FormatParser::FormatSpecifier specifier;
|
||||||
if (!parser.consume_specifier(specifier)) {
|
if (!parser.consume_specifier(specifier)) {
|
||||||
ASSERT(parser.is_eof());
|
VERIFY(parser.is_eof());
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -118,9 +118,9 @@ size_t TypeErasedParameter::to_size() const
|
||||||
else if (type == TypeErasedParameter::Type::Int64)
|
else if (type == TypeErasedParameter::Type::Int64)
|
||||||
svalue = *reinterpret_cast<const i64*>(value);
|
svalue = *reinterpret_cast<const i64*>(value);
|
||||||
else
|
else
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
|
|
||||||
ASSERT(svalue >= 0);
|
VERIFY(svalue >= 0);
|
||||||
|
|
||||||
return static_cast<size_t>(svalue);
|
return static_cast<size_t>(svalue);
|
||||||
}
|
}
|
||||||
|
@ -163,7 +163,7 @@ bool FormatParser::consume_number(size_t& value)
|
||||||
}
|
}
|
||||||
bool FormatParser::consume_specifier(FormatSpecifier& specifier)
|
bool FormatParser::consume_specifier(FormatSpecifier& specifier)
|
||||||
{
|
{
|
||||||
ASSERT(!next_is('}'));
|
VERIFY(!next_is('}'));
|
||||||
|
|
||||||
if (!consume_specific('{'))
|
if (!consume_specific('{'))
|
||||||
return false;
|
return false;
|
||||||
|
@ -176,7 +176,7 @@ bool FormatParser::consume_specifier(FormatSpecifier& specifier)
|
||||||
|
|
||||||
size_t level = 1;
|
size_t level = 1;
|
||||||
while (level > 0) {
|
while (level > 0) {
|
||||||
ASSERT(!is_eof());
|
VERIFY(!is_eof());
|
||||||
|
|
||||||
if (consume_specific('{')) {
|
if (consume_specific('{')) {
|
||||||
++level;
|
++level;
|
||||||
|
@ -194,7 +194,7 @@ bool FormatParser::consume_specifier(FormatSpecifier& specifier)
|
||||||
specifier.flags = m_input.substring_view(begin, tell() - begin - 1);
|
specifier.flags = m_input.substring_view(begin, tell() - begin - 1);
|
||||||
} else {
|
} else {
|
||||||
if (!consume_specific('}'))
|
if (!consume_specific('}'))
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
|
|
||||||
specifier.flags = "";
|
specifier.flags = "";
|
||||||
}
|
}
|
||||||
|
@ -210,7 +210,7 @@ bool FormatParser::consume_replacement_field(size_t& index)
|
||||||
index = use_next_index;
|
index = use_next_index;
|
||||||
|
|
||||||
if (!consume_specific('}'))
|
if (!consume_specific('}'))
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@ -426,7 +426,7 @@ void vformat(const LogStream& stream, StringView fmtstr, TypeErasedFormatParams
|
||||||
void StandardFormatter::parse(TypeErasedFormatParams& params, FormatParser& parser)
|
void StandardFormatter::parse(TypeErasedFormatParams& params, FormatParser& parser)
|
||||||
{
|
{
|
||||||
if (StringView { "<^>" }.contains(parser.peek(1))) {
|
if (StringView { "<^>" }.contains(parser.peek(1))) {
|
||||||
ASSERT(!parser.next_is(is_any_of("{}")));
|
VERIFY(!parser.next_is(is_any_of("{}")));
|
||||||
m_fill = parser.consume();
|
m_fill = parser.consume();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -498,21 +498,21 @@ void StandardFormatter::parse(TypeErasedFormatParams& params, FormatParser& pars
|
||||||
if (!parser.is_eof())
|
if (!parser.is_eof())
|
||||||
dbgln("{} did not consume '{}'", __PRETTY_FUNCTION__, parser.remaining());
|
dbgln("{} did not consume '{}'", __PRETTY_FUNCTION__, parser.remaining());
|
||||||
|
|
||||||
ASSERT(parser.is_eof());
|
VERIFY(parser.is_eof());
|
||||||
}
|
}
|
||||||
|
|
||||||
void Formatter<StringView>::format(FormatBuilder& builder, StringView value)
|
void Formatter<StringView>::format(FormatBuilder& builder, StringView value)
|
||||||
{
|
{
|
||||||
if (m_sign_mode != FormatBuilder::SignMode::Default)
|
if (m_sign_mode != FormatBuilder::SignMode::Default)
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
if (m_alternative_form)
|
if (m_alternative_form)
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
if (m_zero_pad)
|
if (m_zero_pad)
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
if (m_mode != Mode::Default && m_mode != Mode::String && m_mode != Mode::Character)
|
if (m_mode != Mode::Default && m_mode != Mode::String && m_mode != Mode::Character)
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
if (m_width.has_value() && m_precision.has_value())
|
if (m_width.has_value() && m_precision.has_value())
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
|
|
||||||
m_width = m_width.value_or(0);
|
m_width = m_width.value_or(0);
|
||||||
m_precision = m_precision.value_or(NumericLimits<size_t>::max());
|
m_precision = m_precision.value_or(NumericLimits<size_t>::max());
|
||||||
|
@ -530,7 +530,7 @@ void Formatter<T, typename EnableIf<IsIntegral<T>::value>::Type>::format(FormatB
|
||||||
{
|
{
|
||||||
if (m_mode == Mode::Character) {
|
if (m_mode == Mode::Character) {
|
||||||
// FIXME: We just support ASCII for now, in the future maybe unicode?
|
// FIXME: We just support ASCII for now, in the future maybe unicode?
|
||||||
ASSERT(value >= 0 && value <= 127);
|
VERIFY(value >= 0 && value <= 127);
|
||||||
|
|
||||||
m_mode = Mode::String;
|
m_mode = Mode::String;
|
||||||
|
|
||||||
|
@ -539,17 +539,17 @@ void Formatter<T, typename EnableIf<IsIntegral<T>::value>::Type>::format(FormatB
|
||||||
}
|
}
|
||||||
|
|
||||||
if (m_precision.has_value())
|
if (m_precision.has_value())
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
|
|
||||||
if (m_mode == Mode::Pointer) {
|
if (m_mode == Mode::Pointer) {
|
||||||
if (m_sign_mode != FormatBuilder::SignMode::Default)
|
if (m_sign_mode != FormatBuilder::SignMode::Default)
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
if (m_align != FormatBuilder::Align::Default)
|
if (m_align != FormatBuilder::Align::Default)
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
if (m_alternative_form)
|
if (m_alternative_form)
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
if (m_width.has_value())
|
if (m_width.has_value())
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
|
|
||||||
m_mode = Mode::Hexadecimal;
|
m_mode = Mode::Hexadecimal;
|
||||||
m_alternative_form = true;
|
m_alternative_form = true;
|
||||||
|
@ -574,7 +574,7 @@ void Formatter<T, typename EnableIf<IsIntegral<T>::value>::Type>::format(FormatB
|
||||||
base = 16;
|
base = 16;
|
||||||
upper_case = true;
|
upper_case = true;
|
||||||
} else {
|
} else {
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
}
|
}
|
||||||
|
|
||||||
m_width = m_width.value_or(0);
|
m_width = m_width.value_or(0);
|
||||||
|
@ -621,7 +621,7 @@ void Formatter<double>::format(FormatBuilder& builder, double value)
|
||||||
base = 16;
|
base = 16;
|
||||||
upper_case = true;
|
upper_case = true;
|
||||||
} else {
|
} else {
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
}
|
}
|
||||||
|
|
||||||
m_width = m_width.value_or(0);
|
m_width = m_width.value_or(0);
|
||||||
|
@ -647,7 +647,7 @@ void vout(FILE* file, StringView fmtstr, TypeErasedFormatParams params, bool new
|
||||||
|
|
||||||
const auto string = builder.string_view();
|
const auto string = builder.string_view();
|
||||||
const auto retval = ::fwrite(string.characters_without_null_termination(), 1, string.length(), file);
|
const auto retval = ::fwrite(string.characters_without_null_termination(), 1, string.length(), file);
|
||||||
ASSERT(static_cast<size_t>(retval) == string.length());
|
VERIFY(static_cast<size_t>(retval) == string.length());
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
|
|
@ -87,7 +87,7 @@ struct TypeErasedParameter {
|
||||||
return Type::Int64;
|
return Type::Int64;
|
||||||
}
|
}
|
||||||
|
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
}
|
}
|
||||||
|
|
||||||
template<typename T>
|
template<typename T>
|
||||||
|
|
|
@ -53,7 +53,7 @@ public:
|
||||||
|
|
||||||
Out operator()(In... in) const
|
Out operator()(In... in) const
|
||||||
{
|
{
|
||||||
ASSERT(m_callable_wrapper);
|
VERIFY(m_callable_wrapper);
|
||||||
return m_callable_wrapper->call(forward<In>(in)...);
|
return m_callable_wrapper->call(forward<In>(in)...);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -79,14 +79,14 @@ bool GenericLexer::next_is(const char* expected) const
|
||||||
// Go back to the previous character
|
// Go back to the previous character
|
||||||
void GenericLexer::retreat()
|
void GenericLexer::retreat()
|
||||||
{
|
{
|
||||||
ASSERT(m_index > 0);
|
VERIFY(m_index > 0);
|
||||||
m_index--;
|
m_index--;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Consume a character and advance the parser index
|
// Consume a character and advance the parser index
|
||||||
char GenericLexer::consume()
|
char GenericLexer::consume()
|
||||||
{
|
{
|
||||||
ASSERT(!is_eof());
|
VERIFY(!is_eof());
|
||||||
return m_input[m_index++];
|
return m_input[m_index++];
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -157,7 +157,7 @@ public:
|
||||||
|
|
||||||
void ensure_capacity(size_t capacity)
|
void ensure_capacity(size_t capacity)
|
||||||
{
|
{
|
||||||
ASSERT(capacity >= size());
|
VERIFY(capacity >= size());
|
||||||
rehash(capacity * 2);
|
rehash(capacity * 2);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -256,11 +256,11 @@ public:
|
||||||
|
|
||||||
void remove(Iterator iterator)
|
void remove(Iterator iterator)
|
||||||
{
|
{
|
||||||
ASSERT(iterator.m_bucket);
|
VERIFY(iterator.m_bucket);
|
||||||
auto& bucket = *iterator.m_bucket;
|
auto& bucket = *iterator.m_bucket;
|
||||||
ASSERT(bucket.used);
|
VERIFY(bucket.used);
|
||||||
ASSERT(!bucket.end);
|
VERIFY(!bucket.end);
|
||||||
ASSERT(!bucket.deleted);
|
VERIFY(!bucket.deleted);
|
||||||
bucket.slot()->~T();
|
bucket.slot()->~T();
|
||||||
bucket.used = false;
|
bucket.used = false;
|
||||||
bucket.deleted = true;
|
bucket.deleted = true;
|
||||||
|
|
|
@ -49,7 +49,7 @@ public:
|
||||||
return allocated_id;
|
return allocated_id;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
}
|
}
|
||||||
|
|
||||||
void deallocate(int id)
|
void deallocate(int id)
|
||||||
|
|
|
@ -65,7 +65,7 @@ public:
|
||||||
|
|
||||||
constexpr u8 operator[](int i) const
|
constexpr u8 operator[](int i) const
|
||||||
{
|
{
|
||||||
ASSERT(i >= 0 && i < 4);
|
VERIFY(i >= 0 && i < 4);
|
||||||
return octet(SubnetClass(i));
|
return octet(SubnetClass(i));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -178,7 +178,7 @@ template<typename T>
|
||||||
inline void InlineLinkedList<T>::prepend(T* node)
|
inline void InlineLinkedList<T>::prepend(T* node)
|
||||||
{
|
{
|
||||||
if (!m_head) {
|
if (!m_head) {
|
||||||
ASSERT(!m_tail);
|
VERIFY(!m_tail);
|
||||||
m_head = node;
|
m_head = node;
|
||||||
m_tail = node;
|
m_tail = node;
|
||||||
node->set_prev(0);
|
node->set_prev(0);
|
||||||
|
@ -186,7 +186,7 @@ inline void InlineLinkedList<T>::prepend(T* node)
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
ASSERT(m_tail);
|
VERIFY(m_tail);
|
||||||
m_head->set_prev(node);
|
m_head->set_prev(node);
|
||||||
node->set_next(m_head);
|
node->set_next(m_head);
|
||||||
node->set_prev(0);
|
node->set_prev(0);
|
||||||
|
@ -197,7 +197,7 @@ template<typename T>
|
||||||
inline void InlineLinkedList<T>::append(T* node)
|
inline void InlineLinkedList<T>::append(T* node)
|
||||||
{
|
{
|
||||||
if (!m_tail) {
|
if (!m_tail) {
|
||||||
ASSERT(!m_head);
|
VERIFY(!m_head);
|
||||||
m_head = node;
|
m_head = node;
|
||||||
m_tail = node;
|
m_tail = node;
|
||||||
node->set_prev(0);
|
node->set_prev(0);
|
||||||
|
@ -205,7 +205,7 @@ inline void InlineLinkedList<T>::append(T* node)
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
ASSERT(m_head);
|
VERIFY(m_head);
|
||||||
m_tail->set_next(node);
|
m_tail->set_next(node);
|
||||||
node->set_prev(m_tail);
|
node->set_prev(m_tail);
|
||||||
node->set_next(0);
|
node->set_next(0);
|
||||||
|
@ -215,18 +215,18 @@ inline void InlineLinkedList<T>::append(T* node)
|
||||||
template<typename T>
|
template<typename T>
|
||||||
inline void InlineLinkedList<T>::insert_before(T* before_node, T* node)
|
inline void InlineLinkedList<T>::insert_before(T* before_node, T* node)
|
||||||
{
|
{
|
||||||
ASSERT(before_node);
|
VERIFY(before_node);
|
||||||
ASSERT(node);
|
VERIFY(node);
|
||||||
ASSERT(before_node != node);
|
VERIFY(before_node != node);
|
||||||
ASSERT(!is_empty());
|
VERIFY(!is_empty());
|
||||||
if (m_head == before_node) {
|
if (m_head == before_node) {
|
||||||
ASSERT(!before_node->prev());
|
VERIFY(!before_node->prev());
|
||||||
m_head = node;
|
m_head = node;
|
||||||
node->set_prev(0);
|
node->set_prev(0);
|
||||||
node->set_next(before_node);
|
node->set_next(before_node);
|
||||||
before_node->set_prev(node);
|
before_node->set_prev(node);
|
||||||
} else {
|
} else {
|
||||||
ASSERT(before_node->prev());
|
VERIFY(before_node->prev());
|
||||||
node->set_prev(before_node->prev());
|
node->set_prev(before_node->prev());
|
||||||
before_node->prev()->set_next(node);
|
before_node->prev()->set_next(node);
|
||||||
node->set_next(before_node);
|
node->set_next(before_node);
|
||||||
|
@ -237,18 +237,18 @@ inline void InlineLinkedList<T>::insert_before(T* before_node, T* node)
|
||||||
template<typename T>
|
template<typename T>
|
||||||
inline void InlineLinkedList<T>::insert_after(T* after_node, T* node)
|
inline void InlineLinkedList<T>::insert_after(T* after_node, T* node)
|
||||||
{
|
{
|
||||||
ASSERT(after_node);
|
VERIFY(after_node);
|
||||||
ASSERT(node);
|
VERIFY(node);
|
||||||
ASSERT(after_node != node);
|
VERIFY(after_node != node);
|
||||||
ASSERT(!is_empty());
|
VERIFY(!is_empty());
|
||||||
if (m_tail == after_node) {
|
if (m_tail == after_node) {
|
||||||
ASSERT(!after_node->next());
|
VERIFY(!after_node->next());
|
||||||
m_tail = node;
|
m_tail = node;
|
||||||
node->set_prev(after_node);
|
node->set_prev(after_node);
|
||||||
node->set_next(0);
|
node->set_next(0);
|
||||||
after_node->set_next(node);
|
after_node->set_next(node);
|
||||||
} else {
|
} else {
|
||||||
ASSERT(after_node->next());
|
VERIFY(after_node->next());
|
||||||
node->set_prev(after_node);
|
node->set_prev(after_node);
|
||||||
node->set_next(after_node->next());
|
node->set_next(after_node->next());
|
||||||
after_node->next()->set_prev(node);
|
after_node->next()->set_prev(node);
|
||||||
|
@ -260,18 +260,18 @@ template<typename T>
|
||||||
inline void InlineLinkedList<T>::remove(T* node)
|
inline void InlineLinkedList<T>::remove(T* node)
|
||||||
{
|
{
|
||||||
if (node->prev()) {
|
if (node->prev()) {
|
||||||
ASSERT(node != m_head);
|
VERIFY(node != m_head);
|
||||||
node->prev()->set_next(node->next());
|
node->prev()->set_next(node->next());
|
||||||
} else {
|
} else {
|
||||||
ASSERT(node == m_head);
|
VERIFY(node == m_head);
|
||||||
m_head = node->next();
|
m_head = node->next();
|
||||||
}
|
}
|
||||||
|
|
||||||
if (node->next()) {
|
if (node->next()) {
|
||||||
ASSERT(node != m_tail);
|
VERIFY(node != m_tail);
|
||||||
node->next()->set_prev(node->prev());
|
node->next()->set_prev(node->prev());
|
||||||
} else {
|
} else {
|
||||||
ASSERT(node == m_tail);
|
VERIFY(node == m_tail);
|
||||||
m_tail = node->prev();
|
m_tail = node->prev();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -310,15 +310,15 @@ inline void InlineLinkedList<T>::append(InlineLinkedList<T>& other)
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
ASSERT(tail());
|
VERIFY(tail());
|
||||||
ASSERT(other.head());
|
VERIFY(other.head());
|
||||||
T* other_head = other.head();
|
T* other_head = other.head();
|
||||||
T* other_tail = other.tail();
|
T* other_tail = other.tail();
|
||||||
other.clear();
|
other.clear();
|
||||||
|
|
||||||
ASSERT(!m_tail->next());
|
VERIFY(!m_tail->next());
|
||||||
m_tail->set_next(other_head);
|
m_tail->set_next(other_head);
|
||||||
ASSERT(!other_head->prev());
|
VERIFY(!other_head->prev());
|
||||||
other_head->set_prev(m_tail);
|
other_head->set_prev(m_tail);
|
||||||
m_tail = other_tail;
|
m_tail = other_tail;
|
||||||
}
|
}
|
||||||
|
|
|
@ -284,7 +284,7 @@ inline IntrusiveListNode::~IntrusiveListNode()
|
||||||
|
|
||||||
inline void IntrusiveListNode::remove()
|
inline void IntrusiveListNode::remove()
|
||||||
{
|
{
|
||||||
ASSERT(m_storage);
|
VERIFY(m_storage);
|
||||||
if (m_storage->m_first == this)
|
if (m_storage->m_first == this)
|
||||||
m_storage->m_first = m_next;
|
m_storage->m_first = m_next;
|
||||||
if (m_storage->m_last == this)
|
if (m_storage->m_last == this)
|
||||||
|
|
|
@ -92,7 +92,7 @@ public:
|
||||||
|
|
||||||
void finish()
|
void finish()
|
||||||
{
|
{
|
||||||
ASSERT(!m_finished);
|
VERIFY(!m_finished);
|
||||||
m_finished = true;
|
m_finished = true;
|
||||||
m_builder.append(']');
|
m_builder.append(']');
|
||||||
}
|
}
|
||||||
|
|
|
@ -191,7 +191,7 @@ inline void JsonValue::serialize(Builder& builder) const
|
||||||
builder.append("null");
|
builder.append("null");
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -141,7 +141,7 @@ public:
|
||||||
|
|
||||||
void finish()
|
void finish()
|
||||||
{
|
{
|
||||||
ASSERT(!m_finished);
|
VERIFY(!m_finished);
|
||||||
m_finished = true;
|
m_finished = true;
|
||||||
m_builder.append('}');
|
m_builder.append('}');
|
||||||
}
|
}
|
||||||
|
|
|
@ -45,7 +45,7 @@ JsonValue JsonPath::resolve(const JsonValue& top_root) const
|
||||||
root = JsonValue { root.as_array().at(element.index()) };
|
root = JsonValue { root.as_array().at(element.index()) };
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return root;
|
return root;
|
||||||
|
|
|
@ -56,13 +56,13 @@ public:
|
||||||
Kind kind() const { return m_kind; }
|
Kind kind() const { return m_kind; }
|
||||||
const String& key() const
|
const String& key() const
|
||||||
{
|
{
|
||||||
ASSERT(m_kind == Kind::Key);
|
VERIFY(m_kind == Kind::Key);
|
||||||
return m_key;
|
return m_key;
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t index() const
|
size_t index() const
|
||||||
{
|
{
|
||||||
ASSERT(m_kind == Kind::Index);
|
VERIFY(m_kind == Kind::Index);
|
||||||
return m_index;
|
return m_index;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -55,7 +55,7 @@ void JsonValue::copy_from(const JsonValue& other)
|
||||||
m_type = other.m_type;
|
m_type = other.m_type;
|
||||||
switch (m_type) {
|
switch (m_type) {
|
||||||
case Type::String:
|
case Type::String:
|
||||||
ASSERT(!m_value.as_string);
|
VERIFY(!m_value.as_string);
|
||||||
m_value.as_string = other.m_value.as_string;
|
m_value.as_string = other.m_value.as_string;
|
||||||
m_value.as_string->ref();
|
m_value.as_string->ref();
|
||||||
break;
|
break;
|
||||||
|
|
|
@ -127,56 +127,56 @@ public:
|
||||||
|
|
||||||
i32 as_i32() const
|
i32 as_i32() const
|
||||||
{
|
{
|
||||||
ASSERT(is_i32());
|
VERIFY(is_i32());
|
||||||
return m_value.as_i32;
|
return m_value.as_i32;
|
||||||
}
|
}
|
||||||
|
|
||||||
u32 as_u32() const
|
u32 as_u32() const
|
||||||
{
|
{
|
||||||
ASSERT(is_u32());
|
VERIFY(is_u32());
|
||||||
return m_value.as_u32;
|
return m_value.as_u32;
|
||||||
}
|
}
|
||||||
|
|
||||||
i64 as_i64() const
|
i64 as_i64() const
|
||||||
{
|
{
|
||||||
ASSERT(is_i64());
|
VERIFY(is_i64());
|
||||||
return m_value.as_i64;
|
return m_value.as_i64;
|
||||||
}
|
}
|
||||||
|
|
||||||
u64 as_u64() const
|
u64 as_u64() const
|
||||||
{
|
{
|
||||||
ASSERT(is_u64());
|
VERIFY(is_u64());
|
||||||
return m_value.as_u64;
|
return m_value.as_u64;
|
||||||
}
|
}
|
||||||
|
|
||||||
int as_bool() const
|
int as_bool() const
|
||||||
{
|
{
|
||||||
ASSERT(is_bool());
|
VERIFY(is_bool());
|
||||||
return m_value.as_bool;
|
return m_value.as_bool;
|
||||||
}
|
}
|
||||||
|
|
||||||
String as_string() const
|
String as_string() const
|
||||||
{
|
{
|
||||||
ASSERT(is_string());
|
VERIFY(is_string());
|
||||||
return *m_value.as_string;
|
return *m_value.as_string;
|
||||||
}
|
}
|
||||||
|
|
||||||
const JsonObject& as_object() const
|
const JsonObject& as_object() const
|
||||||
{
|
{
|
||||||
ASSERT(is_object());
|
VERIFY(is_object());
|
||||||
return *m_value.as_object;
|
return *m_value.as_object;
|
||||||
}
|
}
|
||||||
|
|
||||||
const JsonArray& as_array() const
|
const JsonArray& as_array() const
|
||||||
{
|
{
|
||||||
ASSERT(is_array());
|
VERIFY(is_array());
|
||||||
return *m_value.as_array;
|
return *m_value.as_array;
|
||||||
}
|
}
|
||||||
|
|
||||||
#if !defined(KERNEL)
|
#if !defined(KERNEL)
|
||||||
double as_double() const
|
double as_double() const
|
||||||
{
|
{
|
||||||
ASSERT(is_double());
|
VERIFY(is_double());
|
||||||
return m_value.as_double;
|
return m_value.as_double;
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -52,13 +52,13 @@ public:
|
||||||
|
|
||||||
constexpr const u8& operator[](unsigned i) const
|
constexpr const u8& operator[](unsigned i) const
|
||||||
{
|
{
|
||||||
ASSERT(i < s_mac_address_length);
|
VERIFY(i < s_mac_address_length);
|
||||||
return m_data[i];
|
return m_data[i];
|
||||||
}
|
}
|
||||||
|
|
||||||
constexpr u8& operator[](unsigned i)
|
constexpr u8& operator[](unsigned i)
|
||||||
{
|
{
|
||||||
ASSERT(i < s_mac_address_length);
|
VERIFY(i < s_mac_address_length);
|
||||||
return m_data[i];
|
return m_data[i];
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -68,7 +68,7 @@ MappedFile::MappedFile(void* ptr, size_t size)
|
||||||
MappedFile::~MappedFile()
|
MappedFile::~MappedFile()
|
||||||
{
|
{
|
||||||
auto rc = munmap(m_data, m_size);
|
auto rc = munmap(m_data, m_size);
|
||||||
ASSERT(rc == 0);
|
VERIFY(rc == 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -37,7 +37,7 @@ namespace AK {
|
||||||
namespace {
|
namespace {
|
||||||
const static void* bitap_bitwise(const void* haystack, size_t haystack_length, const void* needle, size_t needle_length)
|
const static void* bitap_bitwise(const void* haystack, size_t haystack_length, const void* needle, size_t needle_length)
|
||||||
{
|
{
|
||||||
ASSERT(needle_length < 32);
|
VERIFY(needle_length < 32);
|
||||||
|
|
||||||
u64 lookup = 0xfffffffe;
|
u64 lookup = 0xfffffffe;
|
||||||
|
|
||||||
|
|
|
@ -79,7 +79,7 @@ public:
|
||||||
|
|
||||||
void seek(size_t offset)
|
void seek(size_t offset)
|
||||||
{
|
{
|
||||||
ASSERT(offset < m_bytes.size());
|
VERIFY(offset < m_bytes.size());
|
||||||
m_offset = offset;
|
m_offset = offset;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -309,7 +309,7 @@ public:
|
||||||
auto buffer = ByteBuffer::create_uninitialized(size());
|
auto buffer = ByteBuffer::create_uninitialized(size());
|
||||||
|
|
||||||
const auto nread = read_without_consuming(buffer);
|
const auto nread = read_without_consuming(buffer);
|
||||||
ASSERT(nread == buffer.size());
|
VERIFY(nread == buffer.size());
|
||||||
|
|
||||||
return buffer;
|
return buffer;
|
||||||
}
|
}
|
||||||
|
|
|
@ -59,13 +59,13 @@ public:
|
||||||
NonnullOwnPtr(NonnullOwnPtr&& other)
|
NonnullOwnPtr(NonnullOwnPtr&& other)
|
||||||
: m_ptr(other.leak_ptr())
|
: m_ptr(other.leak_ptr())
|
||||||
{
|
{
|
||||||
ASSERT(m_ptr);
|
VERIFY(m_ptr);
|
||||||
}
|
}
|
||||||
template<typename U>
|
template<typename U>
|
||||||
NonnullOwnPtr(NonnullOwnPtr<U>&& other)
|
NonnullOwnPtr(NonnullOwnPtr<U>&& other)
|
||||||
: m_ptr(other.leak_ptr())
|
: m_ptr(other.leak_ptr())
|
||||||
{
|
{
|
||||||
ASSERT(m_ptr);
|
VERIFY(m_ptr);
|
||||||
}
|
}
|
||||||
~NonnullOwnPtr()
|
~NonnullOwnPtr()
|
||||||
{
|
{
|
||||||
|
@ -147,7 +147,7 @@ public:
|
||||||
template<typename U>
|
template<typename U>
|
||||||
NonnullOwnPtr<U> release_nonnull()
|
NonnullOwnPtr<U> release_nonnull()
|
||||||
{
|
{
|
||||||
ASSERT(m_ptr);
|
VERIFY(m_ptr);
|
||||||
return NonnullOwnPtr<U>(NonnullOwnPtr<U>::Adopt, static_cast<U&>(*leak_ptr()));
|
return NonnullOwnPtr<U>(NonnullOwnPtr<U>::Adopt, static_cast<U&>(*leak_ptr()));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -72,42 +72,42 @@ public:
|
||||||
ALWAYS_INLINE NonnullRefPtr(const T& object)
|
ALWAYS_INLINE NonnullRefPtr(const T& object)
|
||||||
: m_bits((FlatPtr)&object)
|
: m_bits((FlatPtr)&object)
|
||||||
{
|
{
|
||||||
ASSERT(!(m_bits & 1));
|
VERIFY(!(m_bits & 1));
|
||||||
const_cast<T&>(object).ref();
|
const_cast<T&>(object).ref();
|
||||||
}
|
}
|
||||||
template<typename U>
|
template<typename U>
|
||||||
ALWAYS_INLINE NonnullRefPtr(const U& object)
|
ALWAYS_INLINE NonnullRefPtr(const U& object)
|
||||||
: m_bits((FlatPtr) static_cast<const T*>(&object))
|
: m_bits((FlatPtr) static_cast<const T*>(&object))
|
||||||
{
|
{
|
||||||
ASSERT(!(m_bits & 1));
|
VERIFY(!(m_bits & 1));
|
||||||
const_cast<T&>(static_cast<const T&>(object)).ref();
|
const_cast<T&>(static_cast<const T&>(object)).ref();
|
||||||
}
|
}
|
||||||
ALWAYS_INLINE NonnullRefPtr(AdoptTag, T& object)
|
ALWAYS_INLINE NonnullRefPtr(AdoptTag, T& object)
|
||||||
: m_bits((FlatPtr)&object)
|
: m_bits((FlatPtr)&object)
|
||||||
{
|
{
|
||||||
ASSERT(!(m_bits & 1));
|
VERIFY(!(m_bits & 1));
|
||||||
}
|
}
|
||||||
ALWAYS_INLINE NonnullRefPtr(NonnullRefPtr&& other)
|
ALWAYS_INLINE NonnullRefPtr(NonnullRefPtr&& other)
|
||||||
: m_bits((FlatPtr)&other.leak_ref())
|
: m_bits((FlatPtr)&other.leak_ref())
|
||||||
{
|
{
|
||||||
ASSERT(!(m_bits & 1));
|
VERIFY(!(m_bits & 1));
|
||||||
}
|
}
|
||||||
template<typename U>
|
template<typename U>
|
||||||
ALWAYS_INLINE NonnullRefPtr(NonnullRefPtr<U>&& other)
|
ALWAYS_INLINE NonnullRefPtr(NonnullRefPtr<U>&& other)
|
||||||
: m_bits((FlatPtr)&other.leak_ref())
|
: m_bits((FlatPtr)&other.leak_ref())
|
||||||
{
|
{
|
||||||
ASSERT(!(m_bits & 1));
|
VERIFY(!(m_bits & 1));
|
||||||
}
|
}
|
||||||
ALWAYS_INLINE NonnullRefPtr(const NonnullRefPtr& other)
|
ALWAYS_INLINE NonnullRefPtr(const NonnullRefPtr& other)
|
||||||
: m_bits((FlatPtr)other.add_ref())
|
: m_bits((FlatPtr)other.add_ref())
|
||||||
{
|
{
|
||||||
ASSERT(!(m_bits & 1));
|
VERIFY(!(m_bits & 1));
|
||||||
}
|
}
|
||||||
template<typename U>
|
template<typename U>
|
||||||
ALWAYS_INLINE NonnullRefPtr(const NonnullRefPtr<U>& other)
|
ALWAYS_INLINE NonnullRefPtr(const NonnullRefPtr<U>& other)
|
||||||
: m_bits((FlatPtr)other.add_ref())
|
: m_bits((FlatPtr)other.add_ref())
|
||||||
{
|
{
|
||||||
ASSERT(!(m_bits & 1));
|
VERIFY(!(m_bits & 1));
|
||||||
}
|
}
|
||||||
ALWAYS_INLINE ~NonnullRefPtr()
|
ALWAYS_INLINE ~NonnullRefPtr()
|
||||||
{
|
{
|
||||||
|
@ -170,7 +170,7 @@ public:
|
||||||
[[nodiscard]] ALWAYS_INLINE T& leak_ref()
|
[[nodiscard]] ALWAYS_INLINE T& leak_ref()
|
||||||
{
|
{
|
||||||
T* ptr = exchange(nullptr);
|
T* ptr = exchange(nullptr);
|
||||||
ASSERT(ptr);
|
VERIFY(ptr);
|
||||||
return *ptr;
|
return *ptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -253,7 +253,7 @@ private:
|
||||||
ALWAYS_INLINE T* as_nonnull_ptr() const
|
ALWAYS_INLINE T* as_nonnull_ptr() const
|
||||||
{
|
{
|
||||||
T* ptr = (T*)(m_bits.load(AK::MemoryOrder::memory_order_relaxed) & ~(FlatPtr)1);
|
T* ptr = (T*)(m_bits.load(AK::MemoryOrder::memory_order_relaxed) & ~(FlatPtr)1);
|
||||||
ASSERT(ptr);
|
VERIFY(ptr);
|
||||||
return ptr;
|
return ptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -273,7 +273,7 @@ private:
|
||||||
Kernel::Processor::wait_check();
|
Kernel::Processor::wait_check();
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
ASSERT(!(bits & 1));
|
VERIFY(!(bits & 1));
|
||||||
f((T*)bits);
|
f((T*)bits);
|
||||||
m_bits.store(bits, AK::MemoryOrder::memory_order_release);
|
m_bits.store(bits, AK::MemoryOrder::memory_order_release);
|
||||||
}
|
}
|
||||||
|
@ -286,7 +286,7 @@ private:
|
||||||
|
|
||||||
ALWAYS_INLINE T* exchange(T* new_ptr)
|
ALWAYS_INLINE T* exchange(T* new_ptr)
|
||||||
{
|
{
|
||||||
ASSERT(!((FlatPtr)new_ptr & 1));
|
VERIFY(!((FlatPtr)new_ptr & 1));
|
||||||
#ifdef KERNEL
|
#ifdef KERNEL
|
||||||
// We don't want to be pre-empted while we have the lock bit set
|
// We don't want to be pre-empted while we have the lock bit set
|
||||||
Kernel::ScopedCritical critical;
|
Kernel::ScopedCritical critical;
|
||||||
|
@ -301,7 +301,7 @@ private:
|
||||||
Kernel::Processor::wait_check();
|
Kernel::Processor::wait_check();
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
ASSERT(!(expected & 1));
|
VERIFY(!(expected & 1));
|
||||||
return (T*)expected;
|
return (T*)expected;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -128,19 +128,19 @@ public:
|
||||||
|
|
||||||
[[nodiscard]] ALWAYS_INLINE T& value()
|
[[nodiscard]] ALWAYS_INLINE T& value()
|
||||||
{
|
{
|
||||||
ASSERT(m_has_value);
|
VERIFY(m_has_value);
|
||||||
return *reinterpret_cast<T*>(&m_storage);
|
return *reinterpret_cast<T*>(&m_storage);
|
||||||
}
|
}
|
||||||
|
|
||||||
[[nodiscard]] ALWAYS_INLINE const T& value() const
|
[[nodiscard]] ALWAYS_INLINE const T& value() const
|
||||||
{
|
{
|
||||||
ASSERT(m_has_value);
|
VERIFY(m_has_value);
|
||||||
return *reinterpret_cast<const T*>(&m_storage);
|
return *reinterpret_cast<const T*>(&m_storage);
|
||||||
}
|
}
|
||||||
|
|
||||||
[[nodiscard]] T release_value()
|
[[nodiscard]] T release_value()
|
||||||
{
|
{
|
||||||
ASSERT(m_has_value);
|
VERIFY(m_has_value);
|
||||||
T released_value = move(value());
|
T released_value = move(value());
|
||||||
value().~T();
|
value().~T();
|
||||||
m_has_value = false;
|
m_has_value = false;
|
||||||
|
|
14
AK/OwnPtr.h
14
AK/OwnPtr.h
|
@ -112,7 +112,7 @@ public:
|
||||||
{
|
{
|
||||||
OwnPtr ptr(move(other));
|
OwnPtr ptr(move(other));
|
||||||
swap(ptr);
|
swap(ptr);
|
||||||
ASSERT(m_ptr);
|
VERIFY(m_ptr);
|
||||||
return *this;
|
return *this;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -147,14 +147,14 @@ public:
|
||||||
|
|
||||||
NonnullOwnPtr<T> release_nonnull()
|
NonnullOwnPtr<T> release_nonnull()
|
||||||
{
|
{
|
||||||
ASSERT(m_ptr);
|
VERIFY(m_ptr);
|
||||||
return NonnullOwnPtr<T>(NonnullOwnPtr<T>::Adopt, *leak_ptr());
|
return NonnullOwnPtr<T>(NonnullOwnPtr<T>::Adopt, *leak_ptr());
|
||||||
}
|
}
|
||||||
|
|
||||||
template<typename U>
|
template<typename U>
|
||||||
NonnullOwnPtr<U> release_nonnull()
|
NonnullOwnPtr<U> release_nonnull()
|
||||||
{
|
{
|
||||||
ASSERT(m_ptr);
|
VERIFY(m_ptr);
|
||||||
return NonnullOwnPtr<U>(NonnullOwnPtr<U>::Adopt, static_cast<U&>(*leak_ptr()));
|
return NonnullOwnPtr<U>(NonnullOwnPtr<U>::Adopt, static_cast<U&>(*leak_ptr()));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -163,25 +163,25 @@ public:
|
||||||
|
|
||||||
T* operator->()
|
T* operator->()
|
||||||
{
|
{
|
||||||
ASSERT(m_ptr);
|
VERIFY(m_ptr);
|
||||||
return m_ptr;
|
return m_ptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
const T* operator->() const
|
const T* operator->() const
|
||||||
{
|
{
|
||||||
ASSERT(m_ptr);
|
VERIFY(m_ptr);
|
||||||
return m_ptr;
|
return m_ptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
T& operator*()
|
T& operator*()
|
||||||
{
|
{
|
||||||
ASSERT(m_ptr);
|
VERIFY(m_ptr);
|
||||||
return *m_ptr;
|
return *m_ptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
const T& operator*() const
|
const T& operator*() const
|
||||||
{
|
{
|
||||||
ASSERT(m_ptr);
|
VERIFY(m_ptr);
|
||||||
return *m_ptr;
|
return *m_ptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -52,7 +52,7 @@ public:
|
||||||
|
|
||||||
T dequeue()
|
T dequeue()
|
||||||
{
|
{
|
||||||
ASSERT(!is_empty());
|
VERIFY(!is_empty());
|
||||||
auto value = move((*m_segments.first())[m_index_into_first++]);
|
auto value = move((*m_segments.first())[m_index_into_first++]);
|
||||||
if (m_index_into_first == segment_size) {
|
if (m_index_into_first == segment_size) {
|
||||||
m_segments.take_first();
|
m_segments.take_first();
|
||||||
|
@ -64,7 +64,7 @@ public:
|
||||||
|
|
||||||
const T& head() const
|
const T& head() const
|
||||||
{
|
{
|
||||||
ASSERT(!is_empty());
|
VERIFY(!is_empty());
|
||||||
return (*m_segments.first())[m_index_into_first];
|
return (*m_segments.first())[m_index_into_first];
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -70,8 +70,8 @@ public:
|
||||||
ALWAYS_INLINE void ref() const
|
ALWAYS_INLINE void ref() const
|
||||||
{
|
{
|
||||||
auto old_ref_count = m_ref_count.fetch_add(1, AK::MemoryOrder::memory_order_relaxed);
|
auto old_ref_count = m_ref_count.fetch_add(1, AK::MemoryOrder::memory_order_relaxed);
|
||||||
ASSERT(old_ref_count > 0);
|
VERIFY(old_ref_count > 0);
|
||||||
ASSERT(!Checked<RefCountType>::addition_would_overflow(old_ref_count, 1));
|
VERIFY(!Checked<RefCountType>::addition_would_overflow(old_ref_count, 1));
|
||||||
}
|
}
|
||||||
|
|
||||||
[[nodiscard]] ALWAYS_INLINE bool try_ref() const
|
[[nodiscard]] ALWAYS_INLINE bool try_ref() const
|
||||||
|
@ -80,7 +80,7 @@ public:
|
||||||
for (;;) {
|
for (;;) {
|
||||||
if (expected == 0)
|
if (expected == 0)
|
||||||
return false;
|
return false;
|
||||||
ASSERT(!Checked<RefCountType>::addition_would_overflow(expected, 1));
|
VERIFY(!Checked<RefCountType>::addition_would_overflow(expected, 1));
|
||||||
if (m_ref_count.compare_exchange_strong(expected, expected + 1, AK::MemoryOrder::memory_order_acquire))
|
if (m_ref_count.compare_exchange_strong(expected, expected + 1, AK::MemoryOrder::memory_order_acquire))
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@ -95,13 +95,13 @@ protected:
|
||||||
RefCountedBase() = default;
|
RefCountedBase() = default;
|
||||||
ALWAYS_INLINE ~RefCountedBase()
|
ALWAYS_INLINE ~RefCountedBase()
|
||||||
{
|
{
|
||||||
ASSERT(m_ref_count.load(AK::MemoryOrder::memory_order_relaxed) == 0);
|
VERIFY(m_ref_count.load(AK::MemoryOrder::memory_order_relaxed) == 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
ALWAYS_INLINE RefCountType deref_base() const
|
ALWAYS_INLINE RefCountType deref_base() const
|
||||||
{
|
{
|
||||||
auto old_ref_count = m_ref_count.fetch_sub(1, AK::MemoryOrder::memory_order_acq_rel);
|
auto old_ref_count = m_ref_count.fetch_sub(1, AK::MemoryOrder::memory_order_acq_rel);
|
||||||
ASSERT(old_ref_count > 0);
|
VERIFY(old_ref_count > 0);
|
||||||
return old_ref_count - 1;
|
return old_ref_count - 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
26
AK/RefPtr.h
26
AK/RefPtr.h
|
@ -50,7 +50,7 @@ struct RefPtrTraits {
|
||||||
|
|
||||||
ALWAYS_INLINE static FlatPtr as_bits(T* ptr)
|
ALWAYS_INLINE static FlatPtr as_bits(T* ptr)
|
||||||
{
|
{
|
||||||
ASSERT(!((FlatPtr)ptr & 1));
|
VERIFY(!((FlatPtr)ptr & 1));
|
||||||
return (FlatPtr)ptr;
|
return (FlatPtr)ptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -70,7 +70,7 @@ struct RefPtrTraits {
|
||||||
ALWAYS_INLINE static FlatPtr exchange(Atomic<FlatPtr>& atomic_var, FlatPtr new_value)
|
ALWAYS_INLINE static FlatPtr exchange(Atomic<FlatPtr>& atomic_var, FlatPtr new_value)
|
||||||
{
|
{
|
||||||
// Only exchange when lock is not held
|
// Only exchange when lock is not held
|
||||||
ASSERT(!(new_value & 1));
|
VERIFY(!(new_value & 1));
|
||||||
FlatPtr expected = atomic_var.load(AK::MemoryOrder::memory_order_relaxed);
|
FlatPtr expected = atomic_var.load(AK::MemoryOrder::memory_order_relaxed);
|
||||||
for (;;) {
|
for (;;) {
|
||||||
expected &= ~(FlatPtr)1; // only if lock bit is not set
|
expected &= ~(FlatPtr)1; // only if lock bit is not set
|
||||||
|
@ -86,7 +86,7 @@ struct RefPtrTraits {
|
||||||
ALWAYS_INLINE static bool exchange_if_null(Atomic<FlatPtr>& atomic_var, FlatPtr new_value)
|
ALWAYS_INLINE static bool exchange_if_null(Atomic<FlatPtr>& atomic_var, FlatPtr new_value)
|
||||||
{
|
{
|
||||||
// Only exchange when lock is not held
|
// Only exchange when lock is not held
|
||||||
ASSERT(!(new_value & 1));
|
VERIFY(!(new_value & 1));
|
||||||
for (;;) {
|
for (;;) {
|
||||||
FlatPtr expected = default_null_value; // only if lock bit is not set
|
FlatPtr expected = default_null_value; // only if lock bit is not set
|
||||||
 if (atomic_var.compare_exchange_strong(expected, new_value, AK::MemoryOrder::memory_order_acq_rel))

@@ -116,13 +116,13 @@ struct RefPtrTraits {
- ASSERT(!(bits & 1));
+ VERIFY(!(bits & 1));
 ALWAYS_INLINE static void unlock(Atomic<FlatPtr>& atomic_var, FlatPtr new_value)
- ASSERT(!(new_value & 1));
+ VERIFY(!(new_value & 1));

@@ -153,14 +153,14 @@ public:
- ASSERT(ptr);
+ VERIFY(ptr);
- ASSERT(!is_null());
+ VERIFY(!is_null());
 RefPtr(AdoptTag, T& object)
- ASSERT(!is_null());
+ VERIFY(!is_null());

@@ -179,7 +179,7 @@ public:
 ALWAYS_INLINE RefPtr(NonnullRefPtr<U>&& other)
- ASSERT(!is_null());
+ VERIFY(!is_null());

@@ -330,7 +330,7 @@ public:
 NonnullRefPtr<T> release_nonnull()
- ASSERT(!PtrTraits::is_null(bits));
+ VERIFY(!PtrTraits::is_null(bits));

@@ -384,7 +384,7 @@ public:
 // make sure we are holding a null value
- ASSERT(PtrTraits::is_null(bits));
+ VERIFY(PtrTraits::is_null(bits));

@@ -392,7 +392,7 @@ public:
 // make sure that new null value would be interpreted as a null value
- ASSERT(PtrTraits::is_null(bits));
+ VERIFY(PtrTraits::is_null(bits));

@@ -454,7 +454,7 @@ private:
 ALWAYS_INLINE T* as_nonnull_ptr(FlatPtr bits) const
- ASSERT(!PtrTraits::is_null(bits));
+ VERIFY(!PtrTraits::is_null(bits));
@@ -83,9 +83,9 @@ public:
 if constexpr (allow_create) {
 // We should always return an instance if we allow creating one
- ASSERT(obj != nullptr);
+ VERIFY(obj != nullptr);
- ASSERT(obj != (T*)0x1);
+ VERIFY(obj != (T*)0x1);
@@ -104,28 +104,28 @@ public:
 T& first()
- ASSERT(head());
+ VERIFY(head());
 const T& first() const
- ASSERT(head());
+ VERIFY(head());
 T& last()
- ASSERT(head());
+ VERIFY(head());
 const T& last() const
- ASSERT(head());
+ VERIFY(head());
 T take_first()
- ASSERT(m_head);
+ VERIFY(m_head);

@@ -187,7 +187,7 @@ public:
 void remove(Iterator iterator)
- ASSERT(!iterator.is_end());
+ VERIFY(!iterator.is_end());
@@ -78,11 +78,11 @@ public:
 if (!lexer.consume_specific(m_closing))
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
 } else {
- ASSERT(lexer.is_eof());
+ VERIFY(lexer.is_eof());
14 AK/Span.h

@@ -140,12 +140,12 @@ public:
 [[nodiscard]] ALWAYS_INLINE constexpr Span slice(size_t start, size_t length) const
- ASSERT(start + length <= size());
+ VERIFY(start + length <= size());
 [[nodiscard]] ALWAYS_INLINE constexpr Span slice(size_t start) const
- ASSERT(start <= size());
+ VERIFY(start <= size());

@@ -156,20 +156,20 @@ public:
 ALWAYS_INLINE constexpr T* offset(size_t start) const
- ASSERT(start < this->m_size);
+ VERIFY(start < this->m_size);
 ALWAYS_INLINE constexpr void overwrite(size_t offset, const void* data, size_t data_size)
- ASSERT(offset + data_size <= size());
+ VERIFY(offset + data_size <= size());
 ALWAYS_INLINE constexpr size_t copy_to(Span<typename RemoveConst<T>::Type> other) const
- ASSERT(other.size() >= size());
+ VERIFY(other.size() >= size());

@@ -198,12 +198,12 @@ public:
 ALWAYS_INLINE constexpr const T& at(size_t index) const
- ASSERT(index < this->m_size);
+ VERIFY(index < this->m_size);
 ALWAYS_INLINE constexpr T& at(size_t index)
- ASSERT(index < this->m_size);
+ VERIFY(index < this->m_size);
@@ -42,17 +42,17 @@ StackInfo::StackInfo()
 perror("get_stack_bounds");
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
 if (int rc = pthread_getattr_np(pthread_self(), &attr) != 0) {
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
 if (int rc = pthread_attr_getstack(&attr, (void**)&m_base, &m_size) != 0) {
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();

@@ -73,7 +73,7 @@ StackInfo::StackInfo()
 #else
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
@@ -57,7 +57,7 @@ constexpr T max(const T& a, const T& b)
 constexpr T clamp(const T& value, const T& min, const T& max)
- ASSERT(max >= min);
+ VERIFY(max >= min);
@@ -37,7 +37,7 @@ namespace AK::Detail {
- virtual ~Stream() { ASSERT(!has_any_error()); }
+ virtual ~Stream() { VERIFY(!has_any_error()); }

@@ -45,7 +45,7 @@ public:
 virtual bool handle_recoverable_error()
- ASSERT(!has_fatal_error());
+ VERIFY(!has_fatal_error());
@@ -104,7 +104,7 @@ String String::empty()
 // We must fit at least the NUL-terminator.
- ASSERT(buffer_size > 0);
+ VERIFY(buffer_size > 0);

@@ -127,8 +127,8 @@ String String::isolated_copy() const
 String String::substring(size_t start) const
- ASSERT(m_impl);
+ VERIFY(m_impl);
- ASSERT(start <= length());
+ VERIFY(start <= length());

@@ -136,24 +136,24 @@ String String::substring(size_t start, size_t length) const
- ASSERT(m_impl);
+ VERIFY(m_impl);
- ASSERT(start + length <= m_impl->length());
+ VERIFY(start + length <= m_impl->length());
 StringView String::substring_view(size_t start, size_t length) const
- ASSERT(m_impl);
+ VERIFY(m_impl);
- ASSERT(start + length <= m_impl->length());
+ VERIFY(start + length <= m_impl->length());
 StringView String::substring_view(size_t start) const
- ASSERT(m_impl);
+ VERIFY(m_impl);
- ASSERT(start <= length());
+ VERIFY(start <= length());
@@ -40,12 +40,12 @@ inline void StringBuilder::will_append(size_t size)
- ASSERT(!needed_capacity.has_overflow());
+ VERIFY(!needed_capacity.has_overflow());
- ASSERT(!expanded_capacity.has_overflow());
+ VERIFY(!expanded_capacity.has_overflow());
@@ -88,9 +88,9 @@ static inline size_t allocation_size_for_stringimpl(size_t length)
 NonnullRefPtr<StringImpl> StringImpl::create_uninitialized(size_t length, char*& buffer)
- ASSERT(length);
+ VERIFY(length);
- ASSERT(slot);
+ VERIFY(slot);
@@ -66,7 +66,7 @@ public:
 const char& operator[](size_t i) const
- ASSERT(i < m_length);
+ VERIFY(i < m_length);
@@ -79,7 +79,7 @@ Vector<StringView> StringView::split_view(const char separator, bool keep_empty)
 Vector<StringView> StringView::split_view(const StringView& separator, bool keep_empty) const
- ASSERT(!separator.is_empty());
+ VERIFY(!separator.is_empty());

@@ -197,20 +197,20 @@ bool StringView::equals_ignoring_case(const StringView& other) const
 StringView StringView::substring_view(size_t start, size_t length) const
- ASSERT(start + length <= m_length);
+ VERIFY(start + length <= m_length);
 StringView StringView::substring_view(size_t start) const
- ASSERT(start <= m_length);
+ VERIFY(start <= m_length);
 StringView StringView::substring_view_starting_from_substring(const StringView& substring) const
- ASSERT(remaining_characters >= m_characters);
+ VERIFY(remaining_characters >= m_characters);
- ASSERT(remaining_characters <= m_characters + m_length);
+ VERIFY(remaining_characters <= m_characters + m_length);

@@ -218,8 +218,8 @@ StringView StringView::substring_view_starting_from_substring(const StringView&
 StringView StringView::substring_view_starting_after_substring(const StringView& substring) const
- ASSERT(remaining_characters >= m_characters);
+ VERIFY(remaining_characters >= m_characters);
- ASSERT(remaining_characters <= m_characters + m_length);
+ VERIFY(remaining_characters <= m_characters + m_length);
@@ -42,13 +42,13 @@ public:
- ASSERT(!Checked<uintptr_t>::addition_would_overflow((uintptr_t)characters, length));
+ VERIFY(!Checked<uintptr_t>::addition_would_overflow((uintptr_t)characters, length));
 ALWAYS_INLINE StringView(const unsigned char* characters, size_t length)
- ASSERT(!Checked<uintptr_t>::addition_would_overflow((uintptr_t)characters, length));
+ VERIFY(!Checked<uintptr_t>::addition_would_overflow((uintptr_t)characters, length));
@@ -38,11 +38,11 @@ void warnln(CheckedFormatString<Parameters...>&& fmtstr, const Parameters&...);
 using AK::warnln;
- #undef ASSERT
+ #undef VERIFY
- #define ASSERT(x) \
+ #define VERIFY(x) \
- ::AK::warnln("\033[31;1mFAIL\033[0m: {}:{}: ASSERT({}) failed", __FILE__, __LINE__, #x); \
+ ::AK::warnln("\033[31;1mFAIL\033[0m: {}:{}: VERIFY({}) failed", __FILE__, __LINE__, #x); \

@@ -52,10 +52,10 @@ using AK::warnln;
- #undef ASSERT_NOT_REACHED
+ #undef VERIFY_NOT_REACHED
- #define ASSERT_NOT_REACHED() \
+ #define VERIFY_NOT_REACHED() \
- ::AK::warnln("\033[31;1mFAIL\033[0m: {}:{}: ASSERT_NOT_REACHED() called", __FILE__, __LINE__); \
+ ::AK::warnln("\033[31;1mFAIL\033[0m: {}:{}: VERIFY_NOT_REACHED() called", __FILE__, __LINE__); \
@@ -177,7 +177,7 @@ TEST_CASE(pointers)
 } else {
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
@@ -36,7 +36,7 @@
 TEST_CASE(load_form)
 FILE* fp = fopen("test.frm", "r");
- ASSERT(fp);
+ VERIFY(fp);

@@ -69,7 +69,7 @@ TEST_CASE(load_form)
 BENCHMARK_CASE(load_4chan_catalog)
 FILE* fp = fopen("4chan_catalog.json", "r");
- ASSERT(fp);
+ VERIFY(fp);
@@ -38,7 +38,7 @@ TEST_CASE(decode_ascii)
 for (u32 code_point : utf8) {
- ASSERT(i < expected_size);
+ VERIFY(i < expected_size);

@@ -57,7 +57,7 @@ TEST_CASE(decode_utf8)
 for (u32 code_point : utf8) {
- ASSERT(i < expected_size);
+ VERIFY(i < expected_size);
@@ -31,7 +31,7 @@ namespace AK {
 int day_of_year(int year, unsigned month, int day)
- ASSERT(month >= 1 && month <= 12);
+ VERIFY(month >= 1 && month <= 12);

@@ -44,7 +44,7 @@ int day_of_year(int year, unsigned month, int day)
 int days_in_month(int year, unsigned month)
- ASSERT(month >= 1 && month <= 12);
+ VERIFY(month >= 1 && month <= 12);

@@ -54,7 +54,7 @@ int days_in_month(int year, unsigned month)
 unsigned day_of_week(int year, unsigned month, int day)
- ASSERT(month >= 1 && month <= 12);
+ VERIFY(month >= 1 && month <= 12);
@@ -51,7 +51,7 @@ template<typename OutputType, typename InputType>
 ALWAYS_INLINE CopyConst<InputType, OutputType>* downcast(InputType* input)
- ASSERT(!input || is<OutputType>(*input));
+ VERIFY(!input || is<OutputType>(*input));

@@ -59,7 +59,7 @@ template<typename OutputType, typename InputType>
 ALWAYS_INLINE CopyConst<InputType, OutputType>& downcast(InputType& input)
- ASSERT(is<OutputType>(input));
+ VERIFY(is<OutputType>(input));
@@ -41,7 +41,7 @@ UUID::UUID(Array<u8, 16> uuid_buffer)
 void UUID::convert_string_view_to_uuid(const StringView& uuid_string_view)
- ASSERT(uuid_string_view.length() == 36);
+ VERIFY(uuid_string_view.length() == 36);
@@ -51,7 +51,7 @@ public:
 Utf32CodepointIterator& operator++()
- ASSERT(m_length > 0);
+ VERIFY(m_length > 0);

@@ -62,7 +62,7 @@ public:
 u32 operator*() const
- ASSERT(m_length > 0);
+ VERIFY(m_length > 0);

@@ -88,7 +88,7 @@ public:
- ASSERT(code_points || length == 0);
+ VERIFY(code_points || length == 0);

@@ -107,8 +107,8 @@ public:
 size_t iterator_offset(const Utf32CodepointIterator& it) const
- ASSERT(it.m_ptr >= m_code_points);
+ VERIFY(it.m_ptr >= m_code_points);
- ASSERT(it.m_ptr < m_code_points + m_length);
+ VERIFY(it.m_ptr < m_code_points + m_length);

@@ -116,9 +116,9 @@ public:
- ASSERT(offset < m_length);
+ VERIFY(offset < m_length);
- ASSERT(!Checked<size_t>::addition_would_overflow(offset, length));
+ VERIFY(!Checked<size_t>::addition_would_overflow(offset, length));
- ASSERT((offset + length) <= m_length);
+ VERIFY((offset + length) <= m_length);
@@ -67,8 +67,8 @@ Utf8CodepointIterator Utf8View::end() const
 size_t Utf8View::byte_offset_of(const Utf8CodepointIterator& it) const
- ASSERT(it.m_ptr >= begin_ptr());
+ VERIFY(it.m_ptr >= begin_ptr());
- ASSERT(it.m_ptr <= end_ptr());
+ VERIFY(it.m_ptr <= end_ptr());

@@ -162,15 +162,15 @@ bool Utf8CodepointIterator::operator!=(const Utf8CodepointIterator& other) const
 Utf8CodepointIterator& Utf8CodepointIterator::operator++()
- ASSERT(m_length > 0);
+ VERIFY(m_length > 0);
- ASSERT(first_byte_makes_sense);
+ VERIFY(first_byte_makes_sense);
- ASSERT(code_point_length_in_bytes <= m_length);
+ VERIFY(code_point_length_in_bytes <= m_length);

@@ -179,17 +179,17 @@ Utf8CodepointIterator& Utf8CodepointIterator::operator++()
 size_t Utf8CodepointIterator::code_point_length_in_bytes() const
- ASSERT(m_length > 0);
+ VERIFY(m_length > 0);
- ASSERT(first_byte_makes_sense);
+ VERIFY(first_byte_makes_sense);
 u32 Utf8CodepointIterator::operator*() const
- ASSERT(m_length > 0);
+ VERIFY(m_length > 0);

@@ -197,13 +197,13 @@ u32 Utf8CodepointIterator::operator*() const
- ASSERT(first_byte_makes_sense);
+ VERIFY(first_byte_makes_sense);
- ASSERT(code_point_length_in_bytes <= m_length);
+ VERIFY(code_point_length_in_bytes <= m_length);
 for (size_t offset = 1; offset < code_point_length_in_bytes; offset++) {
- ASSERT(m_ptr[offset] >> 6 == 2);
+ VERIFY(m_ptr[offset] >> 6 == 2);
22 AK/Vector.h

@@ -191,12 +191,12 @@ public:
 ALWAYS_INLINE const T& at(size_t i) const
- ASSERT(i < m_size);
+ VERIFY(i < m_size);
 ALWAYS_INLINE T& at(size_t i)
- ASSERT(i < m_size);
+ VERIFY(i < m_size);

@@ -211,7 +211,7 @@ public:
 T take_last()
- ASSERT(!is_empty());
+ VERIFY(!is_empty());

@@ -220,7 +220,7 @@ public:
 T take_first()
- ASSERT(!is_empty());
+ VERIFY(!is_empty());

@@ -235,14 +235,14 @@ public:
 T unstable_take(size_t index)
- ASSERT(index < m_size);
+ VERIFY(index < m_size);
 void remove(size_t index)
- ASSERT(index < m_size);
+ VERIFY(index < m_size);

@@ -261,8 +261,8 @@ public:
- ASSERT(index + count > index);
+ VERIFY(index + count > index);
- ASSERT(index + count <= m_size);
+ VERIFY(index + count <= m_size);

@@ -281,7 +281,7 @@ public:
 void insert(size_t index, U&& value)
- ASSERT(index <= size());
+ VERIFY(index <= size());

@@ -403,7 +403,7 @@ public:
 ALWAYS_INLINE void unchecked_append(U&& value)
- ASSERT((size() + 1) <= capacity());
+ VERIFY((size() + 1) <= capacity());

@@ -506,7 +506,7 @@ public:
 void shrink(size_t new_size, bool keep_capacity = false)
- ASSERT(new_size <= size());
+ VERIFY(new_size <= size());
@@ -229,7 +229,7 @@ inline WeakPtr<U> Weakable<T>::make_weak_ptr() const
 if (static_cast<const T*>(this)->unref()) {
- ASSERT(!weak_ptr.strong_ref());
+ VERIFY(!weak_ptr.strong_ref());
@@ -91,7 +91,7 @@ public:
 void revoke()
- ASSERT(!(current_consumers & 1u));
+ VERIFY(!(current_consumers & 1u));
@@ -40,39 +40,39 @@ UNMAP_AFTER_INIT DynamicParser::DynamicParser(PhysicalAddress rsdp)
 void DynamicParser::handle_irq(const RegisterState&)
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
 void DynamicParser::enable_aml_interpretation()
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
 void DynamicParser::enable_aml_interpretation(File&)
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
 void DynamicParser::enable_aml_interpretation(u8*, u32)
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
 void DynamicParser::disable_aml_interpretation()
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
 void DynamicParser::try_acpi_shutdown()
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
 void DynamicParser::build_namespace()
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
@@ -96,7 +96,7 @@ UNMAP_AFTER_INIT void MultiProcessorParser::parse_configuration_table()
 default:
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
@@ -47,7 +47,7 @@ Parser* Parser::the()
 void Parser::set_the(Parser& parser)
- ASSERT(!s_acpi_parser);
+ VERIFY(!s_acpi_parser);

@@ -89,7 +89,7 @@ UNMAP_AFTER_INIT void Parser::init_fadt()
 m_fadt = find_table("FACP");
- ASSERT(!m_fadt.is_null());
+ VERIFY(!m_fadt.is_null());

@@ -148,13 +148,13 @@ void Parser::access_generic_address(const Structures::GenericAddressStructure& s
 dbgln("Trying to send QWord to IO port");
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
 dbgln("ACPI Warning: Unknown access size {}", structure.access_size);
- ASSERT(structure.bit_width != (u8)GenericAddressStructure::BitWidth::QWord);
+ VERIFY(structure.bit_width != (u8)GenericAddressStructure::BitWidth::QWord);
- ASSERT(structure.bit_width != (u8)GenericAddressStructure::BitWidth::Undefined);
+ VERIFY(structure.bit_width != (u8)GenericAddressStructure::BitWidth::Undefined);

@@ -182,7 +182,7 @@ void Parser::access_generic_address(const Structures::GenericAddressStructure& s
 default:
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();

@@ -193,16 +193,16 @@ void Parser::access_generic_address(const Structures::GenericAddressStructure& s
 dbgln("Trying to send QWord to PCI configuration space");
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
- ASSERT(structure.access_size != (u8)GenericAddressStructure::AccessSize::Undefined);
+ VERIFY(structure.access_size != (u8)GenericAddressStructure::AccessSize::Undefined);
 default:
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();

@@ -222,7 +222,7 @@ void Parser::try_acpi_reboot()
 auto fadt = map_typed<Structures::FADT>(m_fadt);
- ASSERT(validate_reset_register());
+ VERIFY(validate_reset_register());

@@ -255,7 +255,7 @@ UNMAP_AFTER_INIT void Parser::initialize_main_system_description_table()
- ASSERT(!m_main_system_description_table.is_null());
+ VERIFY(!m_main_system_description_table.is_null());

@@ -333,7 +333,7 @@ UNMAP_AFTER_INIT Optional<PhysicalAddress> StaticParsing::find_rsdp()
 UNMAP_AFTER_INIT PhysicalAddress StaticParsing::find_table(PhysicalAddress rsdp_address, const StringView& signature)
- ASSERT(signature.length() == 4);
+ VERIFY(signature.length() == 4);

@@ -345,13 +345,13 @@ UNMAP_AFTER_INIT PhysicalAddress StaticParsing::find_table(PhysicalAddress rsdp_
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
 UNMAP_AFTER_INIT static PhysicalAddress search_table_in_xsdt(PhysicalAddress xsdt_address, const StringView& signature)
- ASSERT(signature.length() == 4);
+ VERIFY(signature.length() == 4);

@@ -365,7 +365,7 @@ UNMAP_AFTER_INIT static PhysicalAddress search_table_in_xsdt(PhysicalAddress xsd
 static bool match_table_signature(PhysicalAddress table_header, const StringView& signature)
- ASSERT(signature.length() == 4);
+ VERIFY(signature.length() == 4);

@@ -374,7 +374,7 @@ static bool match_table_signature(PhysicalAddress table_header, const StringView
 UNMAP_AFTER_INIT static PhysicalAddress search_table_in_rsdt(PhysicalAddress rsdt_address, const StringView& signature)
- ASSERT(signature.length() == 4);
+ VERIFY(signature.length() == 4);

@@ -387,22 +387,22 @@ UNMAP_AFTER_INIT static PhysicalAddress search_table_in_rsdt(PhysicalAddress rsd
 void Parser::enable_aml_interpretation()
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
 void Parser::enable_aml_interpretation(File&)
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
 void Parser::enable_aml_interpretation(u8*, u32)
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
 void Parser::disable_aml_interpretation()
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
@ -319,7 +319,7 @@ void page_fault_handler(TrapFrame* trap)
|
||||||
dbgln("Continuing after resolved page fault");
|
dbgln("Continuing after resolved page fault");
|
||||||
#endif
|
#endif
|
||||||
} else {
|
} else {
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -390,7 +390,7 @@ static void unimp_trap()
|
||||||
|
|
||||||
GenericInterruptHandler& get_interrupt_handler(u8 interrupt_number)
|
GenericInterruptHandler& get_interrupt_handler(u8 interrupt_number)
|
||||||
{
|
{
|
||||||
ASSERT(s_interrupt_handler[interrupt_number] != nullptr);
|
VERIFY(s_interrupt_handler[interrupt_number] != nullptr);
|
||||||
return *s_interrupt_handler[interrupt_number];
|
return *s_interrupt_handler[interrupt_number];
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -401,14 +401,14 @@ static void revert_to_unused_handler(u8 interrupt_number)
|
||||||
|
|
||||||
void register_generic_interrupt_handler(u8 interrupt_number, GenericInterruptHandler& handler)
|
void register_generic_interrupt_handler(u8 interrupt_number, GenericInterruptHandler& handler)
|
||||||
{
|
{
|
||||||
ASSERT(interrupt_number < GENERIC_INTERRUPT_HANDLERS_COUNT);
|
VERIFY(interrupt_number < GENERIC_INTERRUPT_HANDLERS_COUNT);
|
||||||
if (s_interrupt_handler[interrupt_number] != nullptr) {
|
if (s_interrupt_handler[interrupt_number] != nullptr) {
|
||||||
if (s_interrupt_handler[interrupt_number]->type() == HandlerType::UnhandledInterruptHandler) {
|
if (s_interrupt_handler[interrupt_number]->type() == HandlerType::UnhandledInterruptHandler) {
|
||||||
s_interrupt_handler[interrupt_number] = &handler;
|
s_interrupt_handler[interrupt_number] = &handler;
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
if (s_interrupt_handler[interrupt_number]->is_shared_handler() && !s_interrupt_handler[interrupt_number]->is_sharing_with_others()) {
|
if (s_interrupt_handler[interrupt_number]->is_shared_handler() && !s_interrupt_handler[interrupt_number]->is_sharing_with_others()) {
|
||||||
ASSERT(s_interrupt_handler[interrupt_number]->type() == HandlerType::SharedIRQHandler);
|
VERIFY(s_interrupt_handler[interrupt_number]->type() == HandlerType::SharedIRQHandler);
|
||||||
static_cast<SharedIRQHandler*>(s_interrupt_handler[interrupt_number])->register_handler(handler);
|
static_cast<SharedIRQHandler*>(s_interrupt_handler[interrupt_number])->register_handler(handler);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@ -417,7 +417,7 @@ void register_generic_interrupt_handler(u8 interrupt_number, GenericInterruptHan
|
||||||
static_cast<SpuriousInterruptHandler*>(s_interrupt_handler[interrupt_number])->register_handler(handler);
|
static_cast<SpuriousInterruptHandler*>(s_interrupt_handler[interrupt_number])->register_handler(handler);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
ASSERT(s_interrupt_handler[interrupt_number]->type() == HandlerType::IRQHandler);
|
VERIFY(s_interrupt_handler[interrupt_number]->type() == HandlerType::IRQHandler);
|
||||||
auto& previous_handler = *s_interrupt_handler[interrupt_number];
|
auto& previous_handler = *s_interrupt_handler[interrupt_number];
|
||||||
s_interrupt_handler[interrupt_number] = nullptr;
|
s_interrupt_handler[interrupt_number] = nullptr;
|
||||||
SharedIRQHandler::initialize(interrupt_number);
|
SharedIRQHandler::initialize(interrupt_number);
|
||||||
|
@ -425,7 +425,7 @@ void register_generic_interrupt_handler(u8 interrupt_number, GenericInterruptHan
|
||||||
static_cast<SharedIRQHandler*>(s_interrupt_handler[interrupt_number])->register_handler(handler);
|
static_cast<SharedIRQHandler*>(s_interrupt_handler[interrupt_number])->register_handler(handler);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
} else {
|
} else {
|
||||||
s_interrupt_handler[interrupt_number] = &handler;
|
s_interrupt_handler[interrupt_number] = &handler;
|
||||||
}
|
}
|
||||||
|
@ -433,13 +433,13 @@ void register_generic_interrupt_handler(u8 interrupt_number, GenericInterruptHan
|
||||||
|
|
||||||
void unregister_generic_interrupt_handler(u8 interrupt_number, GenericInterruptHandler& handler)
|
void unregister_generic_interrupt_handler(u8 interrupt_number, GenericInterruptHandler& handler)
|
||||||
{
|
{
|
||||||
ASSERT(s_interrupt_handler[interrupt_number] != nullptr);
|
VERIFY(s_interrupt_handler[interrupt_number] != nullptr);
|
||||||
if (s_interrupt_handler[interrupt_number]->type() == HandlerType::UnhandledInterruptHandler) {
|
if (s_interrupt_handler[interrupt_number]->type() == HandlerType::UnhandledInterruptHandler) {
|
||||||
dbgln("Trying to unregister unused handler (?)");
|
dbgln("Trying to unregister unused handler (?)");
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
if (s_interrupt_handler[interrupt_number]->is_shared_handler() && !s_interrupt_handler[interrupt_number]->is_sharing_with_others()) {
|
if (s_interrupt_handler[interrupt_number]->is_shared_handler() && !s_interrupt_handler[interrupt_number]->is_sharing_with_others()) {
|
||||||
ASSERT(s_interrupt_handler[interrupt_number]->type() == HandlerType::SharedIRQHandler);
|
VERIFY(s_interrupt_handler[interrupt_number]->type() == HandlerType::SharedIRQHandler);
|
||||||
static_cast<SharedIRQHandler*>(s_interrupt_handler[interrupt_number])->unregister_handler(handler);
|
static_cast<SharedIRQHandler*>(s_interrupt_handler[interrupt_number])->unregister_handler(handler);
|
||||||
if (!static_cast<SharedIRQHandler*>(s_interrupt_handler[interrupt_number])->sharing_devices_count()) {
|
if (!static_cast<SharedIRQHandler*>(s_interrupt_handler[interrupt_number])->sharing_devices_count()) {
|
||||||
revert_to_unused_handler(interrupt_number);
|
revert_to_unused_handler(interrupt_number);
|
||||||
|
@ -447,11 +447,11 @@ void unregister_generic_interrupt_handler(u8 interrupt_number, GenericInterruptH
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
if (!s_interrupt_handler[interrupt_number]->is_shared_handler()) {
|
if (!s_interrupt_handler[interrupt_number]->is_shared_handler()) {
|
||||||
ASSERT(s_interrupt_handler[interrupt_number]->type() == HandlerType::IRQHandler);
|
VERIFY(s_interrupt_handler[interrupt_number]->type() == HandlerType::IRQHandler);
|
||||||
revert_to_unused_handler(interrupt_number);
|
revert_to_unused_handler(interrupt_number);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
}
|
}
|
||||||
|
|
||||||
UNMAP_AFTER_INIT void register_interrupt_handler(u8 index, void (*f)())
|
UNMAP_AFTER_INIT void register_interrupt_handler(u8 index, void (*f)())
|
||||||
|
@ -692,11 +692,11 @@ void handle_interrupt(TrapFrame* trap)
|
||||||
{
|
{
|
||||||
clac();
|
clac();
|
||||||
auto& regs = *trap->regs;
|
auto& regs = *trap->regs;
|
||||||
ASSERT(regs.isr_number >= IRQ_VECTOR_BASE && regs.isr_number <= (IRQ_VECTOR_BASE + GENERIC_INTERRUPT_HANDLERS_COUNT));
|
VERIFY(regs.isr_number >= IRQ_VECTOR_BASE && regs.isr_number <= (IRQ_VECTOR_BASE + GENERIC_INTERRUPT_HANDLERS_COUNT));
|
||||||
u8 irq = (u8)(regs.isr_number - 0x50);
|
u8 irq = (u8)(regs.isr_number - 0x50);
|
||||||
s_entropy_source_interrupts.add_random_event(irq);
|
s_entropy_source_interrupts.add_random_event(irq);
|
||||||
auto* handler = s_interrupt_handler[irq];
|
auto* handler = s_interrupt_handler[irq];
|
||||||
ASSERT(handler);
|
VERIFY(handler);
|
||||||
handler->increment_invoking_counter();
|
handler->increment_invoking_counter();
|
||||||
handler->handle_interrupt(regs);
|
handler->handle_interrupt(regs);
|
||||||
handler->eoi();
|
handler->eoi();
|
||||||
|
@ -792,7 +792,7 @@ static volatile bool s_smp_enabled;
|
||||||
|
|
||||||
Vector<Processor*>& Processor::processors()
|
Vector<Processor*>& Processor::processors()
|
||||||
{
|
{
|
||||||
ASSERT(s_processors);
|
VERIFY(s_processors);
|
||||||
return *s_processors;
|
return *s_processors;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -803,8 +803,8 @@ Processor& Processor::by_id(u32 cpu)
|
||||||
// for all APs to finish, after which this array never gets modified
|
// for all APs to finish, after which this array never gets modified
|
||||||
// again, so it's safe to not protect access to it here
|
// again, so it's safe to not protect access to it here
|
||||||
auto& procs = processors();
|
auto& procs = processors();
|
||||||
ASSERT(procs[cpu] != nullptr);
|
VERIFY(procs[cpu] != nullptr);
|
||||||
ASSERT(procs.size() > cpu);
|
VERIFY(procs.size() > cpu);
|
||||||
return *procs[cpu];
|
return *procs[cpu];
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -861,7 +861,7 @@ UNMAP_AFTER_INIT void Processor::cpu_detect()
|
||||||
|
|
||||||
u32 max_extended_leaf = CPUID(0x80000000).eax();
|
u32 max_extended_leaf = CPUID(0x80000000).eax();
|
||||||
|
|
||||||
ASSERT(max_extended_leaf >= 0x80000001);
|
VERIFY(max_extended_leaf >= 0x80000001);
|
||||||
CPUID extended_processor_info(0x80000001);
|
CPUID extended_processor_info(0x80000001);
|
||||||
if (extended_processor_info.edx() & (1 << 20))
|
if (extended_processor_info.edx() & (1 << 20))
|
||||||
set_feature(CPUFeature::NX);
|
set_feature(CPUFeature::NX);
|
||||||
|
@ -1049,14 +1049,14 @@ UNMAP_AFTER_INIT void Processor::early_initialize(u32 cpu)
|
||||||
cpu_setup();
|
cpu_setup();
|
||||||
gdt_init();
|
gdt_init();
|
||||||
|
|
||||||
ASSERT(is_initialized()); // sanity check
|
VERIFY(is_initialized()); // sanity check
|
||||||
ASSERT(&current() == this); // sanity check
|
VERIFY(&current() == this); // sanity check
|
||||||
}
|
}
|
||||||
|
|
||||||
UNMAP_AFTER_INIT void Processor::initialize(u32 cpu)
|
UNMAP_AFTER_INIT void Processor::initialize(u32 cpu)
|
||||||
{
|
{
|
||||||
ASSERT(m_self == this);
|
VERIFY(m_self == this);
|
||||||
ASSERT(&current() == this); // sanity check
|
VERIFY(&current() == this); // sanity check
|
||||||
|
|
||||||
dmesgln("CPU[{}]: Supported features: {}", id(), features_string());
|
dmesgln("CPU[{}]: Supported features: {}", id(), features_string());
|
||||||
if (!has_feature(CPUFeature::RDRAND))
|
if (!has_feature(CPUFeature::RDRAND))
|
||||||
|
@ -1069,7 +1069,7 @@ UNMAP_AFTER_INIT void Processor::initialize(u32 cpu)
|
||||||
flush_idt();
|
flush_idt();
|
||||||
|
|
||||||
if (cpu == 0) {
|
if (cpu == 0) {
|
||||||
ASSERT((FlatPtr(&s_clean_fpu_state) & 0xF) == 0);
|
VERIFY((FlatPtr(&s_clean_fpu_state) & 0xF) == 0);
|
||||||
asm volatile("fninit");
|
asm volatile("fninit");
|
||||||
asm volatile("fxsave %0"
|
asm volatile("fxsave %0"
|
||||||
: "=m"(s_clean_fpu_state));
|
: "=m"(s_clean_fpu_state));
|
||||||
|
@ -1095,7 +1095,7 @@ void Processor::write_raw_gdt_entry(u16 selector, u32 low, u32 high)
|
||||||
|
|
||||||
if (i > m_gdt_length) {
|
if (i > m_gdt_length) {
|
||||||
m_gdt_length = i + 1;
|
m_gdt_length = i + 1;
|
||||||
ASSERT(m_gdt_length <= sizeof(m_gdt) / sizeof(m_gdt[0]));
|
VERIFY(m_gdt_length <= sizeof(m_gdt) / sizeof(m_gdt[0]));
|
||||||
m_gdtr.limit = (m_gdt_length + 1) * 8 - 1;
|
m_gdtr.limit = (m_gdt_length + 1) * 8 - 1;
|
||||||
}
|
}
|
||||||
m_gdt[i].low = low;
|
m_gdt[i].low = low;
|
||||||
|
@ -1178,14 +1178,14 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
|
||||||
// reflect the status at the last context switch.
|
// reflect the status at the last context switch.
|
||||||
ScopedSpinLock lock(g_scheduler_lock);
|
ScopedSpinLock lock(g_scheduler_lock);
|
||||||
if (&thread == Processor::current_thread()) {
|
if (&thread == Processor::current_thread()) {
|
||||||
ASSERT(thread.state() == Thread::Running);
|
VERIFY(thread.state() == Thread::Running);
|
||||||
// Leave the scheduler lock. If we trigger page faults we may
|
// Leave the scheduler lock. If we trigger page faults we may
|
||||||
// need to be preempted. Since this is our own thread it won't
|
// need to be preempted. Since this is our own thread it won't
|
||||||
// cause any problems as the stack won't change below this frame.
|
// cause any problems as the stack won't change below this frame.
|
||||||
lock.unlock();
|
lock.unlock();
|
||||||
capture_current_thread();
|
capture_current_thread();
|
||||||
} else if (thread.is_active()) {
|
} else if (thread.is_active()) {
|
||||||
ASSERT(thread.cpu() != Processor::id());
|
VERIFY(thread.cpu() != Processor::id());
|
||||||
// If this is the case, the thread is currently running
|
// If this is the case, the thread is currently running
|
||||||
// on another processor. We can't trust the kernel stack as
|
// on another processor. We can't trust the kernel stack as
|
||||||
// it may be changing at any time. We need to probably send
|
// it may be changing at any time. We need to probably send
|
||||||
|
@ -1197,8 +1197,8 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
|
||||||
[&]() {
|
[&]() {
|
||||||
dbgln("CPU[{}] getting stack for cpu #{}", Processor::id(), proc.get_id());
|
dbgln("CPU[{}] getting stack for cpu #{}", Processor::id(), proc.get_id());
|
||||||
ProcessPagingScope paging_scope(thread.process());
|
ProcessPagingScope paging_scope(thread.process());
|
||||||
ASSERT(&Processor::current() != &proc);
|
VERIFY(&Processor::current() != &proc);
|
||||||
ASSERT(&thread == Processor::current_thread());
|
VERIFY(&thread == Processor::current_thread());
|
||||||
// NOTE: Because the other processor is still holding the
|
// NOTE: Because the other processor is still holding the
|
||||||
// scheduler lock while waiting for this callback to finish,
|
// scheduler lock while waiting for this callback to finish,
|
||||||
// the current thread on the target processor cannot change
|
// the current thread on the target processor cannot change
|
||||||
|
@ -1212,7 +1212,7 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
|
||||||
} else {
|
} else {
|
||||||
switch (thread.state()) {
|
switch (thread.state()) {
|
||||||
case Thread::Running:
|
case Thread::Running:
|
||||||
ASSERT_NOT_REACHED(); // should have been handled above
|
VERIFY_NOT_REACHED(); // should have been handled above
|
||||||
case Thread::Runnable:
|
case Thread::Runnable:
|
||||||
case Thread::Stopped:
|
case Thread::Stopped:
|
||||||
case Thread::Blocked:
|
case Thread::Blocked:
|
||||||
|
@ -1251,8 +1251,8 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
|
||||||
|
|
||||||
extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
|
extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
|
||||||
{
|
{
|
||||||
ASSERT(from_thread == to_thread || from_thread->state() != Thread::Running);
|
VERIFY(from_thread == to_thread || from_thread->state() != Thread::Running);
|
||||||
ASSERT(to_thread->state() == Thread::Running);
|
VERIFY(to_thread->state() == Thread::Running);
|
||||||
|
|
||||||
Processor::set_current_thread(*to_thread);
|
Processor::set_current_thread(*to_thread);
|
||||||
|
|
||||||
|
@ -1287,9 +1287,9 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
|
||||||
|
|
||||||
void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
|
void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
|
||||||
{
|
{
|
||||||
ASSERT(!in_irq());
|
VERIFY(!in_irq());
|
||||||
ASSERT(m_in_critical == 1);
|
VERIFY(m_in_critical == 1);
|
||||||
ASSERT(is_kernel_mode());
|
VERIFY(is_kernel_mode());
|
||||||
|
|
||||||
dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context --> switching out of: {} {}", VirtualAddress(from_thread), *from_thread);
|
dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context --> switching out of: {} {}", VirtualAddress(from_thread), *from_thread);
|
||||||
from_thread->save_critical(m_in_critical);
|
from_thread->save_critical(m_in_critical);
|
||||||
|
@ -1344,12 +1344,12 @@ void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
|
||||||
|
|
||||||
extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe_unused]] Thread* to_thread, [[maybe_unused]] TrapFrame* trap)
|
extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe_unused]] Thread* to_thread, [[maybe_unused]] TrapFrame* trap)
|
||||||
{
|
{
|
||||||
ASSERT(!are_interrupts_enabled());
|
VERIFY(!are_interrupts_enabled());
|
||||||
ASSERT(is_kernel_mode());
|
VERIFY(is_kernel_mode());
|
||||||
|
|
||||||
dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {} (context_first_init)", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
|
dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {} (context_first_init)", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
|
||||||
|
|
||||||
ASSERT(to_thread == Thread::current());
|
VERIFY(to_thread == Thread::current());
|
||||||
|
|
||||||
Scheduler::enter_current(*from_thread, true);
|
Scheduler::enter_current(*from_thread, true);
|
||||||
|
|
||||||
|
@ -1388,13 +1388,13 @@ void exit_kernel_thread(void)
|
||||||
|
|
||||||
u32 Processor::init_context(Thread& thread, bool leave_crit)
|
u32 Processor::init_context(Thread& thread, bool leave_crit)
|
||||||
{
|
{
|
||||||
ASSERT(is_kernel_mode());
|
VERIFY(is_kernel_mode());
|
||||||
ASSERT(g_scheduler_lock.is_locked());
|
VERIFY(g_scheduler_lock.is_locked());
|
||||||
if (leave_crit) {
|
if (leave_crit) {
|
||||||
// Leave the critical section we set up in in Process::exec,
|
// Leave the critical section we set up in in Process::exec,
|
||||||
// but because we still have the scheduler lock we should end up with 1
|
// but because we still have the scheduler lock we should end up with 1
|
||||||
m_in_critical--; // leave it without triggering anything or restoring flags
|
m_in_critical--; // leave it without triggering anything or restoring flags
|
||||||
ASSERT(in_critical() == 1);
|
VERIFY(in_critical() == 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
u32 kernel_stack_top = thread.kernel_stack_top();
|
u32 kernel_stack_top = thread.kernel_stack_top();
|
||||||
|
@ -1405,7 +1405,7 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
|
||||||
u32 stack_top = kernel_stack_top;
|
u32 stack_top = kernel_stack_top;
|
||||||
|
|
||||||
// TODO: handle NT?
|
// TODO: handle NT?
|
||||||
ASSERT((cpu_flags() & 0x24000) == 0); // Assume !(NT | VM)
|
VERIFY((cpu_flags() & 0x24000) == 0); // Assume !(NT | VM)
|
||||||
|
|
||||||
auto& tss = thread.tss();
|
auto& tss = thread.tss();
|
||||||
bool return_to_user = (tss.cs & 3) != 0;
|
bool return_to_user = (tss.cs & 3) != 0;
|
||||||
|
@ -1503,7 +1503,7 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
|
||||||
|
|
||||||
extern "C" u32 do_init_context(Thread* thread, u32 flags)
|
extern "C" u32 do_init_context(Thread* thread, u32 flags)
|
||||||
{
|
{
|
||||||
ASSERT_INTERRUPTS_DISABLED();
|
VERIFY_INTERRUPTS_DISABLED();
|
||||||
thread->tss().eflags = flags;
|
thread->tss().eflags = flags;
|
||||||
return Processor::current().init_context(*thread, true);
|
return Processor::current().init_context(*thread, true);
|
||||||
}
|
}
|
||||||
|
@ -1536,18 +1536,18 @@ void Processor::assume_context(Thread& thread, u32 flags)
|
||||||
{
|
{
|
||||||
dbgln_if(CONTEXT_SWITCH_DEBUG, "Assume context for thread {} {}", VirtualAddress(&thread), thread);
|
dbgln_if(CONTEXT_SWITCH_DEBUG, "Assume context for thread {} {}", VirtualAddress(&thread), thread);
|
||||||
|
|
||||||
ASSERT_INTERRUPTS_DISABLED();
|
VERIFY_INTERRUPTS_DISABLED();
|
||||||
Scheduler::prepare_after_exec();
|
Scheduler::prepare_after_exec();
|
||||||
// in_critical() should be 2 here. The critical section in Process::exec
|
// in_critical() should be 2 here. The critical section in Process::exec
|
||||||
// and then the scheduler lock
|
// and then the scheduler lock
|
||||||
ASSERT(Processor::current().in_critical() == 2);
|
VERIFY(Processor::current().in_critical() == 2);
|
||||||
do_assume_context(&thread, flags);
|
do_assume_context(&thread, flags);
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
}
|
}
|
||||||
|
|
||||||
extern "C" UNMAP_AFTER_INIT void pre_init_finished(void)
|
extern "C" UNMAP_AFTER_INIT void pre_init_finished(void)
|
||||||
{
|
{
|
||||||
ASSERT(g_scheduler_lock.own_lock());
|
VERIFY(g_scheduler_lock.own_lock());
|
||||||
|
|
||||||
// Because init_finished() will wait on the other APs, we need
|
// Because init_finished() will wait on the other APs, we need
|
||||||
// to release the scheduler lock so that the other APs can also get
|
// to release the scheduler lock so that the other APs can also get
|
||||||
|
@ -1567,7 +1567,7 @@ extern "C" UNMAP_AFTER_INIT void post_init_finished(void)
|
||||||
|
|
||||||
UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_thread)
|
UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_thread)
|
||||||
{
|
{
|
||||||
ASSERT(initial_thread.process().is_kernel_process());
|
VERIFY(initial_thread.process().is_kernel_process());
|
||||||
|
|
||||||
auto& tss = initial_thread.tss();
|
auto& tss = initial_thread.tss();
|
||||||
m_tss = tss;
|
m_tss = tss;
|
||||||
|
@ -1605,13 +1605,13 @@ UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_th
|
||||||
);
|
);
|
||||||
// clang-format on
|
// clang-format on
|
||||||
|
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
}
|
}
|
||||||
|
|
||||||
void Processor::enter_trap(TrapFrame& trap, bool raise_irq)
|
void Processor::enter_trap(TrapFrame& trap, bool raise_irq)
|
||||||
{
|
{
|
||||||
ASSERT_INTERRUPTS_DISABLED();
|
VERIFY_INTERRUPTS_DISABLED();
|
||||||
ASSERT(&Processor::current() == this);
|
VERIFY(&Processor::current() == this);
|
||||||
trap.prev_irq_level = m_in_irq;
|
trap.prev_irq_level = m_in_irq;
|
||||||
if (raise_irq)
|
if (raise_irq)
|
||||||
m_in_irq++;
|
m_in_irq++;
|
||||||
|
@ -1629,9 +1629,9 @@ void Processor::enter_trap(TrapFrame& trap, bool raise_irq)
|
||||||
|
|
||||||
void Processor::exit_trap(TrapFrame& trap)
|
void Processor::exit_trap(TrapFrame& trap)
|
||||||
{
|
{
|
||||||
ASSERT_INTERRUPTS_DISABLED();
|
VERIFY_INTERRUPTS_DISABLED();
|
||||||
ASSERT(&Processor::current() == this);
|
VERIFY(&Processor::current() == this);
|
||||||
ASSERT(m_in_irq >= trap.prev_irq_level);
|
VERIFY(m_in_irq >= trap.prev_irq_level);
|
||||||
m_in_irq = trap.prev_irq_level;
|
m_in_irq = trap.prev_irq_level;
|
||||||
|
|
||||||
smp_process_pending_messages();
|
smp_process_pending_messages();
|
||||||
|
@ -1644,7 +1644,7 @@ void Processor::exit_trap(TrapFrame& trap)
|
||||||
auto& current_trap = current_thread->current_trap();
|
auto& current_trap = current_thread->current_trap();
|
||||||
current_trap = trap.next_trap;
|
current_trap = trap.next_trap;
|
||||||
if (current_trap) {
|
if (current_trap) {
|
||||||
ASSERT(current_trap->regs);
|
VERIFY(current_trap->regs);
|
||||||
// If we have another higher level trap then we probably returned
|
// If we have another higher level trap then we probably returned
|
||||||
// from an interrupt or irq handler. The cs register of the
|
// from an interrupt or irq handler. The cs register of the
|
||||||
// new/higher level trap tells us what the mode prior to it was
|
// new/higher level trap tells us what the mode prior to it was
|
||||||
|
@ -1659,8 +1659,8 @@ void Processor::exit_trap(TrapFrame& trap)
|
||||||
|
|
||||||
void Processor::check_invoke_scheduler()
|
void Processor::check_invoke_scheduler()
|
||||||
{
|
{
|
||||||
ASSERT(!m_in_irq);
|
VERIFY(!m_in_irq);
|
||||||
ASSERT(!m_in_critical);
|
VERIFY(!m_in_critical);
|
||||||
if (m_invoke_scheduler_async && m_scheduler_initialized) {
|
if (m_invoke_scheduler_async && m_scheduler_initialized) {
|
||||||
m_invoke_scheduler_async = false;
|
m_invoke_scheduler_async = false;
|
||||||
Scheduler::invoke_async();
|
Scheduler::invoke_async();
|
||||||
|
@ -1724,7 +1724,7 @@ ProcessorMessage& Processor::smp_get_from_pool()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ASSERT(msg != nullptr);
|
VERIFY(msg != nullptr);
|
||||||
return *msg;
|
return *msg;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1732,15 +1732,15 @@ Atomic<u32> Processor::s_idle_cpu_mask { 0 };
|
||||||
|
|
||||||
u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
|
u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
|
||||||
{
|
{
|
||||||
ASSERT(Processor::current().in_critical());
|
VERIFY(Processor::current().in_critical());
|
||||||
ASSERT(wake_count > 0);
|
VERIFY(wake_count > 0);
|
||||||
if (!s_smp_enabled)
|
if (!s_smp_enabled)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
// Wake at most N - 1 processors
|
// Wake at most N - 1 processors
|
||||||
if (wake_count >= Processor::count()) {
|
if (wake_count >= Processor::count()) {
|
||||||
wake_count = Processor::count() - 1;
|
wake_count = Processor::count() - 1;
|
||||||
ASSERT(wake_count > 0);
|
VERIFY(wake_count > 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
u32 current_id = Processor::current().id();
|
u32 current_id = Processor::current().id();
|
||||||
|
@ -1853,7 +1853,7 @@ bool Processor::smp_process_pending_messages()
|
||||||
case ProcessorMessage::FlushTlb:
|
case ProcessorMessage::FlushTlb:
|
||||||
if (is_user_address(VirtualAddress(msg->flush_tlb.ptr))) {
|
if (is_user_address(VirtualAddress(msg->flush_tlb.ptr))) {
|
||||||
// We assume that we don't cross into kernel land!
|
// We assume that we don't cross into kernel land!
|
||||||
ASSERT(is_user_range(VirtualAddress(msg->flush_tlb.ptr), msg->flush_tlb.page_count * PAGE_SIZE));
|
VERIFY(is_user_range(VirtualAddress(msg->flush_tlb.ptr), msg->flush_tlb.page_count * PAGE_SIZE));
|
||||||
if (read_cr3() != msg->flush_tlb.page_directory->cr3()) {
|
if (read_cr3() != msg->flush_tlb.page_directory->cr3()) {
|
||||||
// This processor isn't using this page directory right now, we can ignore this request
|
// This processor isn't using this page directory right now, we can ignore this request
|
||||||
dbgln_if(SMP_DEBUG, "SMP[{}]: No need to flush {} pages at {}", id(), msg->flush_tlb.page_count, VirtualAddress(msg->flush_tlb.ptr));
|
dbgln_if(SMP_DEBUG, "SMP[{}]: No need to flush {} pages at {}", id(), msg->flush_tlb.page_count, VirtualAddress(msg->flush_tlb.ptr));
|
||||||
|
@ -1866,7 +1866,7 @@ bool Processor::smp_process_pending_messages()
|
||||||
|
|
||||||
bool is_async = msg->async; // Need to cache this value *before* dropping the ref count!
|
bool is_async = msg->async; // Need to cache this value *before* dropping the ref count!
|
||||||
auto prev_refs = atomic_fetch_sub(&msg->refs, 1u, AK::MemoryOrder::memory_order_acq_rel);
|
auto prev_refs = atomic_fetch_sub(&msg->refs, 1u, AK::MemoryOrder::memory_order_acq_rel);
|
||||||
ASSERT(prev_refs != 0);
|
VERIFY(prev_refs != 0);
|
||||||
if (prev_refs == 1) {
|
if (prev_refs == 1) {
|
||||||
// All processors handled this. If this is an async message,
|
// All processors handled this. If this is an async message,
|
||||||
// we need to clean it up and return it to the pool
|
// we need to clean it up and return it to the pool
|
||||||
|
@ -1894,7 +1894,7 @@ bool Processor::smp_queue_message(ProcessorMessage& msg)
|
||||||
// the queue at any given time. We rely on the fact that the messages
|
// the queue at any given time. We rely on the fact that the messages
|
||||||
// are pooled and never get freed!
|
// are pooled and never get freed!
|
||||||
auto& msg_entry = msg.per_proc_entries[id()];
|
auto& msg_entry = msg.per_proc_entries[id()];
|
||||||
ASSERT(msg_entry.msg == &msg);
|
VERIFY(msg_entry.msg == &msg);
|
||||||
ProcessorMessageEntry* next = nullptr;
|
ProcessorMessageEntry* next = nullptr;
|
||||||
do {
|
do {
|
||||||
msg_entry.next = next;
|
msg_entry.next = next;
|
||||||
|
@ -1909,7 +1909,7 @@ void Processor::smp_broadcast_message(ProcessorMessage& msg)
|
||||||
dbgln_if(SMP_DEBUG, "SMP[{}]: Broadcast message {} to cpus: {} proc: {}", cur_proc.get_id(), VirtualAddress(&msg), count(), VirtualAddress(&cur_proc));
|
dbgln_if(SMP_DEBUG, "SMP[{}]: Broadcast message {} to cpus: {} proc: {}", cur_proc.get_id(), VirtualAddress(&msg), count(), VirtualAddress(&cur_proc));
|
||||||
|
|
||||||
atomic_store(&msg.refs, count() - 1, AK::MemoryOrder::memory_order_release);
|
atomic_store(&msg.refs, count() - 1, AK::MemoryOrder::memory_order_release);
|
||||||
ASSERT(msg.refs > 0);
|
VERIFY(msg.refs > 0);
|
||||||
bool need_broadcast = false;
|
bool need_broadcast = false;
|
||||||
for_each(
|
for_each(
|
||||||
[&](Processor& proc) -> IterationDecision {
|
[&](Processor& proc) -> IterationDecision {
|
||||||
|
@ -1928,7 +1928,7 @@ void Processor::smp_broadcast_message(ProcessorMessage& msg)
|
||||||
void Processor::smp_broadcast_wait_sync(ProcessorMessage& msg)
|
void Processor::smp_broadcast_wait_sync(ProcessorMessage& msg)
|
||||||
{
|
{
|
||||||
auto& cur_proc = Processor::current();
|
auto& cur_proc = Processor::current();
|
||||||
ASSERT(!msg.async);
|
VERIFY(!msg.async);
|
||||||
// If synchronous then we must cleanup and return the message back
|
// If synchronous then we must cleanup and return the message back
|
||||||
// to the pool. Otherwise, the last processor to complete it will return it
|
// to the pool. Otherwise, the last processor to complete it will return it
|
||||||
while (atomic_load(&msg.refs, AK::MemoryOrder::memory_order_consume) != 0) {
|
while (atomic_load(&msg.refs, AK::MemoryOrder::memory_order_consume) != 0) {
|
||||||
|
@ -1971,7 +1971,7 @@ void Processor::smp_broadcast(void (*callback)(), bool async)
|
||||||
void Processor::smp_unicast_message(u32 cpu, ProcessorMessage& msg, bool async)
|
void Processor::smp_unicast_message(u32 cpu, ProcessorMessage& msg, bool async)
|
||||||
{
|
{
|
||||||
auto& cur_proc = Processor::current();
|
auto& cur_proc = Processor::current();
|
||||||
ASSERT(cpu != cur_proc.get_id());
|
VERIFY(cpu != cur_proc.get_id());
|
||||||
auto& target_proc = processors()[cpu];
|
auto& target_proc = processors()[cpu];
|
||||||
msg.async = async;
|
msg.async = async;
|
||||||
|
|
||||||
|
@ -2068,8 +2068,8 @@ UNMAP_AFTER_INIT void Processor::deferred_call_pool_init()
|
||||||
|
|
||||||
void Processor::deferred_call_return_to_pool(DeferredCallEntry* entry)
|
void Processor::deferred_call_return_to_pool(DeferredCallEntry* entry)
|
||||||
{
|
{
|
||||||
ASSERT(m_in_critical);
|
VERIFY(m_in_critical);
|
||||||
ASSERT(!entry->was_allocated);
|
VERIFY(!entry->was_allocated);
|
||||||
|
|
||||||
entry->next = m_free_deferred_call_pool_entry;
|
entry->next = m_free_deferred_call_pool_entry;
|
||||||
m_free_deferred_call_pool_entry = entry;
|
m_free_deferred_call_pool_entry = entry;
|
||||||
|
@ -2077,13 +2077,13 @@ void Processor::deferred_call_return_to_pool(DeferredCallEntry* entry)
|
||||||
|
|
||||||
DeferredCallEntry* Processor::deferred_call_get_free()
|
DeferredCallEntry* Processor::deferred_call_get_free()
|
||||||
{
|
{
|
||||||
ASSERT(m_in_critical);
|
VERIFY(m_in_critical);
|
||||||
|
|
||||||
if (m_free_deferred_call_pool_entry) {
|
if (m_free_deferred_call_pool_entry) {
|
||||||
// Fast path, we have an entry in our pool
|
// Fast path, we have an entry in our pool
|
||||||
auto* entry = m_free_deferred_call_pool_entry;
|
auto* entry = m_free_deferred_call_pool_entry;
|
||||||
m_free_deferred_call_pool_entry = entry->next;
|
m_free_deferred_call_pool_entry = entry->next;
|
||||||
ASSERT(!entry->was_allocated);
|
VERIFY(!entry->was_allocated);
|
||||||
return entry;
|
return entry;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2094,7 +2094,7 @@ DeferredCallEntry* Processor::deferred_call_get_free()
|
||||||
|
|
||||||
void Processor::deferred_call_execute_pending()
|
void Processor::deferred_call_execute_pending()
|
||||||
{
|
{
|
||||||
ASSERT(m_in_critical);
|
VERIFY(m_in_critical);
|
||||||
|
|
||||||
if (!m_pending_deferred_calls)
|
if (!m_pending_deferred_calls)
|
||||||
return;
|
return;
|
||||||
|
@ -2137,7 +2137,7 @@ void Processor::deferred_call_execute_pending()
|
||||||
|
|
||||||
void Processor::deferred_call_queue_entry(DeferredCallEntry* entry)
|
void Processor::deferred_call_queue_entry(DeferredCallEntry* entry)
|
||||||
{
|
{
|
||||||
ASSERT(m_in_critical);
|
VERIFY(m_in_critical);
|
||||||
entry->next = m_pending_deferred_calls;
|
entry->next = m_pending_deferred_calls;
|
||||||
m_pending_deferred_calls = entry;
|
m_pending_deferred_calls = entry;
|
||||||
}
|
}
|
||||||
|
|
|
@ -912,14 +912,14 @@ public:
|
||||||
|
|
||||||
ALWAYS_INLINE void restore_irq(u32 prev_irq)
|
ALWAYS_INLINE void restore_irq(u32 prev_irq)
|
||||||
{
|
{
|
||||||
ASSERT(prev_irq <= m_in_irq);
|
VERIFY(prev_irq <= m_in_irq);
|
||||||
if (!prev_irq) {
|
if (!prev_irq) {
|
||||||
u32 prev_critical = 0;
|
u32 prev_critical = 0;
|
||||||
if (m_in_critical.compare_exchange_strong(prev_critical, 1)) {
|
if (m_in_critical.compare_exchange_strong(prev_critical, 1)) {
|
||||||
m_in_irq = prev_irq;
|
m_in_irq = prev_irq;
|
||||||
deferred_call_execute_pending();
|
deferred_call_execute_pending();
|
||||||
auto prev_raised = m_in_critical.exchange(prev_critical);
|
auto prev_raised = m_in_critical.exchange(prev_critical);
|
||||||
ASSERT(prev_raised == prev_critical + 1);
|
VERIFY(prev_raised == prev_critical + 1);
|
||||||
check_invoke_scheduler();
|
check_invoke_scheduler();
|
||||||
} else if (prev_critical == 0) {
|
} else if (prev_critical == 0) {
|
||||||
check_invoke_scheduler();
|
check_invoke_scheduler();
|
||||||
|
@ -949,11 +949,11 @@ public:
|
||||||
ALWAYS_INLINE void leave_critical(u32 prev_flags)
|
ALWAYS_INLINE void leave_critical(u32 prev_flags)
|
||||||
{
|
{
|
||||||
cli(); // Need to prevent IRQs from interrupting us here!
|
cli(); // Need to prevent IRQs from interrupting us here!
|
||||||
ASSERT(m_in_critical > 0);
|
VERIFY(m_in_critical > 0);
|
||||||
if (m_in_critical == 1) {
|
if (m_in_critical == 1) {
|
||||||
if (!m_in_irq) {
|
if (!m_in_irq) {
|
||||||
deferred_call_execute_pending();
|
deferred_call_execute_pending();
|
||||||
ASSERT(m_in_critical == 1);
|
VERIFY(m_in_critical == 1);
|
||||||
}
|
}
|
||||||
m_in_critical--;
|
m_in_critical--;
|
||||||
if (!m_in_irq)
|
if (!m_in_irq)
|
||||||
|
@ -981,7 +981,7 @@ public:
|
||||||
ALWAYS_INLINE void restore_critical(u32 prev_crit, u32 prev_flags)
|
ALWAYS_INLINE void restore_critical(u32 prev_crit, u32 prev_flags)
|
||||||
{
|
{
|
||||||
m_in_critical.store(prev_crit, AK::MemoryOrder::memory_order_release);
|
m_in_critical.store(prev_crit, AK::MemoryOrder::memory_order_release);
|
||||||
ASSERT(!prev_crit || !(prev_flags & 0x200));
|
VERIFY(!prev_crit || !(prev_flags & 0x200));
|
||||||
if (prev_flags & 0x200)
|
if (prev_flags & 0x200)
|
||||||
sti();
|
sti();
|
||||||
else
|
else
|
||||||
|
@ -1105,14 +1105,14 @@ public:
|
||||||
|
|
||||||
void leave()
|
void leave()
|
||||||
{
|
{
|
||||||
ASSERT(m_valid);
|
VERIFY(m_valid);
|
||||||
m_valid = false;
|
m_valid = false;
|
||||||
Processor::current().leave_critical(m_prev_flags);
|
Processor::current().leave_critical(m_prev_flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
void enter()
|
void enter()
|
||||||
{
|
{
|
||||||
ASSERT(!m_valid);
|
VERIFY(!m_valid);
|
||||||
m_valid = true;
|
m_valid = true;
|
||||||
Processor::current().enter_critical(m_prev_flags);
|
Processor::current().enter_critical(m_prev_flags);
|
||||||
}
|
}
|
||||||
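Aside on the pattern above: leave() and enter() belong to an RAII-style guard that tracks whether it currently holds the critical section and checks that flag before toggling it. The following is only a rough user-space sketch of that shape (hypothetical Resource/ScopedGuard names, plain assert() standing in for the kernel's checks), not the kernel class itself:

    #include <cassert>

    // Stand-in for Processor::current().enter_critical()/leave_critical():
    // a resource with explicit acquire/release and a depth counter.
    struct Resource {
        int depth = 0;
        void acquire() { ++depth; }
        void release() { assert(depth > 0); --depth; }
    };

    // RAII guard that can temporarily give the resource back and re-take it,
    // mirroring the leave()/enter() pair shown above.
    class ScopedGuard {
    public:
        explicit ScopedGuard(Resource& resource)
            : m_resource(resource)
        {
            enter();
        }
        ~ScopedGuard()
        {
            if (m_valid)
                leave();
        }
        void leave()
        {
            assert(m_valid); // must currently hold the resource
            m_valid = false;
            m_resource.release();
        }
        void enter()
        {
            assert(!m_valid); // must not already hold the resource
            m_valid = true;
            m_resource.acquire();
        }

    private:
        Resource& m_resource;
        bool m_valid { false };
    };

    int main()
    {
        Resource critical_section;
        ScopedGuard guard(critical_section);
        guard.leave(); // drop it for a moment
        guard.enter(); // and re-take it before the guard goes out of scope
        return 0;
    }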
|
|
|
@ -52,7 +52,7 @@ ProcessorInfo::ProcessorInfo(Processor& processor)
|
||||||
m_cpuid = builder.build();
|
m_cpuid = builder.build();
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
ASSERT(max_leaf >= 1);
|
VERIFY(max_leaf >= 1);
|
||||||
CPUID cpuid(1);
|
CPUID cpuid(1);
|
||||||
m_stepping = cpuid.eax() & 0xf;
|
m_stepping = cpuid.eax() & 0xf;
|
||||||
u32 model = (cpuid.eax() >> 4) & 0xf;
|
u32 model = (cpuid.eax() >> 4) & 0xf;
|
||||||
|
|
|
@ -31,11 +31,11 @@
|
||||||
|
|
||||||
#ifdef DEBUG
|
#ifdef DEBUG
|
||||||
[[noreturn]] void __assertion_failed(const char* msg, const char* file, unsigned line, const char* func);
|
[[noreturn]] void __assertion_failed(const char* msg, const char* file, unsigned line, const char* func);
|
||||||
# define ASSERT(expr) (static_cast<bool>(expr) ? void(0) : __assertion_failed(# expr, __FILE__, __LINE__, __PRETTY_FUNCTION__))
|
# define VERIFY(expr) (static_cast<bool>(expr) ? void(0) : __assertion_failed(# expr, __FILE__, __LINE__, __PRETTY_FUNCTION__))
|
||||||
# define ASSERT_NOT_REACHED() ASSERT(false)
|
# define VERIFY_NOT_REACHED() VERIFY(false)
|
||||||
#else
|
#else
|
||||||
# define ASSERT(expr)
|
# define VERIFY(expr)
|
||||||
# define ASSERT_NOT_REACHED() CRASH()
|
# define VERIFY_NOT_REACHED() CRASH()
|
||||||
#endif
|
#endif
|
||||||
#define CRASH() \
|
#define CRASH() \
|
||||||
do { \
|
do { \
|
||||||
|
@ -47,6 +47,6 @@
|
||||||
CRASH(); \
|
CRASH(); \
|
||||||
} while (0)
|
} while (0)
|
||||||
|
|
||||||
#define ASSERT_INTERRUPTS_DISABLED() ASSERT(!(cpu_flags() & 0x200))
|
#define VERIFY_INTERRUPTS_DISABLED() VERIFY(!(cpu_flags() & 0x200))
|
||||||
#define ASSERT_INTERRUPTS_ENABLED() ASSERT(cpu_flags() & 0x200)
|
#define VERIFY_INTERRUPTS_ENABLED() VERIFY(cpu_flags() & 0x200)
|
||||||
#define TODO ASSERT_NOT_REACHED
|
#define TODO VERIFY_NOT_REACHED
|
||||||
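For readers following the rename itself: the hunk above keeps the macro's expansion identical and only changes its name; when the check is enabled it expands to a plain runtime conditional that calls a [[noreturn]] failure handler. A minimal standalone sketch of that shape of macro (hypothetical MY_VERIFY name and a simplified handler, not the project's actual header):

    #include <cstdio>
    #include <cstdlib>

    // Hypothetical failure handler, simplified from the __assertion_failed()
    // declaration in the hunk above.
    [[noreturn]] static void check_failed(const char* expr, const char* file, unsigned line, const char* func)
    {
        std::fprintf(stderr, "CHECK FAILED: %s\n  at %s:%u in %s\n", expr, file, line, func);
        std::abort();
    }

    // Same ternary shape as the macro above, so it stays usable in expression
    // contexts; MY_VERIFY is a placeholder name for this sketch only.
    #define MY_VERIFY(expr) \
        (static_cast<bool>(expr) ? void(0) : check_failed(#expr, __FILE__, __LINE__, __func__))
    #define MY_VERIFY_NOT_REACHED() MY_VERIFY(false)

    int main()
    {
        int interrupt_number = 80;
        MY_VERIFY(interrupt_number >= 0 && interrupt_number < 256); // passes, program continues
        return 0;
    }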
|
|
|
@ -45,13 +45,13 @@ UNMAP_AFTER_INIT void CommandLine::early_initialize(const char* cmd_line)
|
||||||
|
|
||||||
const CommandLine& kernel_command_line()
|
const CommandLine& kernel_command_line()
|
||||||
{
|
{
|
||||||
ASSERT(s_the);
|
VERIFY(s_the);
|
||||||
return *s_the;
|
return *s_the;
|
||||||
}
|
}
|
||||||
|
|
||||||
UNMAP_AFTER_INIT void CommandLine::initialize()
|
UNMAP_AFTER_INIT void CommandLine::initialize()
|
||||||
{
|
{
|
||||||
ASSERT(!s_the);
|
VERIFY(!s_the);
|
||||||
s_the = new CommandLine(s_cmd_line);
|
s_the = new CommandLine(s_cmd_line);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -81,7 +81,7 @@ size_t DMIExpose::structure_table_length() const
|
||||||
|
|
||||||
UNMAP_AFTER_INIT void DMIExpose::initialize_exposer()
|
UNMAP_AFTER_INIT void DMIExpose::initialize_exposer()
|
||||||
{
|
{
|
||||||
ASSERT(!(m_entry_point.is_null()));
|
VERIFY(!(m_entry_point.is_null()));
|
||||||
if (m_using_64bit_entry_point) {
|
if (m_using_64bit_entry_point) {
|
||||||
set_64_bit_entry_initialization_values();
|
set_64_bit_entry_initialization_values();
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@ -39,8 +39,8 @@ AsyncDeviceRequest::~AsyncDeviceRequest()
|
||||||
{
|
{
|
||||||
{
|
{
|
||||||
ScopedSpinLock lock(m_lock);
|
ScopedSpinLock lock(m_lock);
|
||||||
ASSERT(is_completed_result(m_result));
|
VERIFY(is_completed_result(m_result));
|
||||||
ASSERT(m_sub_requests_pending.is_empty());
|
VERIFY(m_sub_requests_pending.is_empty());
|
||||||
}
|
}
|
||||||
|
|
||||||
// We should not need any locking here anymore. The destructor should
|
// We should not need any locking here anymore. The destructor should
|
||||||
|
@ -50,8 +50,8 @@ AsyncDeviceRequest::~AsyncDeviceRequest()
|
||||||
// Which means there should be no more pending sub-requests and the
|
// Which means there should be no more pending sub-requests and the
|
||||||
// entire AsyncDeviceRequest hierarchy should be immutable.
|
// entire AsyncDeviceRequest hierarchy should be immutable.
|
||||||
for (auto& sub_request : m_sub_requests_complete) {
|
for (auto& sub_request : m_sub_requests_complete) {
|
||||||
ASSERT(is_completed_result(sub_request.m_result)); // Shouldn't need any locking anymore
|
VERIFY(is_completed_result(sub_request.m_result)); // Shouldn't need any locking anymore
|
||||||
ASSERT(sub_request.m_parent_request == this);
|
VERIFY(sub_request.m_parent_request == this);
|
||||||
sub_request.m_parent_request = nullptr;
|
sub_request.m_parent_request = nullptr;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -70,7 +70,7 @@ void AsyncDeviceRequest::request_finished()
|
||||||
|
|
||||||
auto AsyncDeviceRequest::wait(timeval* timeout) -> RequestWaitResult
|
auto AsyncDeviceRequest::wait(timeval* timeout) -> RequestWaitResult
|
||||||
{
|
{
|
||||||
ASSERT(!m_parent_request);
|
VERIFY(!m_parent_request);
|
||||||
auto request_result = get_request_result();
|
auto request_result = get_request_result();
|
||||||
if (is_completed_result(request_result))
|
if (is_completed_result(request_result))
|
||||||
return { request_result, Thread::BlockResult::NotBlocked };
|
return { request_result, Thread::BlockResult::NotBlocked };
|
||||||
|
@ -87,14 +87,14 @@ auto AsyncDeviceRequest::get_request_result() const -> RequestResult
|
||||||
void AsyncDeviceRequest::add_sub_request(NonnullRefPtr<AsyncDeviceRequest> sub_request)
|
void AsyncDeviceRequest::add_sub_request(NonnullRefPtr<AsyncDeviceRequest> sub_request)
|
||||||
{
|
{
|
||||||
// Sub-requests cannot be for the same device
|
// Sub-requests cannot be for the same device
|
||||||
ASSERT(&m_device != &sub_request->m_device);
|
VERIFY(&m_device != &sub_request->m_device);
|
||||||
ASSERT(sub_request->m_parent_request == nullptr);
|
VERIFY(sub_request->m_parent_request == nullptr);
|
||||||
sub_request->m_parent_request = this;
|
sub_request->m_parent_request = this;
|
||||||
|
|
||||||
bool should_start;
|
bool should_start;
|
||||||
{
|
{
|
||||||
ScopedSpinLock lock(m_lock);
|
ScopedSpinLock lock(m_lock);
|
||||||
ASSERT(!is_completed_result(m_result));
|
VERIFY(!is_completed_result(m_result));
|
||||||
m_sub_requests_pending.append(sub_request);
|
m_sub_requests_pending.append(sub_request);
|
||||||
should_start = (m_result == Started);
|
should_start = (m_result == Started);
|
||||||
}
|
}
|
||||||
|
@ -107,7 +107,7 @@ void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request)
|
||||||
bool all_completed;
|
bool all_completed;
|
||||||
{
|
{
|
||||||
ScopedSpinLock lock(m_lock);
|
ScopedSpinLock lock(m_lock);
|
||||||
ASSERT(m_result == Started);
|
VERIFY(m_result == Started);
|
||||||
size_t index;
|
size_t index;
|
||||||
for (index = 0; index < m_sub_requests_pending.size(); index++) {
|
for (index = 0; index < m_sub_requests_pending.size(); index++) {
|
||||||
if (&m_sub_requests_pending[index] == &sub_request) {
|
if (&m_sub_requests_pending[index] == &sub_request) {
|
||||||
|
@ -117,7 +117,7 @@ void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request)
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
ASSERT(index < m_sub_requests_pending.size());
|
VERIFY(index < m_sub_requests_pending.size());
|
||||||
all_completed = m_sub_requests_pending.is_empty();
|
all_completed = m_sub_requests_pending.is_empty();
|
||||||
if (all_completed) {
|
if (all_completed) {
|
||||||
// Aggregate any errors
|
// Aggregate any errors
|
||||||
|
@ -126,7 +126,7 @@ void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request)
|
||||||
for (index = 0; index < m_sub_requests_complete.size(); index++) {
|
for (index = 0; index < m_sub_requests_complete.size(); index++) {
|
||||||
auto& sub_request = m_sub_requests_complete[index];
|
auto& sub_request = m_sub_requests_complete[index];
|
||||||
auto sub_result = sub_request.get_request_result();
|
auto sub_result = sub_request.get_request_result();
|
||||||
ASSERT(is_completed_result(sub_result));
|
VERIFY(is_completed_result(sub_result));
|
||||||
switch (sub_result) {
|
switch (sub_result) {
|
||||||
case Failure:
|
case Failure:
|
||||||
any_failures = true;
|
any_failures = true;
|
||||||
|
@ -154,11 +154,11 @@ void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request)
|
||||||
|
|
||||||
void AsyncDeviceRequest::complete(RequestResult result)
|
void AsyncDeviceRequest::complete(RequestResult result)
|
||||||
{
|
{
|
||||||
ASSERT(result == Success || result == Failure || result == MemoryFault);
|
VERIFY(result == Success || result == Failure || result == MemoryFault);
|
||||||
ScopedCritical critical;
|
ScopedCritical critical;
|
||||||
{
|
{
|
||||||
ScopedSpinLock lock(m_lock);
|
ScopedSpinLock lock(m_lock);
|
||||||
ASSERT(m_result == Started);
|
VERIFY(m_result == Started);
|
||||||
m_result = result;
|
m_result = result;
|
||||||
}
|
}
|
||||||
if (Processor::current().in_irq()) {
|
if (Processor::current().in_irq()) {
|
||||||
|
|
|
@ -87,7 +87,7 @@ public:
|
||||||
|
|
||||||
void set_private(void* priv)
|
void set_private(void* priv)
|
||||||
{
|
{
|
||||||
ASSERT(!m_private || !priv);
|
VERIFY(!m_private || !priv);
|
||||||
m_private = priv;
|
m_private = priv;
|
||||||
}
|
}
|
||||||
void* get_private() const { return m_private; }
|
void* get_private() const { return m_private; }
|
||||||
|
|
|
@ -101,7 +101,7 @@ u16 BXVGADevice::get_register(u16 index)
|
||||||
void BXVGADevice::revert_resolution()
|
void BXVGADevice::revert_resolution()
|
||||||
{
|
{
|
||||||
set_resolution_registers(m_framebuffer_width, m_framebuffer_height);
|
set_resolution_registers(m_framebuffer_width, m_framebuffer_height);
|
||||||
ASSERT(validate_setup_resolution(m_framebuffer_width, m_framebuffer_height));
|
VERIFY(validate_setup_resolution(m_framebuffer_width, m_framebuffer_height));
|
||||||
}
|
}
|
||||||
|
|
||||||
void BXVGADevice::set_resolution_registers(size_t width, size_t height)
|
void BXVGADevice::set_resolution_registers(size_t width, size_t height)
|
||||||
|
@ -152,7 +152,7 @@ bool BXVGADevice::validate_setup_resolution(size_t width, size_t height)
|
||||||
|
|
||||||
void BXVGADevice::set_y_offset(size_t y_offset)
|
void BXVGADevice::set_y_offset(size_t y_offset)
|
||||||
{
|
{
|
||||||
ASSERT(y_offset == 0 || y_offset == m_framebuffer_height);
|
VERIFY(y_offset == 0 || y_offset == m_framebuffer_height);
|
||||||
m_y_offset = y_offset;
|
m_y_offset = y_offset;
|
||||||
set_register(VBE_DISPI_INDEX_Y_OFFSET, (u16)y_offset);
|
set_register(VBE_DISPI_INDEX_Y_OFFSET, (u16)y_offset);
|
||||||
}
|
}
|
||||||
|
|
|
@ -64,7 +64,7 @@ bool BlockDevice::read_block(unsigned index, UserOrKernelBuffer& buffer)
|
||||||
dbgln("BlockDevice::read_block({}) cancelled", index);
|
dbgln("BlockDevice::read_block({}) cancelled", index);
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
}
|
}
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
@ -85,7 +85,7 @@ bool BlockDevice::write_block(unsigned index, const UserOrKernelBuffer& buffer)
|
||||||
dbgln("BlockDevice::write_block({}) cancelled", index);
|
dbgln("BlockDevice::write_block({}) cancelled", index);
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
}
|
}
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
|
@ -57,7 +57,7 @@ public:
|
||||||
case Write:
|
case Write:
|
||||||
return "BlockDeviceRequest (write)";
|
return "BlockDeviceRequest (write)";
|
||||||
default:
|
default:
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -61,7 +61,7 @@ Device::Device(unsigned major, unsigned minor)
|
||||||
if (it != all_devices().end()) {
|
if (it != all_devices().end()) {
|
||||||
dbgln("Already registered {},{}: {}", major, minor, it->value->class_name());
|
dbgln("Already registered {},{}: {}", major, minor, it->value->class_name());
|
||||||
}
|
}
|
||||||
ASSERT(!all_devices().contains(device_id));
|
VERIFY(!all_devices().contains(device_id));
|
||||||
all_devices().set(device_id, this);
|
all_devices().set(device_id, this);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -86,8 +86,8 @@ void Device::process_next_queued_request(Badge<AsyncDeviceRequest>, const AsyncD
|
||||||
|
|
||||||
{
|
{
|
||||||
ScopedSpinLock lock(m_requests_lock);
|
ScopedSpinLock lock(m_requests_lock);
|
||||||
ASSERT(!m_requests.is_empty());
|
VERIFY(!m_requests.is_empty());
|
||||||
ASSERT(m_requests.first().ptr() == &completed_request);
|
VERIFY(m_requests.first().ptr() == &completed_request);
|
||||||
m_requests.remove(m_requests.begin());
|
m_requests.remove(m_requests.begin());
|
||||||
if (!m_requests.is_empty())
|
if (!m_requests.is_empty())
|
||||||
next_request = m_requests.first().ptr();
|
next_request = m_requests.first().ptr();
|
||||||
|
|
|
@ -41,13 +41,13 @@ UNMAP_AFTER_INIT void I8042Controller::initialize()
|
||||||
|
|
||||||
I8042Controller& I8042Controller::the()
|
I8042Controller& I8042Controller::the()
|
||||||
{
|
{
|
||||||
ASSERT(s_the);
|
VERIFY(s_the);
|
||||||
return *s_the;
|
return *s_the;
|
||||||
}
|
}
|
||||||
|
|
||||||
UNMAP_AFTER_INIT I8042Controller::I8042Controller()
|
UNMAP_AFTER_INIT I8042Controller::I8042Controller()
|
||||||
{
|
{
|
||||||
ASSERT(!s_the);
|
VERIFY(!s_the);
|
||||||
s_the = this;
|
s_the = this;
|
||||||
|
|
||||||
u8 configuration;
|
u8 configuration;
|
||||||
|
@ -148,7 +148,7 @@ UNMAP_AFTER_INIT I8042Controller::I8042Controller()
|
||||||
|
|
||||||
void I8042Controller::irq_process_input_buffer(Device)
|
void I8042Controller::irq_process_input_buffer(Device)
|
||||||
{
|
{
|
||||||
ASSERT(Processor::current().in_irq());
|
VERIFY(Processor::current().in_irq());
|
||||||
|
|
||||||
u8 status = IO::in8(I8042_STATUS);
|
u8 status = IO::in8(I8042_STATUS);
|
||||||
if (!(status & I8042_BUFFER_FULL))
|
if (!(status & I8042_BUFFER_FULL))
|
||||||
|
@ -171,10 +171,10 @@ void I8042Controller::do_drain()
|
||||||
|
|
||||||
bool I8042Controller::do_reset_device(Device device)
|
bool I8042Controller::do_reset_device(Device device)
|
||||||
{
|
{
|
||||||
ASSERT(device != Device::None);
|
VERIFY(device != Device::None);
|
||||||
ASSERT(m_lock.is_locked());
|
VERIFY(m_lock.is_locked());
|
||||||
|
|
||||||
ASSERT(!Processor::current().in_irq());
|
VERIFY(!Processor::current().in_irq());
|
||||||
if (do_send_command(device, 0xff) != I8042_ACK)
|
if (do_send_command(device, 0xff) != I8042_ACK)
|
||||||
return false;
|
return false;
|
||||||
// Wait until we get the self-test result
|
// Wait until we get the self-test result
|
||||||
|
@ -183,20 +183,20 @@ bool I8042Controller::do_reset_device(Device device)
|
||||||
|
|
||||||
u8 I8042Controller::do_send_command(Device device, u8 command)
|
u8 I8042Controller::do_send_command(Device device, u8 command)
|
||||||
{
|
{
|
||||||
ASSERT(device != Device::None);
|
VERIFY(device != Device::None);
|
||||||
ASSERT(m_lock.is_locked());
|
VERIFY(m_lock.is_locked());
|
||||||
|
|
||||||
ASSERT(!Processor::current().in_irq());
|
VERIFY(!Processor::current().in_irq());
|
||||||
|
|
||||||
return do_write_to_device(device, command);
|
return do_write_to_device(device, command);
|
||||||
}
|
}
|
||||||
|
|
||||||
u8 I8042Controller::do_send_command(Device device, u8 command, u8 data)
|
u8 I8042Controller::do_send_command(Device device, u8 command, u8 data)
|
||||||
{
|
{
|
||||||
ASSERT(device != Device::None);
|
VERIFY(device != Device::None);
|
||||||
ASSERT(m_lock.is_locked());
|
VERIFY(m_lock.is_locked());
|
||||||
|
|
||||||
ASSERT(!Processor::current().in_irq());
|
VERIFY(!Processor::current().in_irq());
|
||||||
|
|
||||||
u8 response = do_write_to_device(device, command);
|
u8 response = do_write_to_device(device, command);
|
||||||
if (response == I8042_ACK)
|
if (response == I8042_ACK)
|
||||||
|
@ -206,10 +206,10 @@ u8 I8042Controller::do_send_command(Device device, u8 command, u8 data)
|
||||||
|
|
||||||
u8 I8042Controller::do_write_to_device(Device device, u8 data)
|
u8 I8042Controller::do_write_to_device(Device device, u8 data)
|
||||||
{
|
{
|
||||||
ASSERT(device != Device::None);
|
VERIFY(device != Device::None);
|
||||||
ASSERT(m_lock.is_locked());
|
VERIFY(m_lock.is_locked());
|
||||||
|
|
||||||
ASSERT(!Processor::current().in_irq());
|
VERIFY(!Processor::current().in_irq());
|
||||||
|
|
||||||
int attempts = 0;
|
int attempts = 0;
|
||||||
u8 response;
|
u8 response;
|
||||||
|
@ -230,7 +230,7 @@ u8 I8042Controller::do_write_to_device(Device device, u8 data)
|
||||||
|
|
||||||
u8 I8042Controller::do_read_from_device(Device device)
|
u8 I8042Controller::do_read_from_device(Device device)
|
||||||
{
|
{
|
||||||
ASSERT(device != Device::None);
|
VERIFY(device != Device::None);
|
||||||
|
|
||||||
prepare_for_input(device);
|
prepare_for_input(device);
|
||||||
return IO::in8(I8042_BUFFER);
|
return IO::in8(I8042_BUFFER);
|
||||||
|
@ -238,7 +238,7 @@ u8 I8042Controller::do_read_from_device(Device device)
|
||||||
|
|
||||||
void I8042Controller::prepare_for_input(Device device)
|
void I8042Controller::prepare_for_input(Device device)
|
||||||
{
|
{
|
||||||
ASSERT(m_lock.is_locked());
|
VERIFY(m_lock.is_locked());
|
||||||
const u8 buffer_type = device == Device::Keyboard ? I8042_KEYBOARD_BUFFER : I8042_MOUSE_BUFFER;
|
const u8 buffer_type = device == Device::Keyboard ? I8042_KEYBOARD_BUFFER : I8042_MOUSE_BUFFER;
|
||||||
for (;;) {
|
for (;;) {
|
||||||
u8 status = IO::in8(I8042_STATUS);
|
u8 status = IO::in8(I8042_STATUS);
|
||||||
|
@ -249,7 +249,7 @@ void I8042Controller::prepare_for_input(Device device)
|
||||||
|
|
||||||
void I8042Controller::prepare_for_output()
|
void I8042Controller::prepare_for_output()
|
||||||
{
|
{
|
||||||
ASSERT(m_lock.is_locked());
|
VERIFY(m_lock.is_locked());
|
||||||
for (;;) {
|
for (;;) {
|
||||||
if (!(IO::in8(I8042_STATUS) & 2))
|
if (!(IO::in8(I8042_STATUS) & 2))
|
||||||
return;
|
return;
|
||||||
|
@ -258,14 +258,14 @@ void I8042Controller::prepare_for_output()
|
||||||
|
|
||||||
void I8042Controller::do_wait_then_write(u8 port, u8 data)
|
void I8042Controller::do_wait_then_write(u8 port, u8 data)
|
||||||
{
|
{
|
||||||
ASSERT(m_lock.is_locked());
|
VERIFY(m_lock.is_locked());
|
||||||
prepare_for_output();
|
prepare_for_output();
|
||||||
IO::out8(port, data);
|
IO::out8(port, data);
|
||||||
}
|
}
|
||||||
|
|
||||||
u8 I8042Controller::do_wait_then_read(u8 port)
|
u8 I8042Controller::do_wait_then_read(u8 port)
|
||||||
{
|
{
|
||||||
ASSERT(m_lock.is_locked());
|
VERIFY(m_lock.is_locked());
|
||||||
prepare_for_input(Device::None);
|
prepare_for_input(Device::None);
|
||||||
return IO::in8(port);
|
return IO::in8(port);
|
||||||
}
|
}
|
||||||
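The prepare_for_input()/prepare_for_output() and do_wait_then_* helpers above all follow the same polled-I/O pattern: spin on the status register until the controller is ready, then do a single byte transfer. A runnable toy version of that pattern (the status bits follow the usual i8042 layout, but the "controller" here is simulated in plain variables rather than accessed through port I/O):

    #include <cstdint>
    #include <cstdio>

    // Simulated controller registers so the sketch runs in user space;
    // real code would use port I/O (IO::in8/IO::out8 in the kernel) instead.
    static uint8_t g_status = 0x01; // pretend a byte is already waiting for us
    static uint8_t g_data = 0xfa;   // pretend that byte is an ACK

    constexpr uint8_t OUTPUT_FULL = 0x01; // controller has produced a byte
    constexpr uint8_t INPUT_FULL = 0x02;  // controller is still busy with our last byte

    static uint8_t read_status() { return g_status; }
    static uint8_t read_data()
    {
        g_status &= static_cast<uint8_t>(~OUTPUT_FULL);
        return g_data;
    }
    static void write_data(uint8_t) { /* a real controller would latch the byte here */ }

    // Same shape as do_wait_then_write(): wait until the input buffer is free, then write.
    void wait_then_write(uint8_t data)
    {
        while (read_status() & INPUT_FULL) { }
        write_data(data);
    }

    // Same shape as do_wait_then_read(): wait until a byte is available, then read it.
    uint8_t wait_then_read()
    {
        while (!(read_status() & OUTPUT_FULL)) { }
        return read_data();
    }

    int main()
    {
        wait_then_write(0xff); // e.g. a reset command
        std::printf("response: 0x%02x\n", static_cast<unsigned>(wait_then_read()));
        return 0;
    }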
|
|
|
@ -113,7 +113,7 @@ private:
|
||||||
|
|
||||||
static int device_to_deviceinfo_index(Device device)
|
static int device_to_deviceinfo_index(Device device)
|
||||||
{
|
{
|
||||||
ASSERT(device != Device::None);
|
VERIFY(device != Device::None);
|
||||||
return (device == Device::Keyboard) ? 0 : 1;
|
return (device == Device::Keyboard) ? 0 : 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -450,7 +450,7 @@ KResultOr<size_t> KeyboardDevice::read(FileDescription&, size_t, UserOrKernelBuf
|
||||||
});
|
});
|
||||||
if (n < 0)
|
if (n < 0)
|
||||||
return KResult((ErrnoCode)-n);
|
return KResult((ErrnoCode)-n);
|
||||||
ASSERT((size_t)n == sizeof(Event));
|
VERIFY((size_t)n == sizeof(Event));
|
||||||
nread += sizeof(Event);
|
nread += sizeof(Event);
|
||||||
|
|
||||||
lock.lock();
|
lock.lock();
|
||||||
|
|
|
@ -114,7 +114,7 @@ void PS2MouseDevice::irq_handle_byte_read(u8 byte)
|
||||||
evaluate_block_conditions();
|
evaluate_block_conditions();
|
||||||
};
|
};
|
||||||
|
|
||||||
ASSERT(m_data_state < sizeof(m_data.bytes) / sizeof(m_data.bytes[0]));
|
VERIFY(m_data_state < sizeof(m_data.bytes) / sizeof(m_data.bytes[0]));
|
||||||
m_data.bytes[m_data_state] = byte;
|
m_data.bytes[m_data_state] = byte;
|
||||||
|
|
||||||
switch (m_data_state) {
|
switch (m_data_state) {
|
||||||
|
@ -136,7 +136,7 @@ void PS2MouseDevice::irq_handle_byte_read(u8 byte)
|
||||||
commit_packet();
|
commit_packet();
|
||||||
break;
|
break;
|
||||||
case 3:
|
case 3:
|
||||||
ASSERT(m_has_wheel);
|
VERIFY(m_has_wheel);
|
||||||
commit_packet();
|
commit_packet();
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -275,7 +275,7 @@ bool PS2MouseDevice::can_read(const FileDescription&, size_t) const
|
||||||
|
|
||||||
KResultOr<size_t> PS2MouseDevice::read(FileDescription&, size_t, UserOrKernelBuffer& buffer, size_t size)
|
KResultOr<size_t> PS2MouseDevice::read(FileDescription&, size_t, UserOrKernelBuffer& buffer, size_t size)
|
||||||
{
|
{
|
||||||
ASSERT(size > 0);
|
VERIFY(size > 0);
|
||||||
size_t nread = 0;
|
size_t nread = 0;
|
||||||
size_t remaining_space_in_buffer = static_cast<size_t>(size) - nread;
|
size_t remaining_space_in_buffer = static_cast<size_t>(size) - nread;
|
||||||
ScopedSpinLock lock(m_queue_lock);
|
ScopedSpinLock lock(m_queue_lock);
|
||||||
|
|
|
@ -153,7 +153,7 @@ void SB16::set_irq_register(u8 irq_number)
|
||||||
bitmask = 0b1000;
|
bitmask = 0b1000;
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
}
|
}
|
||||||
IO::out8(0x224, 0x80);
|
IO::out8(0x224, 0x80);
|
||||||
IO::out8(0x225, bitmask);
|
IO::out8(0x225, bitmask);
|
||||||
|
@ -258,7 +258,7 @@ KResultOr<size_t> SB16::write(FileDescription&, size_t, const UserOrKernelBuffer
|
||||||
#if SB16_DEBUG
|
#if SB16_DEBUG
|
||||||
klog() << "SB16: Writing buffer of " << length << " bytes";
|
klog() << "SB16: Writing buffer of " << length << " bytes";
|
||||||
#endif
|
#endif
|
||||||
ASSERT(length <= PAGE_SIZE);
|
VERIFY(length <= PAGE_SIZE);
|
||||||
const int BLOCK_SIZE = 32 * 1024;
|
const int BLOCK_SIZE = 32 * 1024;
|
||||||
if (length > BLOCK_SIZE) {
|
if (length > BLOCK_SIZE) {
|
||||||
return ENOSPC;
|
return ENOSPC;
|
||||||
|
|
|
@ -296,7 +296,7 @@ QueueHead* UHCIController::allocate_queue_head() const
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ASSERT_NOT_REACHED(); // Let's just assert for now, this should never happen
|
VERIFY_NOT_REACHED(); // Let's just assert for now, this should never happen
|
||||||
return nullptr; // Huh!? We're outta queue heads!
|
return nullptr; // Huh!? We're outta queue heads!
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -312,7 +312,7 @@ TransferDescriptor* UHCIController::allocate_transfer_descriptor() const
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ASSERT_NOT_REACHED(); // Let's just assert for now, this should never happen
|
VERIFY_NOT_REACHED(); // Let's just assert for now, this should never happen
|
||||||
return nullptr; // Huh?! We're outta TDs!!
|
return nullptr; // Huh?! We're outta TDs!!
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -105,13 +105,13 @@ struct alignas(16) TransferDescriptor final {
|
||||||
void set_in_use(bool in_use) { m_in_use = in_use; }
|
void set_in_use(bool in_use) { m_in_use = in_use; }
|
||||||
void set_max_len(u16 max_len)
|
void set_max_len(u16 max_len)
|
||||||
{
|
{
|
||||||
ASSERT(max_len < 0x500 || max_len == 0x7ff);
|
VERIFY(max_len < 0x500 || max_len == 0x7ff);
|
||||||
m_token |= (max_len << 21);
|
m_token |= (max_len << 21);
|
||||||
}
|
}
|
||||||
|
|
||||||
void set_device_address(u8 address)
|
void set_device_address(u8 address)
|
||||||
{
|
{
|
||||||
ASSERT(address <= 0x7f);
|
VERIFY(address <= 0x7f);
|
||||||
m_token |= (address << 8);
|
m_token |= (address << 8);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -53,7 +53,7 @@ void DoubleBuffer::flip()
|
||||||
{
|
{
|
||||||
if (m_storage.is_null())
|
if (m_storage.is_null())
|
||||||
return;
|
return;
|
||||||
ASSERT(m_read_buffer_index == m_read_buffer->size);
|
VERIFY(m_read_buffer_index == m_read_buffer->size);
|
||||||
swap(m_read_buffer, m_write_buffer);
|
swap(m_read_buffer, m_write_buffer);
|
||||||
m_write_buffer->size = 0;
|
m_write_buffer->size = 0;
|
||||||
m_read_buffer_index = 0;
|
m_read_buffer_index = 0;
|
||||||
|
@ -64,7 +64,7 @@ ssize_t DoubleBuffer::write(const UserOrKernelBuffer& data, size_t size)
|
||||||
{
|
{
|
||||||
if (!size || m_storage.is_null())
|
if (!size || m_storage.is_null())
|
||||||
return 0;
|
return 0;
|
||||||
ASSERT(size > 0);
|
VERIFY(size > 0);
|
||||||
LOCKER(m_lock);
|
LOCKER(m_lock);
|
||||||
size_t bytes_to_write = min(size, m_space_for_writing);
|
size_t bytes_to_write = min(size, m_space_for_writing);
|
||||||
u8* write_ptr = m_write_buffer->data + m_write_buffer->size;
|
u8* write_ptr = m_write_buffer->data + m_write_buffer->size;
|
||||||
|
@ -81,7 +81,7 @@ ssize_t DoubleBuffer::read(UserOrKernelBuffer& data, size_t size)
|
||||||
{
|
{
|
||||||
if (!size || m_storage.is_null())
|
if (!size || m_storage.is_null())
|
||||||
return 0;
|
return 0;
|
||||||
ASSERT(size > 0);
|
VERIFY(size > 0);
|
||||||
LOCKER(m_lock);
|
LOCKER(m_lock);
|
||||||
if (m_read_buffer_index >= m_read_buffer->size && m_write_buffer->size != 0)
|
if (m_read_buffer_index >= m_read_buffer->size && m_write_buffer->size != 0)
|
||||||
flip();
|
flip();
|
||||||
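DoubleBuffer::flip() above relies on a single invariant: the read half must be fully drained before the halves are swapped, which is exactly what the check at the top of flip() verifies. A small user-space sketch of the same idea (hypothetical class using std::vector storage, no locking, not the kernel's UserOrKernelBuffer-based implementation):

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <cstdio>
    #include <utility>
    #include <vector>

    // Writers append to the write half, readers drain the read half;
    // flip() swaps the halves once the read half is exhausted.
    class ToyDoubleBuffer {
    public:
        void write(const char* data, size_t size)
        {
            m_write->insert(m_write->end(), data, data + size);
        }

        size_t read(char* out, size_t size)
        {
            if (m_read_index >= m_read->size() && !m_write->empty())
                flip();
            size_t n = std::min(size, m_read->size() - m_read_index);
            std::copy_n(m_read->data() + m_read_index, n, out);
            m_read_index += n;
            return n;
        }

    private:
        void flip()
        {
            assert(m_read_index == m_read->size()); // read half must be drained first
            std::swap(m_read, m_write);
            m_write->clear();
            m_read_index = 0;
        }

        std::vector<char> m_buffers[2];
        std::vector<char>* m_read = &m_buffers[0];
        std::vector<char>* m_write = &m_buffers[1];
        size_t m_read_index = 0;
    };

    int main()
    {
        ToyDoubleBuffer buffer;
        buffer.write("hello", 5);
        char out[8] = {};
        size_t n = buffer.read(out, sizeof(out) - 1);
        std::printf("read %zu bytes: %s\n", n, out);
        return 0;
    }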
|
|
|
@ -56,7 +56,7 @@ public:
|
||||||
|
|
||||||
void set_unblock_callback(Function<void()> callback)
|
void set_unblock_callback(Function<void()> callback)
|
||||||
{
|
{
|
||||||
ASSERT(!m_unblock_callback);
|
VERIFY(!m_unblock_callback);
|
||||||
m_unblock_callback = move(callback);
|
m_unblock_callback = move(callback);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -78,7 +78,7 @@ public:
|
||||||
{
|
{
|
||||||
if (auto it = m_hash.find(block_index); it != m_hash.end()) {
|
if (auto it = m_hash.find(block_index); it != m_hash.end()) {
|
||||||
auto& entry = const_cast<CacheEntry&>(*it->value);
|
auto& entry = const_cast<CacheEntry&>(*it->value);
|
||||||
ASSERT(entry.block_index == block_index);
|
VERIFY(entry.block_index == block_index);
|
||||||
return entry;
|
return entry;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -90,7 +90,7 @@ public:
|
||||||
return get(block_index);
|
return get(block_index);
|
||||||
}
|
}
|
||||||
|
|
||||||
ASSERT(m_clean_list.last());
|
VERIFY(m_clean_list.last());
|
||||||
auto& new_entry = *m_clean_list.last();
|
auto& new_entry = *m_clean_list.last();
|
||||||
m_clean_list.prepend(new_entry);
|
m_clean_list.prepend(new_entry);
|
||||||
|
|
||||||
|
@ -127,7 +127,7 @@ private:
|
||||||
 BlockBasedFS::BlockBasedFS(FileDescription& file_description)
     : FileBackedFS(file_description)
 {
-    ASSERT(file_description.file().is_seekable());
+    VERIFY(file_description.file().is_seekable());
 }

 BlockBasedFS::~BlockBasedFS()
@@ -136,8 +136,8 @@ BlockBasedFS::~BlockBasedFS()

 KResult BlockBasedFS::write_block(BlockIndex index, const UserOrKernelBuffer& data, size_t count, size_t offset, bool allow_cache)
 {
-    ASSERT(m_logical_block_size);
+    VERIFY(m_logical_block_size);
-    ASSERT(offset + count <= block_size());
+    VERIFY(offset + count <= block_size());
 dbgln_if(BBFS_DEBUG, "BlockBasedFileSystem::write_block {}, size={}", index, count);

 if (!allow_cache) {
@@ -147,7 +147,7 @@ KResult BlockBasedFS::write_block(BlockIndex index, const UserOrKernelBuffer& da
 auto nwritten = file_description().write(data, count);
 if (nwritten.is_error())
 return nwritten.error();
-        ASSERT(nwritten.value() == count);
+        VERIFY(nwritten.value() == count);
 return KSuccess;
 }

@@ -171,8 +171,8 @@ bool BlockBasedFS::raw_read(BlockIndex index, UserOrKernelBuffer& buffer)
 u32 base_offset = index.value() * m_logical_block_size;
 file_description().seek(base_offset, SEEK_SET);
 auto nread = file_description().read(buffer, m_logical_block_size);
-    ASSERT(!nread.is_error());
+    VERIFY(!nread.is_error());
-    ASSERT(nread.value() == m_logical_block_size);
+    VERIFY(nread.value() == m_logical_block_size);
 return true;
 }
 bool BlockBasedFS::raw_write(BlockIndex index, const UserOrKernelBuffer& buffer)
@@ -180,8 +180,8 @@ bool BlockBasedFS::raw_write(BlockIndex index, const UserOrKernelBuffer& buffer)
 size_t base_offset = index.value() * m_logical_block_size;
 file_description().seek(base_offset, SEEK_SET);
 auto nwritten = file_description().write(buffer, m_logical_block_size);
-    ASSERT(!nwritten.is_error());
+    VERIFY(!nwritten.is_error());
-    ASSERT(nwritten.value() == m_logical_block_size);
+    VERIFY(nwritten.value() == m_logical_block_size);
 return true;
 }

@@ -208,7 +208,7 @@ bool BlockBasedFS::raw_write_blocks(BlockIndex index, size_t count, const UserOr

 KResult BlockBasedFS::write_blocks(BlockIndex index, unsigned count, const UserOrKernelBuffer& data, bool allow_cache)
 {
-    ASSERT(m_logical_block_size);
+    VERIFY(m_logical_block_size);
 dbgln_if(BBFS_DEBUG, "BlockBasedFileSystem::write_blocks {}, count={}", index, count);
 for (unsigned i = 0; i < count; ++i) {
 auto result = write_block(BlockIndex { index.value() + i }, data.offset(i * block_size()), block_size(), 0, allow_cache);
@@ -220,8 +220,8 @@ KResult BlockBasedFS::write_blocks(BlockIndex index, unsigned count, const UserO

 KResult BlockBasedFS::read_block(BlockIndex index, UserOrKernelBuffer* buffer, size_t count, size_t offset, bool allow_cache) const
 {
-    ASSERT(m_logical_block_size);
+    VERIFY(m_logical_block_size);
-    ASSERT(offset + count <= block_size());
+    VERIFY(offset + count <= block_size());
 dbgln_if(BBFS_DEBUG, "BlockBasedFileSystem::read_block {}", index);

 if (!allow_cache) {
@@ -231,7 +231,7 @@ KResult BlockBasedFS::read_block(BlockIndex index, UserOrKernelBuffer* buffer, s
 auto nread = file_description().read(*buffer, count);
 if (nread.is_error())
 return nread.error();
-        ASSERT(nread.value() == count);
+        VERIFY(nread.value() == count);
 return KSuccess;
 }

@@ -243,7 +243,7 @@ KResult BlockBasedFS::read_block(BlockIndex index, UserOrKernelBuffer* buffer, s
 auto nread = file_description().read(entry_data_buffer, block_size());
 if (nread.is_error())
 return nread.error();
-        ASSERT(nread.value() == block_size());
+        VERIFY(nread.value() == block_size());
 entry.has_data = true;
 }
 if (buffer && !buffer->write(entry.data + offset, count))
@@ -253,7 +253,7 @@ KResult BlockBasedFS::read_block(BlockIndex index, UserOrKernelBuffer* buffer, s

 KResult BlockBasedFS::read_blocks(BlockIndex index, unsigned count, UserOrKernelBuffer& buffer, bool allow_cache) const
 {
-    ASSERT(m_logical_block_size);
+    VERIFY(m_logical_block_size);
 if (!count)
 return EINVAL;
 if (count == 1)
@@ -61,7 +61,7 @@ size_t DevFS::allocate_inode_index()
 {
 LOCKER(m_lock);
 m_next_inode_index = m_next_inode_index.value() + 1;
-    ASSERT(m_next_inode_index > 0);
+    VERIFY(m_next_inode_index > 0);
 return 1 + m_next_inode_index.value();
 }

@@ -102,17 +102,17 @@ DevFSInode::DevFSInode(DevFS& fs)
 }
 ssize_t DevFSInode::read_bytes(off_t, ssize_t, UserOrKernelBuffer&, FileDescription*) const
 {
-    ASSERT_NOT_REACHED();
+    VERIFY_NOT_REACHED();
 }

 KResult DevFSInode::traverse_as_directory(Function<bool(const FS::DirectoryEntryView&)>) const
 {
-    ASSERT_NOT_REACHED();
+    VERIFY_NOT_REACHED();
 }

 RefPtr<Inode> DevFSInode::lookup(StringView)
 {
-    ASSERT_NOT_REACHED();
+    VERIFY_NOT_REACHED();
 }

 void DevFSInode::flush_metadata()
@@ -121,7 +121,7 @@ void DevFSInode::flush_metadata()

 ssize_t DevFSInode::write_bytes(off_t, ssize_t, const UserOrKernelBuffer&, FileDescription*)
 {
-    ASSERT_NOT_REACHED();
+    VERIFY_NOT_REACHED();
 }

 KResultOr<NonnullRefPtr<Inode>> DevFSInode::create_child(const String&, mode_t, dev_t, uid_t, gid_t)
@@ -141,7 +141,7 @@ KResult DevFSInode::remove_child(const StringView&)

 KResultOr<size_t> DevFSInode::directory_entry_count() const
 {
-    ASSERT_NOT_REACHED();
+    VERIFY_NOT_REACHED();
 }

 KResult DevFSInode::chmod(mode_t)
@@ -174,8 +174,8 @@ DevFSLinkInode::DevFSLinkInode(DevFS& fs, String name)
 ssize_t DevFSLinkInode::read_bytes(off_t offset, ssize_t, UserOrKernelBuffer& buffer, FileDescription*) const
 {
 LOCKER(m_lock);
-    ASSERT(offset == 0);
+    VERIFY(offset == 0);
-    ASSERT(!m_link.is_null());
+    VERIFY(!m_link.is_null());
 if (!buffer.write(((const u8*)m_link.substring_view(0).characters_without_null_termination()) + offset, m_link.length()))
 return -EFAULT;
 return m_link.length();
@@ -195,8 +195,8 @@ InodeMetadata DevFSLinkInode::metadata() const
 ssize_t DevFSLinkInode::write_bytes(off_t offset, ssize_t count, const UserOrKernelBuffer& buffer, FileDescription*)
 {
 LOCKER(m_lock);
-    ASSERT(offset == 0);
+    VERIFY(offset == 0);
-    ASSERT(buffer.is_kernel_buffer());
+    VERIFY(buffer.is_kernel_buffer());
 m_link = buffer.copy_into_string(count);
 return count;
 }
@@ -361,7 +361,7 @@ String DevFSDeviceInode::name() const
 ssize_t DevFSDeviceInode::read_bytes(off_t offset, ssize_t count, UserOrKernelBuffer& buffer, FileDescription* description) const
 {
 LOCKER(m_lock);
-    ASSERT(!!description);
+    VERIFY(!!description);
 if (!m_attached_device->can_read(*description, offset))
 return -EIO;
 auto nread = const_cast<Device&>(*m_attached_device).read(*description, offset, buffer, count);
@@ -387,7 +387,7 @@ InodeMetadata DevFSDeviceInode::metadata() const
 ssize_t DevFSDeviceInode::write_bytes(off_t offset, ssize_t count, const UserOrKernelBuffer& buffer, FileDescription* description)
 {
 LOCKER(m_lock);
-    ASSERT(!!description);
+    VERIFY(!!description);
 if (!m_attached_device->can_read(*description, offset))
 return -EIO;
 auto nread = const_cast<Device&>(*m_attached_device).write(*description, offset, buffer, count);
@@ -63,7 +63,7 @@ bool DevPtsFS::initialize()

 static unsigned inode_index_to_pty_index(InodeIndex inode_index)
 {
-    ASSERT(inode_index > 1);
+    VERIFY(inode_index > 1);
 return inode_index.value() - 2;
 }

@@ -84,7 +84,7 @@ RefPtr<Inode> DevPtsFS::get_inode(InodeIdentifier inode_id) const

 unsigned pty_index = inode_index_to_pty_index(inode_id.index());
 auto* device = Device::get_device(201, pty_index);
-    ASSERT(device);
+    VERIFY(device);

 auto inode = adopt(*new DevPtsFSInode(const_cast<DevPtsFS&>(*this), inode_id.index(), static_cast<SlavePTY*>(device)));
 inode->m_metadata.inode = inode_id;
@@ -122,12 +122,12 @@ DevPtsFSInode::~DevPtsFSInode()

 ssize_t DevPtsFSInode::read_bytes(off_t, ssize_t, UserOrKernelBuffer&, FileDescription*) const
 {
-    ASSERT_NOT_REACHED();
+    VERIFY_NOT_REACHED();
 }

 ssize_t DevPtsFSInode::write_bytes(off_t, ssize_t, const UserOrKernelBuffer&, FileDescription*)
 {
-    ASSERT_NOT_REACHED();
+    VERIFY_NOT_REACHED();
 }

 InodeMetadata DevPtsFSInode::metadata() const
@@ -159,14 +159,14 @@ KResult DevPtsFSInode::traverse_as_directory(Function<bool(const FS::DirectoryEn

 KResultOr<size_t> DevPtsFSInode::directory_entry_count() const
 {
-    ASSERT(identifier().index() == 1);
+    VERIFY(identifier().index() == 1);

 return 2 + s_ptys->size();
 }

 RefPtr<Inode> DevPtsFSInode::lookup(StringView name)
 {
-    ASSERT(identifier().index() == 1);
+    VERIFY(identifier().index() == 1);

 if (name == "." || name == "..")
 return this;
@@ -91,28 +91,28 @@ Ext2FS::~Ext2FS()
 bool Ext2FS::flush_super_block()
 {
 LOCKER(m_lock);
-    ASSERT((sizeof(ext2_super_block) % logical_block_size()) == 0);
+    VERIFY((sizeof(ext2_super_block) % logical_block_size()) == 0);
 auto super_block_buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)&m_super_block);
 bool success = raw_write_blocks(2, (sizeof(ext2_super_block) / logical_block_size()), super_block_buffer);
-    ASSERT(success);
+    VERIFY(success);
 return true;
 }

 const ext2_group_desc& Ext2FS::group_descriptor(GroupIndex group_index) const
 {
 // FIXME: Should this fail gracefully somehow?
-    ASSERT(group_index <= m_block_group_count);
+    VERIFY(group_index <= m_block_group_count);
-    ASSERT(group_index > 0);
+    VERIFY(group_index > 0);
 return block_group_descriptors()[group_index.value() - 1];
 }

 bool Ext2FS::initialize()
 {
 LOCKER(m_lock);
-    ASSERT((sizeof(ext2_super_block) % logical_block_size()) == 0);
+    VERIFY((sizeof(ext2_super_block) % logical_block_size()) == 0);
 auto super_block_buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)&m_super_block);
 bool success = raw_read_blocks(2, (sizeof(ext2_super_block) / logical_block_size()), super_block_buffer);
-    ASSERT(success);
+    VERIFY(success);

 auto& super_block = this->super_block();
 if constexpr (EXT2_DEBUG) {
@@ -134,7 +134,7 @@ bool Ext2FS::initialize()

 set_block_size(EXT2_BLOCK_SIZE(&super_block));

-    ASSERT(block_size() <= (int)max_block_size);
+    VERIFY(block_size() <= (int)max_block_size);

 m_block_group_count = ceil_div(super_block.s_blocks_count, super_block.s_blocks_per_group);

@@ -227,7 +227,7 @@ Ext2FS::BlockListShape Ext2FS::compute_block_list_shape(unsigned blocks) const
 shape.meta_blocks += divide_rounded_up(shape.triply_indirect_blocks, entries_per_block * entries_per_block);
 shape.meta_blocks += divide_rounded_up(shape.triply_indirect_blocks, entries_per_block);
 blocks_remaining -= shape.triply_indirect_blocks;
-    ASSERT(blocks_remaining == 0);
+    VERIFY(blocks_remaining == 0);
 return shape;
 }

@@ -302,7 +302,7 @@ KResult Ext2FS::write_block_list_for_inode(InodeIndex inode_index, ext2_inode& e
 auto block_contents = ByteBuffer::create_uninitialized(block_size());
 OutputMemoryStream stream { block_contents };

-    ASSERT(new_shape.indirect_blocks <= entries_per_block);
+    VERIFY(new_shape.indirect_blocks <= entries_per_block);
 for (unsigned i = 0; i < new_shape.indirect_blocks; ++i) {
 stream << blocks[output_block_index++].value();
 --remaining_blocks;
@@ -355,7 +355,7 @@ KResult Ext2FS::write_block_list_for_inode(InodeIndex inode_index, ext2_inode& e
 }
 auto* dind_block_as_pointers = (unsigned*)dind_block_contents.data();

-    ASSERT(indirect_block_count <= entries_per_block);
+    VERIFY(indirect_block_count <= entries_per_block);
 for (unsigned i = 0; i < indirect_block_count; ++i) {
 bool ind_block_dirty = false;

@@ -386,7 +386,7 @@ KResult Ext2FS::write_block_list_for_inode(InodeIndex inode_index, ext2_inode& e
 if (entries_to_write > entries_per_block)
 entries_to_write = entries_per_block;

-        ASSERT(entries_to_write <= entries_per_block);
+        VERIFY(entries_to_write <= entries_per_block);
 for (unsigned j = 0; j < entries_to_write; ++j) {
 BlockIndex output_block = blocks[output_block_index++];
 if (ind_block_as_pointers[j] != output_block) {
@@ -405,7 +405,7 @@ KResult Ext2FS::write_block_list_for_inode(InodeIndex inode_index, ext2_inode& e
 if (ind_block_dirty) {
 auto buffer = UserOrKernelBuffer::for_kernel_buffer(ind_block_contents.data());
 int err = write_block(indirect_block_index, buffer, block_size());
-            ASSERT(err >= 0);
+            VERIFY(err >= 0);
 }
 }
 for (unsigned i = indirect_block_count; i < entries_per_block; ++i) {
@@ -418,7 +418,7 @@ KResult Ext2FS::write_block_list_for_inode(InodeIndex inode_index, ext2_inode& e
 if (dind_block_dirty) {
 auto buffer = UserOrKernelBuffer::for_kernel_buffer(dind_block_contents.data());
 int err = write_block(e2inode.i_block[EXT2_DIND_BLOCK], buffer, block_size());
-        ASSERT(err >= 0);
+        VERIFY(err >= 0);
 }
 }

@@ -427,7 +427,7 @@ KResult Ext2FS::write_block_list_for_inode(InodeIndex inode_index, ext2_inode& e

 // FIXME: Implement!
 dbgln("we don't know how to write tind ext2fs blocks yet!");
-        ASSERT_NOT_REACHED();
+        VERIFY_NOT_REACHED();
 }

 Vector<Ext2FS::BlockIndex> Ext2FS::block_list_for_inode(const ext2_inode& e2inode, bool include_block_list_blocks) const
@@ -536,13 +536,13 @@ Vector<Ext2FS::BlockIndex> Ext2FS::block_list_for_inode_impl(const ext2_inode& e
 void Ext2FS::free_inode(Ext2FSInode& inode)
 {
 LOCKER(m_lock);
-    ASSERT(inode.m_raw_inode.i_links_count == 0);
+    VERIFY(inode.m_raw_inode.i_links_count == 0);
 dbgln_if(EXT2_DEBUG, "Ext2FS: Inode {} has no more links, time to delete!", inode.index());

 // Mark all blocks used by this inode as free.
 auto block_list = block_list_for_inode(inode.m_raw_inode, true);
 for (auto block_index : block_list) {
-        ASSERT(block_index <= super_block().s_blocks_count);
+        VERIFY(block_index <= super_block().s_blocks_count);
 if (block_index.value())
 set_block_allocation_state(block_index, false);
 }
@@ -674,7 +674,7 @@ void Ext2FSInode::flush_metadata()
 RefPtr<Inode> Ext2FS::get_inode(InodeIdentifier inode) const
 {
 LOCKER(m_lock);
-    ASSERT(inode.fsid() == fsid());
+    VERIFY(inode.fsid() == fsid());

 {
 auto it = m_inode_cache.find(inode.index());
@@ -706,14 +706,14 @@ RefPtr<Inode> Ext2FS::get_inode(InodeIdentifier inode) const
 ssize_t Ext2FSInode::read_bytes(off_t offset, ssize_t count, UserOrKernelBuffer& buffer, FileDescription* description) const
 {
 Locker inode_locker(m_lock);
-    ASSERT(offset >= 0);
+    VERIFY(offset >= 0);
 if (m_raw_inode.i_size == 0)
 return 0;

 // Symbolic links shorter than 60 characters are store inline inside the i_block array.
 // This avoids wasting an entire block on short links. (Most links are short.)
 if (is_symlink() && size() < max_inline_symlink_length) {
-        ASSERT(offset == 0);
+        VERIFY(offset == 0);
 ssize_t nread = min((off_t)size() - offset, static_cast<off_t>(count));
 if (!buffer.write(((const u8*)m_raw_inode.i_block) + offset, (size_t)nread))
 return -EFAULT;
@@ -748,7 +748,7 @@ ssize_t Ext2FSInode::read_bytes(off_t offset, ssize_t count, UserOrKernelBuffer&

 for (size_t bi = first_block_logical_index; remaining_count && bi <= last_block_logical_index; ++bi) {
 auto block_index = m_block_list[bi];
-        ASSERT(block_index.value());
+        VERIFY(block_index.value());
 size_t offset_into_block = (bi == first_block_logical_index) ? offset_into_first_block : 0;
 size_t num_bytes_to_copy = min(block_size - offset_into_block, remaining_count);
 auto buffer_offset = buffer.offset(nread);
@@ -827,7 +827,7 @@ KResult Ext2FSInode::resize(u64 new_size)
 auto nwritten = write_bytes(clear_from, min(sizeof(zero_buffer), bytes_to_clear), UserOrKernelBuffer::for_kernel_buffer(zero_buffer), nullptr);
 if (nwritten < 0)
 return KResult((ErrnoCode)-nwritten);
-            ASSERT(nwritten != 0);
+            VERIFY(nwritten != 0);
 bytes_to_clear -= nwritten;
 clear_from += nwritten;
 }
@@ -838,8 +838,8 @@ KResult Ext2FSInode::resize(u64 new_size)

 ssize_t Ext2FSInode::write_bytes(off_t offset, ssize_t count, const UserOrKernelBuffer& data, FileDescription* description)
 {
-    ASSERT(offset >= 0);
+    VERIFY(offset >= 0);
-    ASSERT(count >= 0);
+    VERIFY(count >= 0);

 Locker inode_locker(m_lock);
 Locker fs_locker(fs().m_lock);
@@ -849,7 +849,7 @@ ssize_t Ext2FSInode::write_bytes(off_t offset, ssize_t count, const UserOrKernel
 return result;

 if (is_symlink()) {
-        ASSERT(offset == 0);
+        VERIFY(offset == 0);
 if (max((size_t)(offset + count), (size_t)m_raw_inode.i_size) < max_inline_symlink_length) {
 dbgln_if(EXT2_DEBUG, "Ext2FS: write_bytes poking into i_block array for inline symlink '{}' ({} bytes)", data.copy_into_string(count), count);
 if (!data.read(((u8*)m_raw_inode.i_block) + offset, (size_t)count))
@@ -937,7 +937,7 @@ u8 Ext2FS::internal_file_type_to_directory_entry_type(const DirectoryEntryView&
 KResult Ext2FSInode::traverse_as_directory(Function<bool(const FS::DirectoryEntryView&)> callback) const
 {
 LOCKER(m_lock);
-    ASSERT(is_directory());
+    VERIFY(is_directory());

 dbgln_if(EXT2_VERY_DEBUG, "Ext2FS: Traversing as directory: {}", index());

@@ -1020,7 +1020,7 @@ KResultOr<NonnullRefPtr<Inode>> Ext2FSInode::create_child(const String& name, mo
 KResult Ext2FSInode::add_child(Inode& child, const StringView& name, mode_t mode)
 {
 LOCKER(m_lock);
-    ASSERT(is_directory());
+    VERIFY(is_directory());

 if (name.length() > EXT2_NAME_LEN)
 return ENAMETOOLONG;
@@ -1064,7 +1064,7 @@ KResult Ext2FSInode::remove_child(const StringView& name)
 {
 LOCKER(m_lock);
 dbgln_if(EXT2_DEBUG, "Ext2FSInode::remove_child('{}') in inode {}", name, index());
-    ASSERT(is_directory());
+    VERIFY(is_directory());

 auto it = m_lookup_cache.find(name);
 if (it == m_lookup_cache.end())
@@ -1162,7 +1162,7 @@ auto Ext2FS::allocate_blocks(GroupIndex preferred_group_index, size_t count) ->
 }
 }

-        ASSERT(found_a_group);
+        VERIFY(found_a_group);
 auto& bgd = group_descriptor(group_index);
 auto& cached_bitmap = get_bitmap_block(bgd.bg_block_bitmap);

@@ -1172,7 +1172,7 @@ auto Ext2FS::allocate_blocks(GroupIndex preferred_group_index, size_t count) ->
 BlockIndex first_block_in_group = (group_index.value() - 1) * blocks_per_group() + first_block_index().value();
 size_t free_region_size = 0;
 auto first_unset_bit_index = block_bitmap.find_longest_range_of_unset_bits(count - blocks.size(), free_region_size);
-        ASSERT(first_unset_bit_index.has_value());
+        VERIFY(first_unset_bit_index.has_value());
 dbgln_if(EXT2_DEBUG, "Ext2FS: allocating free region of size: {} [{}]", free_region_size, group_index);
 for (size_t i = 0; i < free_region_size; ++i) {
 BlockIndex block_index = (first_unset_bit_index.value() + i) + first_block_in_group.value();
@@ -1182,7 +1182,7 @@ auto Ext2FS::allocate_blocks(GroupIndex preferred_group_index, size_t count) ->
 }
 }

-    ASSERT(blocks.size() == count);
+    VERIFY(blocks.size() == count);
 return blocks;
 }

@@ -1239,7 +1239,7 @@ InodeIndex Ext2FS::find_a_free_inode(GroupIndex preferred_group)
 InodeIndex inode = first_free_inode_in_group;
 dbgln_if(EXT2_DEBUG, "Ext2FS: found suitable inode {}", inode);

-    ASSERT(get_inode_allocation_state(inode) == false);
+    VERIFY(get_inode_allocation_state(inode) == false);
 return inode;
 }

@@ -1285,7 +1285,7 @@ bool Ext2FS::set_inode_allocation_state(InodeIndex inode_index, bool new_state)
 dbgln_if(EXT2_DEBUG, "Ext2FS: set_inode_allocation_state({}) {} -> {}", inode_index, current_state, new_state);

 if (current_state == new_state) {
-        ASSERT_NOT_REACHED();
+        VERIFY_NOT_REACHED();
 return true;
 }

@@ -1325,14 +1325,14 @@ Ext2FS::CachedBitmap& Ext2FS::get_bitmap_block(BlockIndex bitmap_block_index)
 auto block = KBuffer::create_with_size(block_size(), Region::Access::Read | Region::Access::Write, "Ext2FS: Cached bitmap block");
 auto buffer = UserOrKernelBuffer::for_kernel_buffer(block.data());
 int err = read_block(bitmap_block_index, &buffer, block_size());
-    ASSERT(err >= 0);
+    VERIFY(err >= 0);
 m_cached_bitmaps.append(make<CachedBitmap>(bitmap_block_index, move(block)));
 return *m_cached_bitmaps.last();
 }

 bool Ext2FS::set_block_allocation_state(BlockIndex block_index, bool new_state)
 {
-    ASSERT(block_index != 0);
+    VERIFY(block_index != 0);
 LOCKER(m_lock);

 auto group_index = group_index_from_block_index(block_index);
@@ -1346,7 +1346,7 @@ bool Ext2FS::set_block_allocation_state(BlockIndex block_index, bool new_state)
 dbgln_if(EXT2_DEBUG, "Ext2FS: block {} state: {} -> {} (in bitmap block {})", block_index, current_state, new_state, bgd.bg_block_bitmap);

 if (current_state == new_state) {
-        ASSERT_NOT_REACHED();
+        VERIFY_NOT_REACHED();
 return true;
 }

@@ -1374,7 +1374,7 @@ bool Ext2FS::set_block_allocation_state(BlockIndex block_index, bool new_state)
 KResult Ext2FS::create_directory(Ext2FSInode& parent_inode, const String& name, mode_t mode, uid_t uid, gid_t gid)
 {
 LOCKER(m_lock);
-    ASSERT(is_directory(mode));
+    VERIFY(is_directory(mode));

 auto inode_or_error = create_inode(parent_inode, name, mode, 0, uid, gid);
 if (inode_or_error.is_error())
@@ -1424,7 +1424,7 @@ KResultOr<NonnullRefPtr<Inode>> Ext2FS::create_inode(Ext2FSInode& parent_inode,

 // Looks like we're good, time to update the inode bitmap and group+global inode counters.
 bool success = set_inode_allocation_state(inode_id, true);
-    ASSERT(success);
+    VERIFY(success);

 struct timeval now;
 kgettimeofday(now);
@@ -1450,7 +1450,7 @@ KResultOr<NonnullRefPtr<Inode>> Ext2FS::create_inode(Ext2FSInode& parent_inode,

 e2inode.i_flags = 0;
 success = write_ext2_inode(inode_id, e2inode);
-    ASSERT(success);
+    VERIFY(success);

 // We might have cached the fact that this inode didn't exist. Wipe the slate.
 m_inode_cache.remove(inode_id);
@@ -1487,7 +1487,7 @@ bool Ext2FSInode::populate_lookup_cache() const

 RefPtr<Inode> Ext2FSInode::lookup(StringView name)
 {
-    ASSERT(is_directory());
+    VERIFY(is_directory());
 if (!populate_lookup_cache())
 return {};
 LOCKER(m_lock);
@@ -1549,7 +1549,7 @@ KResult Ext2FSInode::decrement_link_count()
 LOCKER(m_lock);
 if (fs().is_readonly())
 return EROFS;
-    ASSERT(m_raw_inode.i_links_count);
+    VERIFY(m_raw_inode.i_links_count);
 --m_raw_inode.i_links_count;
 if (ref_count() == 1 && m_raw_inode.i_links_count == 0)
 fs().uncache_inode(index());
@@ -1565,7 +1565,7 @@ void Ext2FS::uncache_inode(InodeIndex index)

 KResultOr<size_t> Ext2FSInode::directory_entry_count() const
 {
-    ASSERT(is_directory());
+    VERIFY(is_directory());
 LOCKER(m_lock);
 populate_lookup_cache();
 return m_lookup_cache.size();
@@ -134,13 +134,13 @@ void FIFO::detach(Direction direction)
 #if FIFO_DEBUG
 klog() << "close reader (" << m_readers << " - 1)";
 #endif
-        ASSERT(m_readers);
+        VERIFY(m_readers);
 --m_readers;
 } else if (direction == Direction::Writer) {
 #if FIFO_DEBUG
 klog() << "close writer (" << m_writers << " - 1)";
 #endif
-        ASSERT(m_writers);
+        VERIFY(m_writers);
 --m_writers;
 }

@@ -50,7 +50,7 @@ public:

 virtual bool should_add_blocker(Thread::Blocker& b, void* data) override
 {
-        ASSERT(b.blocker_type() == Thread::Blocker::Type::File);
+        VERIFY(b.blocker_type() == Thread::Blocker::Type::File);
 auto& blocker = static_cast<Thread::FileBlocker&>(b);
 return !blocker.unblock(true, data);
 }
@@ -59,7 +59,7 @@ public:
 {
 ScopedSpinLock lock(m_lock);
 do_unblock([&](auto& b, void* data, bool&) {
-            ASSERT(b.blocker_type() == Thread::Blocker::Type::File);
+            VERIFY(b.blocker_type() == Thread::Blocker::Type::File);
 auto& blocker = static_cast<Thread::FileBlocker&>(b);
 return blocker.unblock(false, data);
 });
@@ -159,7 +159,7 @@ protected:
 private:
 ALWAYS_INLINE void do_evaluate_block_conditions()
 {
-        ASSERT(!Processor::current().in_irq());
+        VERIFY(!Processor::current().in_irq());
 block_condition().unblock();
 }

@@ -107,7 +107,7 @@ Thread::FileBlocker::BlockFlags FileDescription::should_unblock(Thread::FileBloc

 if ((u32)block_flags & (u32)Thread::FileBlocker::BlockFlags::SocketFlags) {
 auto* sock = socket();
-        ASSERT(sock);
+        VERIFY(sock);
 if (((u32)block_flags & (u32)Thread::FileBlocker::BlockFlags::Accept) && sock->can_accept())
 unblock_flags |= (u32)Thread::FileBlocker::BlockFlags::Accept;
 if (((u32)block_flags & (u32)Thread::FileBlocker::BlockFlags::Connect) && sock->setup_state() == Socket::SetupState::Completed)
@@ -205,8 +205,8 @@ bool FileDescription::can_read() const
 KResultOr<NonnullOwnPtr<KBuffer>> FileDescription::read_entire_file()
 {
 // HACK ALERT: (This entire function)
-    ASSERT(m_file->is_inode());
+    VERIFY(m_file->is_inode());
-    ASSERT(m_inode);
+    VERIFY(m_inode);
 return m_inode->read_entire(this);
 }

@@ -95,7 +95,7 @@ void FS::lock_all()

 void FS::set_block_size(size_t block_size)
 {
-    ASSERT(block_size > 0);
+    VERIFY(block_size > 0);
 if (block_size == m_block_size)
 return;
 m_block_size = block_size;
@@ -49,7 +49,7 @@ SpinLock<u32>& Inode::all_inodes_lock()

 InlineLinkedList<Inode>& Inode::all_with_lock()
 {
-    ASSERT(s_all_inodes_lock.is_locked());
+    VERIFY(s_all_inodes_lock.is_locked());

 return *s_list;
 }
@@ -66,7 +66,7 @@ void Inode::sync()
 }

 for (auto& inode : inodes) {
-        ASSERT(inode.is_metadata_dirty());
+        VERIFY(inode.is_metadata_dirty());
 inode.flush_metadata();
 }
 }
@@ -83,7 +83,7 @@ KResultOr<NonnullOwnPtr<KBuffer>> Inode::read_entire(FileDescription* descriptio
 nread = read_bytes(offset, sizeof(buffer), buf, description);
 if (nread < 0)
 return KResult((ErrnoCode)-nread);
-        ASSERT(nread <= (ssize_t)sizeof(buffer));
+        VERIFY(nread <= (ssize_t)sizeof(buffer));
 if (nread <= 0)
 break;
 builder.append((const char*)buffer, nread);
@@ -203,27 +203,27 @@ bool Inode::unbind_socket()
 void Inode::register_watcher(Badge<InodeWatcher>, InodeWatcher& watcher)
 {
 LOCKER(m_lock);
-    ASSERT(!m_watchers.contains(&watcher));
+    VERIFY(!m_watchers.contains(&watcher));
 m_watchers.set(&watcher);
 }

 void Inode::unregister_watcher(Badge<InodeWatcher>, InodeWatcher& watcher)
 {
 LOCKER(m_lock);
-    ASSERT(m_watchers.contains(&watcher));
+    VERIFY(m_watchers.contains(&watcher));
 m_watchers.remove(&watcher);
 }

 NonnullRefPtr<FIFO> Inode::fifo()
 {
 LOCKER(m_lock);
-    ASSERT(metadata().is_fifo());
+    VERIFY(metadata().is_fifo());

 // FIXME: Release m_fifo when it is closed by all readers and writers
 if (!m_fifo)
 m_fifo = FIFO::create(metadata().uid);

-    ASSERT(m_fifo);
+    VERIFY(m_fifo);
 return *m_fifo;
 }

@@ -233,7 +233,7 @@ void Inode::set_metadata_dirty(bool metadata_dirty)

 if (metadata_dirty) {
 // Sanity check.
-        ASSERT(!fs().is_readonly());
+        VERIFY(!fs().is_readonly());
 }

 if (m_metadata_dirty == metadata_dirty)
@@ -122,8 +122,8 @@ KResultOr<Region*> InodeFile::mmap(Process& process, FileDescription& descriptio

 String InodeFile::absolute_path(const FileDescription& description) const
 {
-    ASSERT_NOT_REACHED();
+    VERIFY_NOT_REACHED();
-    ASSERT(description.custody());
+    VERIFY(description.custody());
 return description.absolute_path();
 }

@@ -140,15 +140,15 @@ KResult InodeFile::truncate(u64 size)

 KResult InodeFile::chown(FileDescription& description, uid_t uid, gid_t gid)
 {
-    ASSERT(description.inode() == m_inode);
+    VERIFY(description.inode() == m_inode);
-    ASSERT(description.custody());
+    VERIFY(description.custody());
 return VFS::the().chown(*description.custody(), uid, gid);
 }

 KResult InodeFile::chmod(FileDescription& description, mode_t mode)
 {
-    ASSERT(description.inode() == m_inode);
+    VERIFY(description.inode() == m_inode);
-    ASSERT(description.custody());
+    VERIFY(description.custody());
 return VFS::the().chmod(*description.custody(), mode);
 }

@@ -60,7 +60,7 @@ bool InodeWatcher::can_write(const FileDescription&, size_t) const
 KResultOr<size_t> InodeWatcher::read(FileDescription&, size_t, UserOrKernelBuffer& buffer, size_t buffer_size)
 {
 LOCKER(m_lock);
-    ASSERT(!m_queue.is_empty() || !m_inode);
+    VERIFY(!m_queue.is_empty() || !m_inode);

 if (!m_inode)
 return 0;
@@ -44,7 +44,7 @@ Plan9FS::~Plan9FS()
 {
 // Make sure to destroy the root inode before the FS gets destroyed.
 if (m_root_inode) {
-        ASSERT(m_root_inode->ref_count() == 1);
+        VERIFY(m_root_inode->ref_count() == 1);
 m_root_inode = nullptr;
 }
 }
@@ -153,7 +153,7 @@ public:
 template<typename N>
 Decoder& read_number(N& number)
 {
-            ASSERT(sizeof(number) <= m_data.length());
+            VERIFY(sizeof(number) <= m_data.length());
 memcpy(&number, m_data.characters_without_null_termination(), sizeof(number));
 m_data = m_data.substring_view(sizeof(number), m_data.length() - sizeof(number));
 return *this;
@@ -170,14 +170,14 @@ public:
 template<typename T>
 Message& operator>>(T& t)
 {
-        ASSERT(m_have_been_built);
+        VERIFY(m_have_been_built);
 m_built.decoder >> t;
 return *this;
 }

 StringView read_data()
 {
-        ASSERT(m_have_been_built);
+        VERIFY(m_have_been_built);
 return m_built.decoder.read_data();
 }

@@ -197,7 +197,7 @@ private:
 template<typename N>
 Message& append_number(N number)
 {
-        ASSERT(!m_have_been_built);
+        VERIFY(!m_have_been_built);
 m_builder.append(reinterpret_cast<const char*>(&number), sizeof(number));
 return *this;
 }
@@ -330,7 +330,7 @@ Plan9FS::Message::Decoder& Plan9FS::Message::Decoder::operator>>(StringView& str
 {
 u16 length;
 *this >> length;
-    ASSERT(length <= m_data.length());
+    VERIFY(length <= m_data.length());
 string = m_data.substring_view(0, length);
 m_data = m_data.substring_view_starting_after_substring(string);
 return *this;
@@ -340,7 +340,7 @@ StringView Plan9FS::Message::Decoder::read_data()
 {
 u32 length;
 *this >> length;
-    ASSERT(length <= m_data.length());
+    VERIFY(length <= m_data.length());
 auto data = m_data.substring_view(0, length);
 m_data = m_data.substring_view_starting_after_substring(data);
 return data;
@@ -401,12 +401,12 @@ Plan9FS::Message& Plan9FS::Message::operator=(Message&& message)

 const KBuffer& Plan9FS::Message::build()
 {
-    ASSERT(!m_have_been_built);
+    VERIFY(!m_have_been_built);

 auto tmp_buffer = m_builder.build();

 // FIXME: We should not assume success here.
-    ASSERT(tmp_buffer);
+    VERIFY(tmp_buffer);

 m_have_been_built = true;
 m_builder.~KBufferBuilder();
@@ -470,7 +470,7 @@ bool Plan9FS::Plan9FSBlockCondition::should_add_blocker(Thread::Blocker& b, void
 void Plan9FS::Plan9FSBlockCondition::unblock_completed(u16 tag)
 {
 unblock([&](Thread::Blocker& b, void*, bool&) {
-        ASSERT(b.blocker_type() == Thread::Blocker::Type::Plan9FS);
+        VERIFY(b.blocker_type() == Thread::Blocker::Type::Plan9FS);
 auto& blocker = static_cast<Blocker&>(b);
 return blocker.unblock(tag);
 });
@@ -479,7 +479,7 @@ void Plan9FS::Plan9FSBlockCondition::unblock_completed(u16 tag)
 void Plan9FS::Plan9FSBlockCondition::unblock_all()
 {
 unblock([&](Thread::Blocker& b, void*, bool&) {
-        ASSERT(b.blocker_type() == Thread::Blocker::Type::Plan9FS);
+        VERIFY(b.blocker_type() == Thread::Blocker::Type::Plan9FS);
 auto& blocker = static_cast<Blocker&>(b);
 return blocker.unblock();
 });
@@ -498,13 +498,13 @@ bool Plan9FS::is_complete(const ReceiveCompletion& completion)
 LOCKER(m_lock);
 if (m_completions.contains(completion.tag)) {
 // If it's still in the map then it can't be complete
-        ASSERT(!completion.completed);
+        VERIFY(!completion.completed);
 return false;
 }

 // if it's not in the map anymore, it must be complete. But we MUST
 // hold m_lock to be able to check completion.completed!
-    ASSERT(completion.completed);
+    VERIFY(completion.completed);
 return true;
 }

@ -150,14 +150,14 @@ static inline ProcFileType to_proc_file_type(const InodeIdentifier& identifier)
|
||||||
|
|
||||||
static inline int to_fd(const InodeIdentifier& identifier)
|
static inline int to_fd(const InodeIdentifier& identifier)
|
||||||
{
|
{
|
||||||
ASSERT(to_proc_parent_directory(identifier) == PDI_PID_fd);
|
VERIFY(to_proc_parent_directory(identifier) == PDI_PID_fd);
|
||||||
return (identifier.index().value() & 0xff) - FI_MaxStaticFileIndex;
|
return (identifier.index().value() & 0xff) - FI_MaxStaticFileIndex;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline size_t to_sys_index(const InodeIdentifier& identifier)
|
static inline size_t to_sys_index(const InodeIdentifier& identifier)
|
||||||
{
|
{
|
||||||
ASSERT(to_proc_parent_directory(identifier) == PDI_Root_sys);
|
VERIFY(to_proc_parent_directory(identifier) == PDI_Root_sys);
|
||||||
ASSERT(to_proc_file_type(identifier) == FI_Root_sys_variable);
|
VERIFY(to_proc_file_type(identifier) == FI_Root_sys_variable);
|
||||||
return identifier.index().value() >> 16u;
|
return identifier.index().value() >> 16u;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -178,7 +178,7 @@ static inline InodeIdentifier to_identifier_with_stack(unsigned fsid, ThreadID t
|
||||||
|
|
||||||
static inline InodeIdentifier sys_var_to_identifier(unsigned fsid, unsigned index)
|
static inline InodeIdentifier sys_var_to_identifier(unsigned fsid, unsigned index)
|
||||||
{
|
{
|
||||||
ASSERT(index < 256);
|
VERIFY(index < 256);
|
||||||
return { fsid, (PDI_Root_sys << 12u) | (index << 16u) | FI_Root_sys_variable };
|
return { fsid, (PDI_Root_sys << 12u) | (index << 16u) | FI_Root_sys_variable };
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -199,7 +199,7 @@ static inline InodeIdentifier to_parent_id(const InodeIdentifier& identifier)
|
||||||
case PDI_PID_stacks:
|
case PDI_PID_stacks:
|
||||||
return to_identifier(identifier.fsid(), PDI_PID, to_pid(identifier), FI_PID_stacks);
|
return to_identifier(identifier.fsid(), PDI_PID, to_pid(identifier), FI_PID_stacks);
|
||||||
}
|
}
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
}
|
}
|
||||||
|
|
||||||
#if 0
|
#if 0
|
||||||
|
@ -436,7 +436,7 @@ static bool procfs$devices(InodeIdentifier, KBufferBuilder& builder)
|
||||||
else if (device.is_character_device())
|
else if (device.is_character_device())
|
||||||
obj.add("type", "character");
|
obj.add("type", "character");
|
||||||
else
|
else
|
||||||
ASSERT_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
});
|
});
|
||||||
array.finish();
|
array.finish();
|
||||||
return true;
|
return true;
|
||||||
|
@ -633,7 +633,7 @@ static bool procfs$pid_exe(InodeIdentifier identifier, KBufferBuilder& builder)
|
||||||
if (!process)
|
if (!process)
|
||||||
return false;
|
return false;
|
||||||
auto* custody = process->executable();
|
auto* custody = process->executable();
|
||||||
ASSERT(custody);
|
VERIFY(custody);
|
||||||
builder.append(custody->absolute_path().bytes());
|
builder.append(custody->absolute_path().bytes());
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@ -884,14 +884,14 @@ SysVariable& SysVariable::for_inode(InodeIdentifier id)
|
||||||
if (index >= sys_variables().size())
|
if (index >= sys_variables().size())
|
||||||
return sys_variables()[0];
|
return sys_variables()[0];
|
||||||
auto& variable = sys_variables()[index];
|
auto& variable = sys_variables()[index];
|
||||||
ASSERT(variable.address);
|
VERIFY(variable.address);
|
||||||
return variable;
|
return variable;
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool read_sys_bool(InodeIdentifier inode_id, KBufferBuilder& builder)
|
static bool read_sys_bool(InodeIdentifier inode_id, KBufferBuilder& builder)
|
||||||
{
|
{
|
||||||
auto& variable = SysVariable::for_inode(inode_id);
|
auto& variable = SysVariable::for_inode(inode_id);
|
||||||
ASSERT(variable.type == SysVariable::Type::Boolean);
|
VERIFY(variable.type == SysVariable::Type::Boolean);
|
||||||
|
|
||||||
u8 buffer[2];
|
u8 buffer[2];
|
||||||
auto* lockable_bool = reinterpret_cast<Lockable<bool>*>(variable.address);
|
auto* lockable_bool = reinterpret_cast<Lockable<bool>*>(variable.address);
|
||||||
|
@ -907,7 +907,7 @@ static bool read_sys_bool(InodeIdentifier inode_id, KBufferBuilder& builder)
|
||||||
static ssize_t write_sys_bool(InodeIdentifier inode_id, const UserOrKernelBuffer& buffer, size_t size)
|
static ssize_t write_sys_bool(InodeIdentifier inode_id, const UserOrKernelBuffer& buffer, size_t size)
|
||||||
{
|
{
|
||||||
auto& variable = SysVariable::for_inode(inode_id);
|
auto& variable = SysVariable::for_inode(inode_id);
|
||||||
ASSERT(variable.type == SysVariable::Type::Boolean);
|
VERIFY(variable.type == SysVariable::Type::Boolean);
|
||||||
|
|
||||||
char value = 0;
|
char value = 0;
|
||||||
bool did_read = false;
|
bool did_read = false;
|
||||||
|
@ -920,7 +920,7 @@ static ssize_t write_sys_bool(InodeIdentifier inode_id, const UserOrKernelBuffer
|
||||||
});
|
});
|
||||||
if (nread < 0)
|
if (nread < 0)
|
||||||
return nread;
|
return nread;
|
||||||
ASSERT(nread == 0 || (nread == 1 && did_read));
|
VERIFY(nread == 0 || (nread == 1 && did_read));
|
||||||
if (nread == 0 || !(value == '0' || value == '1'))
|
if (nread == 0 || !(value == '0' || value == '1'))
|
||||||
return (ssize_t)size;
|
return (ssize_t)size;
|
||||||
|
|
||||||
|
@@ -936,7 +936,7 @@ static ssize_t write_sys_bool(InodeIdentifier inode_id, const UserOrKernelBuffer
 static bool read_sys_string(InodeIdentifier inode_id, KBufferBuilder& builder)
 {
     auto& variable = SysVariable::for_inode(inode_id);
-    ASSERT(variable.type == SysVariable::Type::String);
+    VERIFY(variable.type == SysVariable::Type::String);

     auto* lockable_string = reinterpret_cast<Lockable<String>*>(variable.address);
     LOCKER(lockable_string->lock(), Lock::Mode::Shared);
@@ -947,7 +947,7 @@ static bool read_sys_string(InodeIdentifier inode_id, KBufferBuilder& builder)
 static ssize_t write_sys_string(InodeIdentifier inode_id, const UserOrKernelBuffer& buffer, size_t size)
 {
     auto& variable = SysVariable::for_inode(inode_id);
-    ASSERT(variable.type == SysVariable::Type::String);
+    VERIFY(variable.type == SysVariable::Type::String);

     auto string_copy = buffer.copy_into_string(size);
     if (string_copy.is_null())
@@ -1032,7 +1032,7 @@ RefPtr<Inode> ProcFS::get_inode(InodeIdentifier inode_id) const
     }
     auto inode = adopt(*new ProcFSInode(const_cast<ProcFS&>(*this), inode_id.index()));
     auto result = m_inodes.set(inode_id.index().value(), inode.ptr());
-    ASSERT(result == ((it == m_inodes.end()) ? AK::HashSetResult::InsertedNewEntry : AK::HashSetResult::ReplacedExistingEntry));
+    VERIFY(result == ((it == m_inodes.end()) ? AK::HashSetResult::InsertedNewEntry : AK::HashSetResult::ReplacedExistingEntry));
     return inode;
 }

@@ -1081,7 +1081,7 @@ KResult ProcFSInode::refresh_data(FileDescription& description) const
     bool (*read_callback)(InodeIdentifier, KBufferBuilder&) = nullptr;
     if (directory_entry) {
         read_callback = directory_entry->read_callback;
-        ASSERT(read_callback);
+        VERIFY(read_callback);
     } else {
         switch (to_proc_parent_directory(identifier())) {
         case PDI_PID_fd:
@@ -1093,7 +1093,7 @@ KResult ProcFSInode::refresh_data(FileDescription& description) const
         case PDI_Root_sys:
             switch (SysVariable::for_inode(identifier()).type) {
             case SysVariable::Type::Invalid:
-                ASSERT_NOT_REACHED();
+                VERIFY_NOT_REACHED();
             case SysVariable::Type::Boolean:
                 read_callback = read_sys_bool;
                 break;
@@ -1103,10 +1103,10 @@ KResult ProcFSInode::refresh_data(FileDescription& description) const
             }
             break;
         default:
-            ASSERT_NOT_REACHED();
+            VERIFY_NOT_REACHED();
         }

-        ASSERT(read_callback);
+        VERIFY(read_callback);
     }

     if (!cached_data)
@@ -1231,8 +1231,8 @@ InodeMetadata ProcFSInode::metadata() const
 ssize_t ProcFSInode::read_bytes(off_t offset, ssize_t count, UserOrKernelBuffer& buffer, FileDescription* description) const
 {
     dbgln_if(PROCFS_DEBUG, "ProcFS: read_bytes offset: {} count: {}", offset, count);
-    ASSERT(offset >= 0);
-    ASSERT(buffer.user_or_kernel_ptr());
+    VERIFY(offset >= 0);
+    VERIFY(buffer.user_or_kernel_ptr());

     if (!description)
         return -EIO;
@@ -1350,7 +1350,7 @@ KResult ProcFSInode::traverse_as_directory(Function<bool(const FS::DirectoryEntr

 RefPtr<Inode> ProcFSInode::lookup(StringView name)
 {
-    ASSERT(is_directory());
+    VERIFY(is_directory());
     if (name == ".")
         return this;
     if (name == "..")
@@ -1490,7 +1490,7 @@ ssize_t ProcFSInode::write_bytes(off_t offset, ssize_t size, const UserOrKernelB
     if (to_proc_parent_directory(identifier()) == PDI_Root_sys) {
         switch (SysVariable::for_inode(identifier()).type) {
         case SysVariable::Type::Invalid:
-            ASSERT_NOT_REACHED();
+            VERIFY_NOT_REACHED();
         case SysVariable::Type::Boolean:
             write_callback = write_sys_bool;
             break;
@@ -1506,9 +1506,9 @@ ssize_t ProcFSInode::write_bytes(off_t offset, ssize_t size, const UserOrKernelB
         write_callback = directory_entry->write_callback;
     }

-    ASSERT(is_persistent_inode(identifier()));
+    VERIFY(is_persistent_inode(identifier()));
     // FIXME: Being able to write into ProcFS at a non-zero offset seems like something we should maybe support..
-    ASSERT(offset == 0);
+    VERIFY(offset == 0);
     ssize_t nwritten = write_callback(identifier(), buffer, (size_t)size);
     if (nwritten < 0)
         klog() << "ProcFS: Writing " << size << " bytes failed: " << nwritten;
@@ -1565,7 +1565,7 @@ KResultOr<NonnullRefPtr<Custody>> ProcFSInode::resolve_as_link(Custody& base, Re
         res = &process->root_directory();
         break;
     default:
-        ASSERT_NOT_REACHED();
+        VERIFY_NOT_REACHED();
     }

     if (!res)
@@ -1666,7 +1666,7 @@ KResult ProcFSInode::remove_child([[maybe_unused]] const StringView& name)

 KResultOr<size_t> ProcFSInode::directory_entry_count() const
 {
-    ASSERT(is_directory());
+    VERIFY(is_directory());
     size_t count = 0;
     KResult result = traverse_as_directory([&count](auto&) {
         ++count;
Some files were not shown because too many files have changed in this diff.
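
Note: the hunks above are a purely mechanical rename of ASSERT and ASSERT_NOT_REACHED call sites to VERIFY and VERIFY_NOT_REACHED; the surrounding logic is untouched. For readers unfamiliar with this style of macro, the snippet below is a minimal, hypothetical C++ sketch of a check macro with no NDEBUG escape hatch. It is not the project's actual implementation, and the names MY_VERIFY, MY_VERIFY_NOT_REACHED, and my_verify_failed are invented purely for illustration.

// Illustrative sketch only; invented names, not the real macro definitions.
#include <cstdio>
#include <cstdlib>

[[noreturn]] static void my_verify_failed(const char* expr, const char* file, int line)
{
    // Report the failed expression and abort. Nothing here is conditional on
    // build type, so the check fires in optimized builds as well.
    std::fprintf(stderr, "VERIFICATION FAILED: %s at %s:%d\n", expr, file, line);
    std::abort();
}

// Unlike the classic assert(), this macro has no NDEBUG escape hatch: the
// expression is always evaluated and checked.
#define MY_VERIFY(expr)                                      \
    do {                                                     \
        if (!(expr))                                         \
            my_verify_failed(#expr, __FILE__, __LINE__);     \
    } while (0)

#define MY_VERIFY_NOT_REACHED() MY_VERIFY(false)

int main()
{
    int offset = 0;
    MY_VERIFY(offset >= 0); // passes
    MY_VERIFY(offset == 1); // fails and aborts, even when compiled with -DNDEBUG
}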