AK: Make Vector::try_* functions return ErrorOr<void>
Instead of signalling allocation failure with a bool return value (false), we now use ErrorOr<void> and return ENOMEM as appropriate. This allows us to use TRY() and MUST() with Vector. :^)
Parent: cd49f30bea
Commit: 88b6428c25
Notes: sideshowbarker, 2024-07-18 01:18:14 +09:00
Author: https://github.com/awesomekling
Commit: https://github.com/SerenityOS/serenity/commit/88b6428c25e
16 changed files with 98 additions and 152 deletions
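For illustration, here is a minimal sketch (not part of the commit) of the calling pattern the new return type enables. The helper functions below are hypothetical, and the includes assume the usual AK headers:

// Sketch only: how the ErrorOr<void>-returning try_* methods compose with
// TRY() and MUST(). collect_squares/collect_squares_or_die are made up
// for illustration.
#include <AK/Error.h>
#include <AK/Try.h>
#include <AK/Vector.h>

// A fallible caller propagates allocation failure (ENOMEM) to its own caller.
ErrorOr<void> collect_squares(Vector<int>& out, int count)
{
    TRY(out.try_ensure_capacity(count)); // fails with an Error instead of returning false
    for (int i = 0; i < count; ++i)
        TRY(out.try_append(i * i));
    return {};
}

// An infallible caller asserts success with MUST(), which is what the plain
// append()/extend()/resize() wrappers now do internally.
void collect_squares_or_die(Vector<int>& out, int count)
{
    MUST(collect_squares(out, count));
}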
AK/Vector.h (158 changed lines)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
  * Copyright (c) 2021, the SerenityOS developers.
  *
  * SPDX-License-Identifier: BSD-2-Clause
@@ -8,6 +8,7 @@
 #pragma once

 #include <AK/Assertions.h>
+#include <AK/Error.h>
 #include <AK/Find.h>
 #include <AK/Forward.h>
 #include <AK/Iterator.h>
@@ -204,49 +205,41 @@ public:
     template<typename U = T>
     void insert(size_t index, U&& value) requires(CanBePlacedInsideVector<U>)
     {
-        auto did_allocate = try_insert<U>(index, forward<U>(value));
-        VERIFY(did_allocate);
+        MUST(try_insert<U>(index, forward<U>(value)));
     }

     template<typename TUnaryPredicate, typename U = T>
     void insert_before_matching(U&& value, TUnaryPredicate predicate, size_t first_index = 0, size_t* inserted_index = nullptr) requires(CanBePlacedInsideVector<U>)
     {
-        auto did_allocate = try_insert_before_matching(forward<U>(value), predicate, first_index, inserted_index);
-        VERIFY(did_allocate);
+        MUST(try_insert_before_matching(forward<U>(value), predicate, first_index, inserted_index));
     }

     void extend(Vector&& other)
     {
-        auto did_allocate = try_extend(move(other));
-        VERIFY(did_allocate);
+        MUST(try_extend(move(other)));
     }

     void extend(Vector const& other)
     {
-        auto did_allocate = try_extend(other);
-        VERIFY(did_allocate);
+        MUST(try_extend(other));
     }

     ALWAYS_INLINE void append(T&& value)
     {
-        bool did_allocate;
         if constexpr (contains_reference)
-            did_allocate = try_append(value);
+            MUST(try_append(value));
         else
-            did_allocate = try_append(move(value));
-        VERIFY(did_allocate);
+            MUST(try_append(move(value)));
     }

     ALWAYS_INLINE void append(T const& value) requires(!contains_reference)
     {
-        auto did_allocate = try_append(T(value));
-        VERIFY(did_allocate);
+        MUST(try_append(T(value)));
     }

     void append(StorageType const* values, size_t count)
     {
-        auto did_allocate = try_append(values, count);
-        VERIFY(did_allocate);
+        MUST(try_append(values, count));
     }

     template<typename U = T>
@@ -263,27 +256,23 @@ public:
     template<class... Args>
     void empend(Args&&... args) requires(!contains_reference)
     {
-        auto did_allocate = try_empend(forward<Args>(args)...);
-        VERIFY(did_allocate);
+        MUST(try_empend(forward<Args>(args)...));
     }

     template<typename U = T>
     void prepend(U&& value) requires(CanBePlacedInsideVector<U>)
     {
-        auto did_allocate = try_insert(0, forward<U>(value));
-        VERIFY(did_allocate);
+        MUST(try_insert(0, forward<U>(value)));
     }

     void prepend(Vector&& other)
     {
-        auto did_allocate = try_prepend(move(other));
-        VERIFY(did_allocate);
+        MUST(try_prepend(move(other)));
     }

     void prepend(StorageType const* values, size_t count)
     {
-        auto did_allocate = try_prepend(values, count);
-        VERIFY(did_allocate);
+        MUST(try_prepend(values, count));
     }

     // FIXME: What about assigning from a vector with lower inline capacity?
@@ -451,14 +440,13 @@ public:
     }

     template<typename U = T>
-    [[nodiscard]] bool try_insert(size_t index, U&& value) requires(CanBePlacedInsideVector<U>)
+    ErrorOr<void> try_insert(size_t index, U&& value) requires(CanBePlacedInsideVector<U>)
     {
         if (index > size())
-            return false;
+            return Error::from_errno(EINVAL);
         if (index == size())
             return try_append(forward<U>(value));
-        if (!try_grow_capacity(size() + 1))
-            return false;
+        TRY(try_grow_capacity(size() + 1));
         ++m_size;
         if constexpr (Traits<StorageType>::is_trivial()) {
             TypedTransfer<StorageType>::move(slot(index + 1), slot(index), m_size - index - 1);
@@ -472,109 +460,101 @@ public:
             new (slot(index)) StorageType(&value);
         else
             new (slot(index)) StorageType(forward<U>(value));
-        return true;
+        return {};
     }

     template<typename TUnaryPredicate, typename U = T>
-    [[nodiscard]] bool try_insert_before_matching(U&& value, TUnaryPredicate predicate, size_t first_index = 0, size_t* inserted_index = nullptr) requires(CanBePlacedInsideVector<U>)
+    ErrorOr<void> try_insert_before_matching(U&& value, TUnaryPredicate predicate, size_t first_index = 0, size_t* inserted_index = nullptr) requires(CanBePlacedInsideVector<U>)
     {
         for (size_t i = first_index; i < size(); ++i) {
             if (predicate(at(i))) {
-                if (!try_insert(i, forward<U>(value)))
-                    return false;
+                TRY(try_insert(i, forward<U>(value)));
                 if (inserted_index)
                     *inserted_index = i;
-                return true;
+                return {};
             }
         }
-        if (!try_append(forward<U>(value)))
-            return false;
+        TRY(try_append(forward<U>(value)));
         if (inserted_index)
             *inserted_index = size() - 1;
-        return true;
+        return {};
     }

-    [[nodiscard]] bool try_extend(Vector&& other)
+    ErrorOr<void> try_extend(Vector&& other)
     {
         if (is_empty()) {
             *this = move(other);
-            return true;
+            return {};
         }
         auto other_size = other.size();
         Vector tmp = move(other);
-        if (!try_grow_capacity(size() + other_size))
-            return false;
+        TRY(try_grow_capacity(size() + other_size));
         TypedTransfer<StorageType>::move(data() + m_size, tmp.data(), other_size);
         m_size += other_size;
-        return true;
+        return {};
     }

-    [[nodiscard]] bool try_extend(Vector const& other)
+    ErrorOr<void> try_extend(Vector const& other)
     {
-        if (!try_grow_capacity(size() + other.size()))
-            return false;
+        TRY(try_grow_capacity(size() + other.size()));
         TypedTransfer<StorageType>::copy(data() + m_size, other.data(), other.size());
         m_size += other.m_size;
-        return true;
+        return {};
     }

-    [[nodiscard]] ALWAYS_INLINE bool try_append(T&& value)
+    ErrorOr<void> try_append(T&& value)
     {
-        if (!try_grow_capacity(size() + 1))
-            return false;
+        TRY(try_grow_capacity(size() + 1));
         if constexpr (contains_reference)
             new (slot(m_size)) StorageType(&value);
         else
             new (slot(m_size)) StorageType(move(value));
         ++m_size;
-        return true;
+        return {};
     }

-    [[nodiscard]] ALWAYS_INLINE bool try_append(T const& value) requires(!contains_reference)
+    ErrorOr<void> try_append(T const& value) requires(!contains_reference)
     {
         return try_append(T(value));
     }

-    [[nodiscard]] bool try_append(StorageType const* values, size_t count)
+    ErrorOr<void> try_append(StorageType const* values, size_t count)
     {
         if (!count)
-            return true;
-        if (!try_grow_capacity(size() + count))
-            return false;
+            return {};
+        TRY(try_grow_capacity(size() + count));
         TypedTransfer<StorageType>::copy(slot(m_size), values, count);
         m_size += count;
-        return true;
+        return {};
     }

     template<class... Args>
-    [[nodiscard]] bool try_empend(Args&&... args) requires(!contains_reference)
+    ErrorOr<void> try_empend(Args&&... args) requires(!contains_reference)
     {
-        if (!try_grow_capacity(m_size + 1))
-            return false;
+        TRY(try_grow_capacity(m_size + 1));
         new (slot(m_size)) StorageType { forward<Args>(args)... };
         ++m_size;
-        return true;
+        return {};
     }

     template<typename U = T>
-    [[nodiscard]] bool try_prepend(U&& value) requires(CanBePlacedInsideVector<U>)
+    ErrorOr<void> try_prepend(U&& value) requires(CanBePlacedInsideVector<U>)
     {
         return try_insert(0, forward<U>(value));
     }

-    [[nodiscard]] bool try_prepend(Vector&& other)
+    ErrorOr<void> try_prepend(Vector&& other)
     {
         if (other.is_empty())
-            return true;
+            return {};

         if (is_empty()) {
             *this = move(other);
-            return true;
+            return {};
         }

         auto other_size = other.size();
-        if (!try_grow_capacity(size() + other_size))
-            return false;
+        TRY(try_grow_capacity(size() + other_size));

         for (size_t i = size() + other_size - 1; i >= other.size(); --i) {
             new (slot(i)) StorageType(move(at(i - other_size)));
@@ -584,36 +564,35 @@ public:
         Vector tmp = move(other);
         TypedTransfer<StorageType>::move(slot(0), tmp.data(), tmp.size());
         m_size += other_size;
-        return true;
+        return {};
     }

-    [[nodiscard]] bool try_prepend(StorageType const* values, size_t count)
+    ErrorOr<void> try_prepend(StorageType const* values, size_t count)
    {
         if (!count)
-            return true;
-        if (!try_grow_capacity(size() + count))
-            return false;
+            return {};
+        TRY(try_grow_capacity(size() + count));
         TypedTransfer<StorageType>::move(slot(count), slot(0), m_size);
         TypedTransfer<StorageType>::copy(slot(0), values, count);
         m_size += count;
-        return true;
+        return {};
     }

-    [[nodiscard]] bool try_grow_capacity(size_t needed_capacity)
+    ErrorOr<void> try_grow_capacity(size_t needed_capacity)
     {
         if (m_capacity >= needed_capacity)
-            return true;
+            return {};
         return try_ensure_capacity(padded_capacity(needed_capacity));
     }

-    [[nodiscard]] bool try_ensure_capacity(size_t needed_capacity)
+    ErrorOr<void> try_ensure_capacity(size_t needed_capacity)
     {
         if (m_capacity >= needed_capacity)
-            return true;
+            return {};
         size_t new_capacity = kmalloc_good_size(needed_capacity * sizeof(StorageType)) / sizeof(StorageType);
         auto* new_buffer = static_cast<StorageType*>(kmalloc_array(new_capacity, sizeof(StorageType)));
         if (new_buffer == nullptr)
-            return false;
+            return Error::from_errno(ENOMEM);

         if constexpr (Traits<StorageType>::is_trivial()) {
             TypedTransfer<StorageType>::copy(new_buffer, data(), m_size);
@@ -627,40 +606,37 @@ public:
             kfree_sized(m_outline_buffer, m_capacity * sizeof(StorageType));
         m_outline_buffer = new_buffer;
         m_capacity = new_capacity;
-        return true;
+        return {};
     }

-    [[nodiscard]] bool try_resize(size_t new_size, bool keep_capacity = false) requires(!contains_reference)
+    ErrorOr<void> try_resize(size_t new_size, bool keep_capacity = false) requires(!contains_reference)
     {
         if (new_size <= size()) {
             shrink(new_size, keep_capacity);
-            return true;
+            return {};
         }

-        if (!try_ensure_capacity(new_size))
-            return false;
+        TRY(try_ensure_capacity(new_size));

         for (size_t i = size(); i < new_size; ++i)
             new (slot(i)) StorageType {};
         m_size = new_size;
-        return true;
+        return {};
     }

-    [[nodiscard]] bool try_resize_and_keep_capacity(size_t new_size) requires(!contains_reference)
+    ErrorOr<void> try_resize_and_keep_capacity(size_t new_size) requires(!contains_reference)
     {
         return try_resize(new_size, true);
     }

     void grow_capacity(size_t needed_capacity)
     {
-        auto did_allocate = try_grow_capacity(needed_capacity);
-        VERIFY(did_allocate);
+        MUST(try_grow_capacity(needed_capacity));
     }

     void ensure_capacity(size_t needed_capacity)
     {
-        auto did_allocate = try_ensure_capacity(needed_capacity);
-        VERIFY(did_allocate);
+        MUST(try_ensure_capacity(needed_capacity));
     }

     void shrink(size_t new_size, bool keep_capacity = false)
@@ -684,14 +660,12 @@ public:

     void resize(size_t new_size, bool keep_capacity = false) requires(!contains_reference)
     {
-        auto did_allocate = try_resize(new_size, keep_capacity);
-        VERIFY(did_allocate);
+        MUST(try_resize(new_size, keep_capacity));
     }

     void resize_and_keep_capacity(size_t new_size) requires(!contains_reference)
     {
-        auto did_allocate = try_resize_and_keep_capacity(new_size);
-        VERIFY(did_allocate);
+        MUST(try_resize_and_keep_capacity(new_size));
     }

     using ConstIterator = SimpleIterator<Vector const, T const>;

@@ -917,8 +917,7 @@ ErrorOr<void> Ext2FSInode::resize(u64 new_size)

     if (blocks_needed_after > blocks_needed_before) {
         auto blocks = TRY(fs().allocate_blocks(fs().group_index_from_inode(index()), blocks_needed_after - blocks_needed_before));
-        if (!m_block_list.try_extend(move(blocks)))
-            return ENOMEM;
+        TRY(m_block_list.try_extend(move(blocks)));
     } else if (blocks_needed_after < blocks_needed_before) {
         if constexpr (EXT2_VERY_DEBUG) {
             dbgln("Ext2FSInode[{}]::resize(): Shrinking inode, old block list is {} entries:", identifier(), m_block_list.size());
@@ -1264,8 +1263,7 @@ auto Ext2FS::allocate_blocks(GroupIndex preferred_group_index, size_t count) ->
         return Vector<BlockIndex> {};

     Vector<BlockIndex> blocks;
-    if (!blocks.try_ensure_capacity(count))
-        return ENOMEM;
+    TRY(blocks.try_ensure_capacity(count));

     MutexLocker locker(m_lock);
     auto group_index = preferred_group_index;
@@ -1454,8 +1452,7 @@ ErrorOr<Ext2FS::CachedBitmap*> Ext2FS::get_bitmap_block(BlockIndex bitmap_block_
     auto buffer = UserOrKernelBuffer::for_kernel_buffer(block->data());
     TRY(read_block(bitmap_block_index, &buffer, block_size()));
     auto new_bitmap = TRY(adopt_nonnull_own_or_enomem(new (nothrow) CachedBitmap(bitmap_block_index, move(block))));
-    if (!m_cached_bitmaps.try_append(move(new_bitmap)))
-        return ENOMEM;
+    TRY(m_cached_bitmaps.try_append(move(new_bitmap)));
     return m_cached_bitmaps.last().ptr();
 }

@@ -55,10 +55,7 @@ public:
         if (has_flag(m_current_header->file_flags, ISO::FileFlags::Directory)) {
             dbgln_if(ISO9660_VERY_DEBUG, "next(): Recursing");
             {
-                bool result = m_directory_stack.try_append(move(m_current_directory));
-                if (!result) {
-                    return ENOMEM;
-                }
+                TRY(m_directory_stack.try_append(move(m_current_directory)));
             }

             dbgln_if(ISO9660_VERY_DEBUG, "next(): Pushed into directory stack");

@@ -110,8 +110,7 @@ ErrorOr<void> AddressSpace::unmap_mmap_range(VirtualAddress addr, size_t size)

         // Otherwise, split the regions and collect them for future mapping.
         auto split_regions = TRY(try_split_region_around_range(*region, range_to_unmap));
-        if (new_regions.try_extend(split_regions))
-            return ENOMEM;
+        TRY(new_regions.try_extend(split_regions));
     }

     // Give back any unwanted VM to the range allocator.

@@ -487,8 +487,7 @@ ErrorOr<void> LocalSocket::sendfd(OpenFileDescription const& socket_description,
     // FIXME: Figure out how we should limit this properly.
     if (queue.size() > 128)
         return set_so_error(EBUSY);
-    if (!queue.try_append(move(passing_description)))
-        return set_so_error(ENOMEM);
+    SOCKET_TRY(queue.try_append(move(passing_description)));
     return {};
 }

@@ -71,8 +71,7 @@ ErrorOr<void> Socket::queue_connection_from(NonnullRefPtr<Socket> peer)
     MutexLocker locker(mutex());
     if (m_pending.size() >= m_backlog)
         return set_so_error(ECONNREFUSED);
-    if (!m_pending.try_append(peer))
-        return set_so_error(ENOMEM);
+    SOCKET_TRY(m_pending.try_append(peer));
     evaluate_block_conditions();
     return {};
 }

@@ -144,18 +144,15 @@ ErrorOr<NonnullRefPtr<Process>> Process::try_create_user_process(RefPtr<Thread>&
     auto parts = path.split_view('/');
     if (arguments.is_empty()) {
         auto last_part = TRY(KString::try_create(parts.last()));
-        if (!arguments.try_append(move(last_part)))
-            return ENOMEM;
+        TRY(arguments.try_append(move(last_part)));
     }

     auto path_string = TRY(KString::try_create(path));
     auto name = TRY(KString::try_create(parts.last()));
     auto process = TRY(Process::try_create(first_thread, move(name), uid, gid, ProcessID(0), false, VirtualFileSystem::the().root_custody(), nullptr, tty));

-    if (!process->m_fds.try_resize(process->m_fds.max_open())) {
-        first_thread = nullptr;
-        return ENOMEM;
-    }
+    TRY(process->m_fds.try_resize(process->m_fds.max_open()));

     auto& device_to_use_as_tty = tty ? (CharacterDevice&)*tty : DeviceManagement::the().null_device();
     auto description = TRY(device_to_use_as_tty.open(O_RDWR));
     auto setup_description = [&process, &description](int fd) {

@@ -641,8 +641,7 @@ public:
     ErrorOr<void> try_clone(const Kernel::Process::OpenFileDescriptions& other)
     {
         SpinlockLocker lock_other(other.m_fds_lock);
-        if (!try_resize(other.m_fds_metadatas.size()))
-            return ENOMEM;
+        TRY(try_resize(other.m_fds_metadatas.size()));

         for (size_t i = 0; i < other.m_fds_metadatas.size(); ++i) {
             m_fds_metadatas[i] = other.m_fds_metadatas[i];
@@ -662,7 +661,7 @@ public:
     ErrorOr<ScopedDescriptionAllocation> allocate(int first_candidate_fd = 0);
     size_t open_count() const;

-    bool try_resize(size_t size) { return m_fds_metadatas.try_resize(size); }
+    ErrorOr<void> try_resize(size_t size) { return m_fds_metadatas.try_resize(size); }

     size_t max_open() const
     {

@@ -100,15 +100,13 @@ static ErrorOr<FlatPtr> make_userspace_context_for_main_thread([[maybe_unused]]
     Vector<FlatPtr> argv_entries;
     for (auto& argument : arguments) {
         push_string_on_new_stack(argument.view());
-        if (!argv_entries.try_append(new_sp))
-            return ENOMEM;
+        TRY(argv_entries.try_append(new_sp));
     }

     Vector<FlatPtr> env_entries;
     for (auto& variable : environment) {
         push_string_on_new_stack(variable.view());
-        if (!env_entries.try_append(new_sp))
-            return ENOMEM;
+        TRY(env_entries.try_append(new_sp));
     }

     for (auto& value : auxiliary_values) {

@@ -810,8 +808,7 @@ ErrorOr<void> Process::exec(NonnullOwnPtr<KString> path, NonnullOwnPtrVector<KSt
         auto shebang_words = shebang_result.release_value();
         auto shebang_path = TRY(shebang_words.first().try_clone());
         arguments.ptr_at(0) = move(path);
-        if (!arguments.try_prepend(move(shebang_words)))
-            return ENOMEM;
+        TRY(arguments.try_prepend(move(shebang_words)));
         return exec(move(shebang_path), move(arguments), move(environment), ++recursion_depth);
     }

@@ -881,13 +878,11 @@ ErrorOr<FlatPtr> Process::sys$execve(Userspace<const Syscall::SC_execve_params*>
         if (size.has_overflow())
             return EOVERFLOW;
         Vector<Syscall::StringArgument, 32> strings;
-        if (!strings.try_resize(list.length))
-            return ENOMEM;
+        TRY(strings.try_resize(list.length));
         TRY(copy_from_user(strings.data(), list.strings, size.value()));
         for (size_t i = 0; i < list.length; ++i) {
             auto string = TRY(try_copy_kstring_from_user(strings[i]));
-            if (!output.try_append(move(string)))
-                return ENOMEM;
+            TRY(output.try_append(move(string)));
         }
         return {};
     };

@@ -27,8 +27,8 @@ ErrorOr<FlatPtr> Process::sys$purge(int mode)
             if (vmobject.is_anonymous()) {
                 // In the event that the append fails, only attempt to continue
                 // the purge if we have already appended something successfully.
-                if (!vmobjects.try_append(static_cast<Memory::AnonymousVMObject&>(vmobject)) && vmobjects.is_empty()) {
-                    result = ENOMEM;
+                if (auto append_result = vmobjects.try_append(static_cast<Memory::AnonymousVMObject&>(vmobject)); append_result.is_error() && vmobjects.is_empty()) {
+                    result = append_result.release_error();
                     return IterationDecision::Break;
                 }
             }
@@ -50,8 +50,8 @@ ErrorOr<FlatPtr> Process::sys$purge(int mode)
             if (vmobject.is_inode()) {
                 // In the event that the append fails, only attempt to continue
                 // the purge if we have already appended something successfully.
-                if (!vmobjects.try_append(static_cast<Memory::InodeVMObject&>(vmobject)) && vmobjects.is_empty()) {
-                    result = ENOMEM;
+                if (auto append_result = vmobjects.try_append(static_cast<Memory::InodeVMObject&>(vmobject)); append_result.is_error() && vmobjects.is_empty()) {
+                    result = append_result.release_error();
                     return IterationDecision::Break;
                 }
             }

@@ -50,8 +50,7 @@ ErrorOr<FlatPtr> Process::sys$readv(int fd, Userspace<const struct iovec*> iov,

     u64 total_length = 0;
     Vector<iovec, 32> vecs;
-    if (!vecs.try_resize(iov_count))
-        return ENOMEM;
+    TRY(vecs.try_resize(iov_count));
     TRY(copy_n_from_user(vecs.data(), iov, iov_count));
     for (auto& vec : vecs) {
         total_length += vec.iov_len;

@@ -72,10 +72,8 @@ ErrorOr<FlatPtr> Process::sys$select(Userspace<const Syscall::SC_select_params*>
             continue;

         auto description = TRY(fds().open_file_description(fd));
-        if (!fds_info.try_append({ move(description), block_flags }))
-            return ENOMEM;
-        if (!selected_fds.try_append(fd))
-            return ENOMEM;
+        TRY(fds_info.try_append({ move(description), block_flags }));
+        TRY(selected_fds.try_append(fd));
     }

     if constexpr (IO_DEBUG || POLL_SELECT_DEBUG)
@@ -146,8 +144,7 @@ ErrorOr<FlatPtr> Process::sys$poll(Userspace<const Syscall::SC_poll_params*> use
         nfds_checked *= params.nfds;
         if (nfds_checked.has_overflow())
             return EFAULT;
-        if (!fds_copy.try_resize(params.nfds))
-            return ENOMEM;
+        TRY(fds_copy.try_resize(params.nfds));
         TRY(copy_from_user(fds_copy.data(), &params.fds[0], nfds_checked.value()));
     }

@@ -162,8 +159,7 @@ ErrorOr<FlatPtr> Process::sys$poll(Userspace<const Syscall::SC_poll_params*> use
             block_flags |= BlockFlags::Write;
         if (pfd.events & POLLPRI)
             block_flags |= BlockFlags::ReadPriority;
-        if (!fds_info.try_append({ move(description), block_flags }))
-            return ENOMEM;
+        TRY(fds_info.try_append({ move(description), block_flags }));
     }

     auto current_thread = Thread::current();

@@ -169,8 +169,7 @@ ErrorOr<FlatPtr> Process::sys$setgroups(size_t count, Userspace<const gid_t*> us
     }

     Vector<gid_t> new_extra_gids;
-    if (!new_extra_gids.try_resize(count))
-        return ENOMEM;
+    TRY(new_extra_gids.try_resize(count));
     TRY(copy_n_from_user(new_extra_gids.data(), user_gids, count));

     HashTable<gid_t> unique_extra_gids;
@@ -180,8 +179,7 @@ ErrorOr<FlatPtr> Process::sys$setgroups(size_t count, Userspace<const gid_t*> us
     }

     ProtectedDataMutationScope scope { *this };
-    if (!m_protected_values.extra_gids.try_resize(unique_extra_gids.size()))
-        return ENOMEM;
+    TRY(m_protected_values.extra_gids.try_resize(unique_extra_gids.size()));
     size_t i = 0;
     for (auto& extra_gid : unique_extra_gids) {
         if (extra_gid == gid())

@@ -168,8 +168,7 @@ ErrorOr<FlatPtr> Process::sys$sendmsg(int sockfd, Userspace<const struct msghdr*
     if (msg.msg_iovlen != 1)
         return ENOTSUP; // FIXME: Support this :)
     Vector<iovec, 1> iovs;
-    if (!iovs.try_resize(msg.msg_iovlen))
-        return ENOMEM;
+    TRY(iovs.try_resize(msg.msg_iovlen));
     TRY(copy_n_from_user(iovs.data(), msg.msg_iov, msg.msg_iovlen));
     if (iovs[0].iov_len > NumericLimits<ssize_t>::max())
         return EINVAL;
@@ -201,8 +200,7 @@ ErrorOr<FlatPtr> Process::sys$recvmsg(int sockfd, Userspace<struct msghdr*> user
     if (msg.msg_iovlen != 1)
         return ENOTSUP; // FIXME: Support this :)
     Vector<iovec, 1> iovs;
-    if (!iovs.try_resize(msg.msg_iovlen))
-        return ENOMEM;
+    TRY(iovs.try_resize(msg.msg_iovlen));
     TRY(copy_n_from_user(iovs.data(), msg.msg_iov, msg.msg_iovlen));

     Userspace<sockaddr*> user_addr((FlatPtr)msg.msg_name);

@@ -24,8 +24,7 @@ ErrorOr<FlatPtr> Process::sys$writev(int fd, Userspace<const struct iovec*> iov,

     u64 total_length = 0;
     Vector<iovec, 32> vecs;
-    if (!vecs.try_resize(iov_count))
-        return ENOMEM;
+    TRY(vecs.try_resize(iov_count));
     TRY(copy_n_from_user(vecs.data(), iov, iov_count));
     for (auto& vec : vecs) {
         total_length += vec.iov_len;

@@ -311,7 +311,7 @@ public:
             return false;
         }
         auto previous_size = m_elements.size();
-        if (!m_elements.try_resize(new_size))
+        if (m_elements.try_resize(new_size).is_error())
             return false;
         for (size_t i = previous_size; i < m_elements.size(); ++i)
             m_elements[i] = fill_value;