Everywhere: Fix some alignment issues
When creating uninitialized storage for variables, we need to make sure that the alignment is correct. This fixes a KUBSAN failure when running kernels compiled with Clang.

In `Syscalls/socket.cpp`, we can simply use local variables, as `sockaddr_un` is a POD type.

Along with moving the `alignas` specifier to the correct member, `AK::Optional`'s internal buffer has been made non-zeroed by default. GCC emitted bogus uninitialized memory access warnings, so we now use `__builtin_launder` to tell the compiler that we know what we are doing. This might disable some optimizations, but judging by how GCC failed to notice that the memory's initialization depends on `m_has_value`, I'm not sure that's a bad thing.
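For readers unfamiliar with the pattern being fixed, here is a minimal sketch of correctly aligned, lazily initialized storage and where `__builtin_launder` comes in. This is not code from the tree: the `LazyBox` name and its members are made up for illustration, and a C++17 GCC or Clang compiler is assumed.

// Illustrative only: a stripped-down stand-in for the storage strategy used
// by types like AK::Optional. "LazyBox" is a hypothetical name.
#include <new>     // placement new
#include <utility> // std::forward

template<typename T>
class LazyBox {
public:
    template<typename... Args>
    void emplace(Args&&... args)
    {
        // Construct a T in-place inside the raw byte buffer.
        new (m_storage) T(std::forward<Args>(args)...);
        m_has_value = true;
    }

    T& value()
    {
        // __builtin_launder tells the compiler that a live T really exists at
        // this address, which silences bogus uninitialized-read warnings
        // without having to zero-fill the buffer up front.
        return *__builtin_launder(reinterpret_cast<T*>(&m_storage));
    }

    ~LazyBox()
    {
        if (m_has_value)
            value().~T();
    }

private:
    // alignas(T) is the crucial part: a bare u8 array is only 1-byte aligned,
    // so reinterpreting it as a T with stricter alignment requirements is
    // undefined behavior, which is what KUBSAN reported under Clang.
    alignas(T) unsigned char m_storage[sizeof(T)];
    bool m_has_value { false };
};

In standard C++17 the same laundering can be spelled `std::launder` from `<new>`; the commit uses the builtin directly, presumably because these headers are also used in freestanding (kernel) builds where the standard library is unavailable.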
parent 45a82b2a5b
commit b9f30c6f2a

Notes (sideshowbarker, 2024-07-19 01:59:31 +09:00):
Author: https://github.com/BertalanD
Commit: https://github.com/SerenityOS/serenity/commit/b9f30c6f2a6
Pull-request: https://github.com/SerenityOS/serenity/pull/8354
Reviewed-by: https://github.com/Hendiadyoin1
Reviewed-by: https://github.com/alimpfard
5 changed files with 17 additions and 17 deletions
AK/Optional.h
@@ -14,7 +14,7 @@
 namespace AK {
 
 template<typename T>
-class alignas(T) [[nodiscard]] Optional {
+class [[nodiscard]] Optional {
 public:
     using ValueType = T;
 
@@ -132,13 +132,13 @@ public:
     [[nodiscard]] ALWAYS_INLINE T& value()
     {
         VERIFY(m_has_value);
-        return *reinterpret_cast<T*>(&m_storage);
+        return *__builtin_launder(reinterpret_cast<T*>(&m_storage));
     }
 
     [[nodiscard]] ALWAYS_INLINE const T& value() const
     {
         VERIFY(m_has_value);
-        return *reinterpret_cast<const T*>(&m_storage);
+        return *__builtin_launder(reinterpret_cast<const T*>(&m_storage));
     }
 
     [[nodiscard]] T release_value()
@@ -164,7 +164,7 @@ public:
     ALWAYS_INLINE T* operator->() { return &value(); }
 
 private:
-    u8 m_storage[sizeof(T)] { 0 };
+    alignas(T) u8 m_storage[sizeof(T)];
     bool m_has_value { false };
 };
 
Kernel/Heap/kmalloc.cpp
@@ -185,7 +185,7 @@ struct KmallocGlobalHeap {
 };
 
 READONLY_AFTER_INIT static KmallocGlobalHeap* g_kmalloc_global;
-static u8 g_kmalloc_global_heap[sizeof(KmallocGlobalHeap)];
+alignas(KmallocGlobalHeap) static u8 g_kmalloc_global_heap[sizeof(KmallocGlobalHeap)];
 
 // Treat the heap as logically separate from .bss
 __attribute__((section(".heap"))) static u8 kmalloc_eternal_heap[ETERNAL_RANGE_SIZE];
Kernel/Syscalls/socket.cpp
@@ -119,10 +119,10 @@ KResultOr<FlatPtr> Process::sys$accept4(Userspace<const Syscall::SC_accept4_para
     VERIFY(accepted_socket);
 
     if (user_address) {
-        u8 address_buffer[sizeof(sockaddr_un)];
+        sockaddr_un address_buffer;
         address_size = min(sizeof(sockaddr_un), static_cast<size_t>(address_size));
-        accepted_socket->get_peer_address((sockaddr*)address_buffer, &address_size);
-        if (!copy_to_user(user_address, address_buffer, address_size))
+        accepted_socket->get_peer_address((sockaddr*)&address_buffer, &address_size);
+        if (!copy_to_user(user_address, &address_buffer, address_size))
             return EFAULT;
         if (!copy_to_user(user_address_size, &address_size))
             return EFAULT;
@@ -311,13 +311,13 @@ int Process::get_sock_or_peer_name(const Params& params)
     auto& socket = *description->socket();
     REQUIRE_PROMISE_FOR_SOCKET_DOMAIN(socket.domain());
 
-    u8 address_buffer[sizeof(sockaddr_un)];
+    sockaddr_un address_buffer;
     addrlen_value = min(sizeof(sockaddr_un), static_cast<size_t>(addrlen_value));
     if constexpr (sockname)
-        socket.get_local_address((sockaddr*)address_buffer, &addrlen_value);
+        socket.get_local_address((sockaddr*)&address_buffer, &addrlen_value);
     else
-        socket.get_peer_address((sockaddr*)address_buffer, &addrlen_value);
-    if (!copy_to_user(params.addr, address_buffer, addrlen_value))
+        socket.get_peer_address((sockaddr*)&address_buffer, &addrlen_value);
+    if (!copy_to_user(params.addr, &address_buffer, addrlen_value))
         return EFAULT;
     if (!copy_to_user(params.addrlen, &addrlen_value))
         return EFAULT;
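As an aside on the socket.cpp change above: the commit message's point that `sockaddr_un` is a POD type is what makes a plain local variable sufficient here. A minimal sketch, assuming standard POSIX headers rather than the kernel's own definitions, of why that works:

// Illustration only (not from the commit); POSIX-like headers assumed.
#include <netinet/in.h> // sockaddr_in
#include <sys/socket.h> // sockaddr
#include <sys/un.h>     // sockaddr_un

// sockaddr_un is the widest address variant these syscalls copy out, so one
// local object of that type can hold any of them...
static_assert(sizeof(sockaddr_un) >= sizeof(sockaddr_in), "sockaddr_un is the widest variant used here");
// ...and, unlike a plain u8 array, a real object automatically carries the
// correct alignment for the (sockaddr*) cast.
static_assert(alignof(sockaddr_un) >= alignof(sockaddr), "natural alignment suffices for the sockaddr cast");

int main() { }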
Userland/Libraries/LibC/malloc.cpp
@@ -26,8 +26,8 @@
 
 static Threading::Lock& malloc_lock()
 {
-    static u32 lock_storage[sizeof(Threading::Lock) / sizeof(u32)];
-    return *reinterpret_cast<Threading::Lock*>(&lock_storage);
+    alignas(Threading::Lock) static u8 lock_storage[sizeof(Threading::Lock)];
+    return *reinterpret_cast<Threading::Lock*>(lock_storage);
 }
 
 constexpr size_t number_of_hot_chunked_blocks_to_keep_around = 16;
@@ -111,8 +111,8 @@ struct BigAllocator {
 // are run. Similarly, we can not allow global destructors to destruct
 // them. We could have used AK::NeverDestoyed to prevent the latter,
 // but it would have not helped with the former.
-static u8 g_allocators_storage[sizeof(Allocator) * num_size_classes];
-static u8 g_big_allocators_storage[sizeof(BigAllocator)];
+alignas(Allocator) static u8 g_allocators_storage[sizeof(Allocator) * num_size_classes];
+alignas(BigAllocator) static u8 g_big_allocators_storage[sizeof(BigAllocator)];
 
 static inline Allocator (&allocators())[num_size_classes]
 {
Userland/Libraries/LibC/stdio.cpp
@@ -614,7 +614,7 @@ private:
 
 extern "C" {
 
-static u8 default_streams[3][sizeof(FILE)];
+alignas(FILE) static u8 default_streams[3][sizeof(FILE)];
 FILE* stdin = reinterpret_cast<FILE*>(&default_streams[0]);
 FILE* stdout = reinterpret_cast<FILE*>(&default_streams[1]);
 FILE* stderr = reinterpret_cast<FILE*>(&default_streams[2]);