Mirror of https://github.com/LadybirdBrowser/ladybird.git (synced 2024-11-26 09:30:24 +00:00)
LibC: Remove a bunch of weak pthread_* symbols
This commit is contained in: parent cf4b7e343a, commit 224ac1a307
Notes: sideshowbarker 2024-07-17 08:55:54 +09:00
Author: https://github.com/timschumi
Commit: https://github.com/SerenityOS/serenity/commit/224ac1a307
Pull-request: https://github.com/SerenityOS/serenity/pull/14625
Reviewed-by: https://github.com/bgianfo ✅
8 changed files with 67 additions and 137 deletions
@@ -18,21 +18,10 @@ void __pthread_fork_atfork_register_prepare(void (*)(void));
 void __pthread_fork_atfork_register_parent(void (*)(void));
 void __pthread_fork_atfork_register_child(void (*)(void));
 
-int __pthread_mutex_init(pthread_mutex_t*, pthread_mutexattr_t const*);
-int __pthread_mutex_lock(pthread_mutex_t*);
-int __pthread_mutex_trylock(pthread_mutex_t*);
 int __pthread_mutex_lock_pessimistic_np(pthread_mutex_t*);
-int __pthread_mutex_unlock(pthread_mutex_t*);
 
 typedef void (*KeyDestructor)(void*);
 
-int __pthread_key_create(pthread_key_t*, KeyDestructor);
-int __pthread_key_delete(pthread_key_t);
-void* __pthread_getspecific(pthread_key_t);
-int __pthread_setspecific(pthread_key_t, void const*);
-
-int __pthread_self(void);
-
 void __pthread_key_destroy_for_current_thread(void);
 
 #define __PTHREAD_MUTEX_NORMAL 0

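For context: the declarations removed above were consumed through the weak-alias pattern, where the implementation lives under a reserved __pthread_* name and the public pthread_* name is exported as a weak alias that a separate threading library could override at link time. A minimal sketch of that pattern (illustrative names; GCC/Clang attribute syntax):

    // Sketch only -- not part of this diff. Strong definition under a reserved name:
    extern "C" int __demo_self(void)
    {
        return 0;
    }

    // Public name re-exported as a weak alias. A strong definition of demo_self()
    // provided elsewhere (e.g. by a separate libpthread) would win at link time:
    extern "C" int demo_self(void) __attribute__((weak, alias("__demo_self")));

After this commit, LibC defines the pthread_* entry points directly, so internal callers bind to the public names and the extra indirection disappears.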
@@ -10,6 +10,7 @@
 #include <LibC/bits/FILE.h>
 #include <LibC/bits/pthread_integration.h>
 #include <LibC/bits/wchar.h>
+#include <pthread.h>
 #include <sys/types.h>
 
 #pragma once

@@ -21,7 +22,7 @@ public:
         , m_mode(mode)
     {
         pthread_mutexattr_t attr = { __PTHREAD_MUTEX_RECURSIVE };
-        __pthread_mutex_init(&m_mutex, &attr);
+        pthread_mutex_init(&m_mutex, &attr);
     }
     ~FILE();
 

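SerenityOS initializes the attribute struct directly with its __PTHREAD_MUTEX_RECURSIVE constant; with portable POSIX calls, the recursive-mutex setup this constructor performs would look roughly like this (a sketch using the standard pthread attribute API):

    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);

    pthread_mutex_t mutex;
    pthread_mutex_init(&mutex, &attr); // FILE's m_mutex is set up equivalently
    pthread_mutexattr_destroy(&attr);

A recursive lock is the natural choice here, presumably because stdio helpers that already hold the FILE lock call other helpers that take it again.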
@@ -12,6 +12,7 @@
 #include <LibC/bits/pthread_integration.h>
 #include <LibC/mallocdefs.h>
 #include <assert.h>
+#include <pthread.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>

@@ -80,13 +81,13 @@ void __begin_atexit_locking()
 
 int __cxa_atexit(AtExitFunction exit_function, void* parameter, void* dso_handle)
 {
-    __pthread_mutex_lock(&atexit_mutex);
+    pthread_mutex_lock(&atexit_mutex);
 
     // allocate initial atexit region
     if (!atexit_entries) {
         atexit_entries = (AtExitEntry*)mmap(nullptr, atexit_region_bytes(), PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
         if (atexit_entries == MAP_FAILED) {
-            __pthread_mutex_unlock(&atexit_mutex);
+            pthread_mutex_unlock(&atexit_mutex);
             perror("__cxa_atexit mmap");
             _exit(1);
         }

@@ -100,7 +101,7 @@ int __cxa_atexit(AtExitFunction exit_function, void* parameter, void* dso_handle
 
     auto* new_atexit_entries = (AtExitEntry*)mmap(nullptr, new_atexit_region_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
     if (new_atexit_entries == MAP_FAILED) {
-        __pthread_mutex_unlock(&atexit_mutex);
+        pthread_mutex_unlock(&atexit_mutex);
         perror("__cxa_atexit mmap (new size)");
         return -1;
     }

@@ -118,7 +119,7 @@ int __cxa_atexit(AtExitFunction exit_function, void* parameter, void* dso_handle
     atexit_entries[atexit_entry_count++] = { exit_function, parameter, dso_handle };
     lock_atexit_handlers();
 
-    __pthread_mutex_unlock(&atexit_mutex);
+    pthread_mutex_unlock(&atexit_mutex);
 
     return 0;
 }

@@ -132,7 +133,7 @@ void __cxa_finalize(void* dso_handle)
     // Multiple calls to __cxa_finalize shall not result in calling termination function entries multiple times;
     // the implementation may either remove entries or mark them finished.
 
-    __pthread_mutex_lock(&atexit_mutex);
+    pthread_mutex_lock(&atexit_mutex);
 
     if (atexit_entry_count > atexit_called_entries->size())
         atexit_called_entries->grow(atexit_entry_count, false);

@@ -147,13 +148,13 @@ void __cxa_finalize(void* dso_handle)
         if (needs_calling) {
             dbgln_if(GLOBAL_DTORS_DEBUG, "__cxa_finalize: calling entry[{}] {:p}({:p}) dso: {:p}", entry_index, exit_entry.method, exit_entry.parameter, exit_entry.dso_handle);
             atexit_called_entries->set(entry_index, true);
-            __pthread_mutex_unlock(&atexit_mutex);
+            pthread_mutex_unlock(&atexit_mutex);
             exit_entry.method(exit_entry.parameter);
-            __pthread_mutex_lock(&atexit_mutex);
+            pthread_mutex_lock(&atexit_mutex);
         }
     }
 
-    __pthread_mutex_unlock(&atexit_mutex);
+    pthread_mutex_unlock(&atexit_mutex);
 }
 
 __attribute__((noreturn)) void __cxa_pure_virtual()

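As a reminder of who calls this entry point: for every object with a static-storage-duration destructor, the compiler emits a registration call against __cxa_atexit at construction time, conceptually like the following (a sketch; the exact lowering is defined by the Itanium C++ ABI):

    #include <cstdio>

    extern "C" int __cxa_atexit(void (*func)(void*), void* arg, void* dso_handle);
    extern "C" void* __dso_handle;

    struct Widget {
        ~Widget() { puts("~Widget"); }
    };

    static Widget s_widget;
    // The compiler-emitted initializer for s_widget effectively performs:
    //     __cxa_atexit([](void* p) { static_cast<Widget*>(p)->~Widget(); },
    //                  &s_widget, &__dso_handle);
    // and __cxa_finalize(dso_handle) later runs all entries registered for
    // that DSO, in reverse order of registration.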
@@ -111,12 +111,6 @@ static int create_thread(pthread_t* thread, void* (*entry)(void*), void* argumen
     __RETURN_PTHREAD_ERROR(rc);
 }
 
-// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_self.html
-int pthread_self()
-{
-    return __pthread_self();
-}
-
 // https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_create.html
 int pthread_create(pthread_t* thread, pthread_attr_t* attributes, void* (*start_routine)(void*), void* argument_to_start_routine)
 {

@@ -206,36 +200,12 @@ int pthread_sigmask(int how, sigset_t const* set, sigset_t* old_set)
     return 0;
 }
 
-// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutex_init.html
-int pthread_mutex_init(pthread_mutex_t* mutex, pthread_mutexattr_t const* attributes)
-{
-    return __pthread_mutex_init(mutex, attributes);
-}
-
 // https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutex_destroy.html
 int pthread_mutex_destroy(pthread_mutex_t*)
 {
     return 0;
 }
 
-// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutex_lock.html
-int pthread_mutex_lock(pthread_mutex_t* mutex)
-{
-    return __pthread_mutex_lock(mutex);
-}
-
-// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutex_trylock.html
-int pthread_mutex_trylock(pthread_mutex_t* mutex)
-{
-    return __pthread_mutex_trylock(mutex);
-}
-
-// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutex_unlock.html
-int pthread_mutex_unlock(pthread_mutex_t* mutex)
-{
-    return __pthread_mutex_unlock(mutex);
-}
-
 // https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutexattr_init.html
 int pthread_mutexattr_init(pthread_mutexattr_t* attr)
 {

@@ -518,30 +488,6 @@ int pthread_cancel(pthread_t)
     TODO();
 }
 
-// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_key_create.html
-int pthread_key_create(pthread_key_t* key, KeyDestructor destructor)
-{
-    return __pthread_key_create(key, destructor);
-}
-
-// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_key_delete.html
-int pthread_key_delete(pthread_key_t key)
-{
-    return __pthread_key_delete(key);
-}
-
-// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_getspecific.html
-void* pthread_getspecific(pthread_key_t key)
-{
-    return __pthread_getspecific(key);
-}
-
-// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_setspecific.html
-int pthread_setspecific(pthread_key_t key, void const* value)
-{
-    return __pthread_setspecific(key, value);
-}
-
 int pthread_setname_np(pthread_t thread, char const* name)
 {
     if (!name)

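One observable consequence of deleting these forwarding wrappers is a change in override behavior (illustrative sketch, not from the diff):

    #include <pthread.h>

    // With the old setup, LibC exported pthread_mutex_lock as a *weak* alias, so a
    // strong definition like this one would override it even in a static link:
    extern "C" int pthread_mutex_lock(pthread_mutex_t*)
    {
        return 0; // would silently replace LibC's implementation
    }
    // After this commit LibC's definition is strong too: a static link now fails
    // with a multiple-definition error, while dynamic linking falls back to the
    // usual ELF symbol-interposition rules.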
@@ -10,6 +10,7 @@
 #include <AK/Vector.h>
 #include <bits/pthread_integration.h>
 #include <errno.h>
+#include <pthread.h>
 #include <sched.h>
 #include <serenity.h>
 #include <unistd.h>

@@ -31,10 +32,10 @@ void __pthread_fork_prepare(void)
     if (!g_did_touch_atfork.load())
         return;
 
-    __pthread_mutex_lock(&g_atfork_list_mutex);
+    pthread_mutex_lock(&g_atfork_list_mutex);
     for (auto entry : g_atfork_prepare_list.get())
         entry();
-    __pthread_mutex_unlock(&g_atfork_list_mutex);
+    pthread_mutex_unlock(&g_atfork_list_mutex);
 }
 
 void __pthread_fork_child(void)

@@ -42,10 +43,10 @@ void __pthread_fork_child(void)
     if (!g_did_touch_atfork.load())
         return;
 
-    __pthread_mutex_lock(&g_atfork_list_mutex);
+    pthread_mutex_lock(&g_atfork_list_mutex);
     for (auto entry : g_atfork_child_list.get())
         entry();
-    __pthread_mutex_unlock(&g_atfork_list_mutex);
+    pthread_mutex_unlock(&g_atfork_list_mutex);
 }
 
 void __pthread_fork_parent(void)

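These hooks are the machinery behind pthread_atfork(); a typical registration that lands a handler on each of the three lists looks like this (standard POSIX usage, names illustrative):

    #include <pthread.h>
    #include <unistd.h>

    static pthread_mutex_t g_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void before_fork() { pthread_mutex_lock(&g_mutex); }     // prepare list
    static void in_parent() { pthread_mutex_unlock(&g_mutex); }     // parent list
    static void in_child() { pthread_mutex_unlock(&g_mutex); }      // child list

    int main()
    {
        pthread_atfork(before_fork, in_parent, in_child);
        fork(); // the three hooks now run around the actual fork
        return 0;
    }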
@@ -53,51 +54,51 @@ void __pthread_fork_parent(void)
     if (!g_did_touch_atfork.load())
         return;
 
-    __pthread_mutex_lock(&g_atfork_list_mutex);
+    pthread_mutex_lock(&g_atfork_list_mutex);
     for (auto entry : g_atfork_parent_list.get())
         entry();
-    __pthread_mutex_unlock(&g_atfork_list_mutex);
+    pthread_mutex_unlock(&g_atfork_list_mutex);
 }
 
 void __pthread_fork_atfork_register_prepare(void (*func)(void))
 {
     g_did_touch_atfork.store(true);
 
-    __pthread_mutex_lock(&g_atfork_list_mutex);
+    pthread_mutex_lock(&g_atfork_list_mutex);
     g_atfork_prepare_list->append(func);
-    __pthread_mutex_unlock(&g_atfork_list_mutex);
+    pthread_mutex_unlock(&g_atfork_list_mutex);
 }
 
 void __pthread_fork_atfork_register_parent(void (*func)(void))
 {
     g_did_touch_atfork.store(true);
 
-    __pthread_mutex_lock(&g_atfork_list_mutex);
+    pthread_mutex_lock(&g_atfork_list_mutex);
     g_atfork_parent_list->append(func);
-    __pthread_mutex_unlock(&g_atfork_list_mutex);
+    pthread_mutex_unlock(&g_atfork_list_mutex);
 }
 
 void __pthread_fork_atfork_register_child(void (*func)(void))
 {
     g_did_touch_atfork.store(true);
 
-    __pthread_mutex_lock(&g_atfork_list_mutex);
+    pthread_mutex_lock(&g_atfork_list_mutex);
     g_atfork_child_list->append(func);
-    __pthread_mutex_unlock(&g_atfork_list_mutex);
+    pthread_mutex_unlock(&g_atfork_list_mutex);
 }
 
-int __pthread_self()
+// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_self.html
+int pthread_self()
 {
     return gettid();
 }
 
-int pthread_self() __attribute__((weak, alias("__pthread_self")));
-
 static constexpr u32 MUTEX_UNLOCKED = 0;
 static constexpr u32 MUTEX_LOCKED_NO_NEED_TO_WAKE = 1;
 static constexpr u32 MUTEX_LOCKED_NEED_TO_WAKE = 2;
 
-int __pthread_mutex_init(pthread_mutex_t* mutex, pthread_mutexattr_t const* attributes)
+// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutex_init.html
+int pthread_mutex_init(pthread_mutex_t* mutex, pthread_mutexattr_t const* attributes)
 {
     mutex->lock = 0;
     mutex->owner = 0;

@@ -106,21 +107,20 @@ int __pthread_mutex_init(pthread_mutex_t* mutex, pthread_mutexattr_t const* attr
     return 0;
 }
 
-int pthread_mutex_init(pthread_mutex_t*, pthread_mutexattr_t const*) __attribute__((weak, alias("__pthread_mutex_init")));
-
-int __pthread_mutex_trylock(pthread_mutex_t* mutex)
+// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutex_trylock.html
+int pthread_mutex_trylock(pthread_mutex_t* mutex)
 {
     u32 expected = MUTEX_UNLOCKED;
     bool exchanged = AK::atomic_compare_exchange_strong(&mutex->lock, expected, MUTEX_LOCKED_NO_NEED_TO_WAKE, AK::memory_order_acquire);
 
     if (exchanged) [[likely]] {
         if (mutex->type == __PTHREAD_MUTEX_RECURSIVE)
-            AK::atomic_store(&mutex->owner, __pthread_self(), AK::memory_order_relaxed);
+            AK::atomic_store(&mutex->owner, pthread_self(), AK::memory_order_relaxed);
         mutex->level = 0;
         return 0;
     } else if (mutex->type == __PTHREAD_MUTEX_RECURSIVE) {
         pthread_t owner = AK::atomic_load(&mutex->owner, AK::memory_order_relaxed);
-        if (owner == __pthread_self()) {
+        if (owner == pthread_self()) {
             // We already own the mutex!
             mutex->level++;
             return 0;

@@ -129,21 +129,20 @@ int __pthread_mutex_trylock(pthread_mutex_t* mutex)
     return EBUSY;
 }
 
-int pthread_mutex_trylock(pthread_mutex_t* mutex) __attribute__((weak, alias("__pthread_mutex_trylock")));
-
-int __pthread_mutex_lock(pthread_mutex_t* mutex)
+// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutex_lock.html
+int pthread_mutex_lock(pthread_mutex_t* mutex)
 {
     // Fast path: attempt to claim the mutex without waiting.
     u32 value = MUTEX_UNLOCKED;
     bool exchanged = AK::atomic_compare_exchange_strong(&mutex->lock, value, MUTEX_LOCKED_NO_NEED_TO_WAKE, AK::memory_order_acquire);
     if (exchanged) [[likely]] {
         if (mutex->type == __PTHREAD_MUTEX_RECURSIVE)
-            AK::atomic_store(&mutex->owner, __pthread_self(), AK::memory_order_relaxed);
+            AK::atomic_store(&mutex->owner, pthread_self(), AK::memory_order_relaxed);
         mutex->level = 0;
         return 0;
     } else if (mutex->type == __PTHREAD_MUTEX_RECURSIVE) {
         pthread_t owner = AK::atomic_load(&mutex->owner, AK::memory_order_relaxed);
-        if (owner == __pthread_self()) {
+        if (owner == pthread_self()) {
             // We already own the mutex!
             mutex->level++;
             return 0;

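Both functions above use the classic three-state futex mutex: unlocked, locked with no waiters, and locked with waiters, so that unlock only pays for a wake syscall when someone is actually blocked. A condensed sketch of the fast path, using std::atomic as a stand-in for AK's atomics (illustrative, not the exact LibC code):

    #include <atomic>
    #include <cstdint>

    enum : uint32_t {
        MUTEX_UNLOCKED = 0,
        MUTEX_LOCKED_NO_NEED_TO_WAKE = 1, // locked, nobody waiting
        MUTEX_LOCKED_NEED_TO_WAKE = 2,    // locked, unlock must futex-wake
    };

    int try_lock_fast_path(std::atomic<uint32_t>& lock)
    {
        uint32_t expected = MUTEX_UNLOCKED;
        // A single CAS; on success we own the mutex with no syscall at all.
        if (lock.compare_exchange_strong(expected, MUTEX_LOCKED_NO_NEED_TO_WAKE,
                                         std::memory_order_acquire))
            return 0;
        return -1; // contended: the real lock path waits on a futex instead
    }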
@@ -161,13 +160,11 @@ int __pthread_mutex_lock(pthread_mutex_t* mutex)
     }
 
     if (mutex->type == __PTHREAD_MUTEX_RECURSIVE)
-        AK::atomic_store(&mutex->owner, __pthread_self(), AK::memory_order_relaxed);
+        AK::atomic_store(&mutex->owner, pthread_self(), AK::memory_order_relaxed);
     mutex->level = 0;
     return 0;
 }
 
-int pthread_mutex_lock(pthread_mutex_t*) __attribute__((weak, alias("__pthread_mutex_lock")));
-
 int __pthread_mutex_lock_pessimistic_np(pthread_mutex_t* mutex)
 {
     // Same as pthread_mutex_lock(), but always set MUTEX_LOCKED_NEED_TO_WAKE,

@@ -180,12 +177,13 @@ int __pthread_mutex_lock_pessimistic_np(pthread_mutex_t* mutex)
     }
 
     if (mutex->type == __PTHREAD_MUTEX_RECURSIVE)
-        AK::atomic_store(&mutex->owner, __pthread_self(), AK::memory_order_relaxed);
+        AK::atomic_store(&mutex->owner, pthread_self(), AK::memory_order_relaxed);
     mutex->level = 0;
     return 0;
 }
 
-int __pthread_mutex_unlock(pthread_mutex_t* mutex)
+// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutex_unlock.html
+int pthread_mutex_unlock(pthread_mutex_t* mutex)
 {
     if (mutex->type == __PTHREAD_MUTEX_RECURSIVE && mutex->level > 0) {
         mutex->level--;

@@ -203,6 +201,4 @@
 
     return 0;
 }
-
-int pthread_mutex_unlock(pthread_mutex_t*) __attribute__((weak, alias("__pthread_mutex_unlock")));
 }

@@ -28,10 +28,11 @@ static KeyTable s_keys;
 
 __thread SpecificTable t_specifics;
 
-int __pthread_key_create(pthread_key_t* key, KeyDestructor destructor)
+// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_key_create.html
+int pthread_key_create(pthread_key_t* key, KeyDestructor destructor)
 {
     int ret = 0;
-    __pthread_mutex_lock(&s_keys.mutex);
+    pthread_mutex_lock(&s_keys.mutex);
     if (s_keys.next >= max_keys) {
         ret = EAGAIN;
     } else {

@@ -39,25 +40,23 @@ int __pthread_key_create(pthread_key_t* key, KeyDestructor destructor)
         s_keys.destructors[*key] = destructor;
         ret = 0;
     }
-    __pthread_mutex_unlock(&s_keys.mutex);
+    pthread_mutex_unlock(&s_keys.mutex);
     return ret;
 }
 
-int pthread_key_create(pthread_key_t*, KeyDestructor) __attribute__((weak, alias("__pthread_key_create")));
-
-int __pthread_key_delete(pthread_key_t key)
+// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_key_delete.html
+int pthread_key_delete(pthread_key_t key)
 {
     if (key < 0 || key >= max_keys)
         return EINVAL;
-    __pthread_mutex_lock(&s_keys.mutex);
+    pthread_mutex_lock(&s_keys.mutex);
     s_keys.destructors[key] = nullptr;
-    __pthread_mutex_unlock(&s_keys.mutex);
+    pthread_mutex_unlock(&s_keys.mutex);
     return 0;
 }
 
-int pthread_key_delete(pthread_key_t) __attribute__((weak, alias("__pthread_key_delete")));
-
-void* __pthread_getspecific(pthread_key_t key)
+// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_getspecific.html
+void* pthread_getspecific(pthread_key_t key)
 {
     if (key < 0)
         return nullptr;

@@ -66,9 +65,8 @@ void* __pthread_getspecific(pthread_key_t key)
     return t_specifics.values[key];
 }
 
-void* pthread_getspecific(pthread_key_t) __attribute__((weak, alias("__pthread_getspecific")));
-
-int __pthread_setspecific(pthread_key_t key, void const* value)
+// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_setspecific.html
+int pthread_setspecific(pthread_key_t key, void const* value)
 {
     if (key < 0)
         return EINVAL;

@@ -79,14 +77,12 @@ int __pthread_setspecific(pthread_key_t key, void const* value)
     return 0;
 }
 
-int pthread_setspecific(pthread_key_t, void const*) __attribute__((weak, alias("__pthread_setspecific")));
-
 void __pthread_key_destroy_for_current_thread()
 {
     // This function will either be called during exit_thread, for a pthread, or
     // during global program shutdown for the main thread.
 
-    __pthread_mutex_lock(&s_keys.mutex);
+    pthread_mutex_lock(&s_keys.mutex);
     size_t num_used_keys = s_keys.next;
 
     // Dr. POSIX accounts for weird key destructors setting their own key again.

@@ -105,7 +101,7 @@ void __pthread_key_destroy_for_current_thread()
             if (!any_nonnull_destructors)
                 break;
         }
-        __pthread_mutex_unlock(&s_keys.mutex);
+        pthread_mutex_unlock(&s_keys.mutex);
     }
 }
 #endif

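The table-and-destructor bookkeeping above backs the standard thread-specific-data API; a typical consumer looks like this (standard POSIX usage):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_key_t s_buffer_key;
    static pthread_once_t s_key_once = PTHREAD_ONCE_INIT;

    static void destroy_buffer(void* buffer)
    {
        free(buffer); // runs at thread exit for every thread that set the key
    }

    static void create_key()
    {
        pthread_key_create(&s_buffer_key, destroy_buffer);
    }

    // Lazily allocates one 4 KiB scratch buffer per thread.
    static void* thread_local_buffer()
    {
        pthread_once(&s_key_once, create_key);
        void* buffer = pthread_getspecific(s_buffer_key);
        if (!buffer) {
            buffer = malloc(4096);
            pthread_setspecific(s_buffer_key, buffer);
        }
        return buffer;
    }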
@@ -534,12 +534,12 @@ bool FILE::Buffer::enqueue_front(u8 byte)
 
 void FILE::lock()
 {
-    __pthread_mutex_lock(&m_mutex);
+    pthread_mutex_lock(&m_mutex);
 }
 
 void FILE::unlock()
 {
-    __pthread_mutex_unlock(&m_mutex);
+    pthread_mutex_unlock(&m_mutex);
 }
 
 extern "C" {

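FILE::lock() and FILE::unlock() guard stdio operations on a stream; the same per-stream locking discipline is what POSIX exposes to applications through flockfile()/funlockfile(), e.g. (illustrative usage):

    #include <stdio.h>

    // Group several writes into one unit that other threads cannot interleave:
    void write_record(FILE* stream, char const* key, int value)
    {
        flockfile(stream);
        fprintf(stream, "%s=", key);
        fprintf(stream, "%d\n", value);
        funlockfile(stream);
    }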
@@ -27,6 +27,7 @@
 #include <LibELF/DynamicObject.h>
 #include <LibELF/Hashes.h>
 #include <fcntl.h>
+#include <pthread.h>
 #include <string.h>
 #include <sys/types.h>
 #include <syscall.h>

@@ -228,8 +229,8 @@ static void allocate_tls()
 
 static int __dl_iterate_phdr(DlIteratePhdrCallbackFunction callback, void* data)
 {
-    __pthread_mutex_lock(&s_loader_lock);
-    ScopeGuard unlock_guard = [] { __pthread_mutex_unlock(&s_loader_lock); };
+    pthread_mutex_lock(&s_loader_lock);
+    ScopeGuard unlock_guard = [] { pthread_mutex_unlock(&s_loader_lock); };
 
     for (auto& it : s_global_objects) {
         auto& object = it.value;

@@ -385,8 +386,8 @@ static Result<void, DlErrorMessage> __dlclose(void* handle)
 {
     dbgln_if(DYNAMIC_LOAD_DEBUG, "__dlclose: {}", handle);
 
-    __pthread_mutex_lock(&s_loader_lock);
-    ScopeGuard unlock_guard = [] { __pthread_mutex_unlock(&s_loader_lock); };
+    pthread_mutex_lock(&s_loader_lock);
+    ScopeGuard unlock_guard = [] { pthread_mutex_unlock(&s_loader_lock); };
 
     // FIXME: this will not currently destroy the dynamic object
     // because we're intentionally holding a strong reference to it

@@ -437,9 +438,9 @@ static Result<void*, DlErrorMessage> __dlopen(char const* filename, int flags)
 
     auto library_name = get_library_name(filename ? filename : s_main_program_name);
 
-    if (__pthread_mutex_trylock(&s_loader_lock) != 0)
+    if (pthread_mutex_trylock(&s_loader_lock) != 0)
         return DlErrorMessage { "Nested calls to dlopen() are not permitted." };
-    ScopeGuard unlock_guard = [] { __pthread_mutex_unlock(&s_loader_lock); };
+    ScopeGuard unlock_guard = [] { pthread_mutex_unlock(&s_loader_lock); };
 
     auto existing_elf_object = s_global_objects.get(library_name);
     if (existing_elf_object.has_value()) {

@@ -484,8 +485,8 @@ static Result<void*, DlErrorMessage> __dlsym(void* handle, char const* symbol_na
 {
     dbgln_if(DYNAMIC_LOAD_DEBUG, "__dlsym: {}, {}", handle, symbol_name);
 
-    __pthread_mutex_lock(&s_loader_lock);
-    ScopeGuard unlock_guard = [] { __pthread_mutex_unlock(&s_loader_lock); };
+    pthread_mutex_lock(&s_loader_lock);
+    ScopeGuard unlock_guard = [] { pthread_mutex_unlock(&s_loader_lock); };
 
     StringView symbol_name_view { symbol_name, strlen(symbol_name) };
     Optional<DynamicObject::SymbolLookupResult> symbol;

@@ -510,8 +511,8 @@ static Result<void*, DlErrorMessage> __dlsym(void* handle, char const* symbol_na
 static Result<void, DlErrorMessage> __dladdr(void* addr, Dl_info* info)
 {
     VirtualAddress user_addr { addr };
-    __pthread_mutex_lock(&s_loader_lock);
-    ScopeGuard unlock_guard = [] { __pthread_mutex_unlock(&s_loader_lock); };
+    pthread_mutex_lock(&s_loader_lock);
+    ScopeGuard unlock_guard = [] { pthread_mutex_unlock(&s_loader_lock); };
 
     RefPtr<DynamicObject> best_matching_library;
     VirtualAddress best_library_offset;

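All of these loader entry points serialize on s_loader_lock; from the application side they are reached through the usual dlfcn API (standard usage; library and symbol names illustrative):

    #include <dlfcn.h>
    #include <stdio.h>

    int main()
    {
        void* handle = dlopen("libexample.so", RTLD_NOW);
        if (!handle) {
            fprintf(stderr, "dlopen failed: %s\n", dlerror());
            return 1;
        }

        // Each of these calls takes the loader lock internally.
        auto* fn = reinterpret_cast<int (*)(void)>(dlsym(handle, "example_entry"));
        if (fn)
            printf("example_entry() = %d\n", fn());

        dlclose(handle);
        return 0;
    }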