pthread_integration.cpp

/*
 * Copyright (c) 2021, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Assertions.h>
#include <AK/Atomic.h>
#include <AK/NeverDestroyed.h>
#include <AK/Types.h>
#include <AK/Vector.h>
#include <bits/pthread_integration.h>
#include <errno.h>
#include <sched.h>
#include <serenity.h>
#include <unistd.h>

namespace {

// Most programs don't need this; no need to incur an extra mutex lock/unlock on them.
static Atomic<bool> g_did_touch_atfork { false };
static pthread_mutex_t g_atfork_list_mutex __PTHREAD_MUTEX_INITIALIZER;
static NeverDestroyed<Vector<void (*)(void), 4>> g_atfork_prepare_list;
static NeverDestroyed<Vector<void (*)(void), 4>> g_atfork_child_list;
static NeverDestroyed<Vector<void (*)(void), 4>> g_atfork_parent_list;

}
extern "C" {
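
// fork() integration: LibC's fork() is expected to call these hooks so that
// handlers registered via pthread_atfork() run at the right times: prepare
// handlers in the parent just before forking, parent handlers in the parent
// afterwards, and child handlers in the new child process.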
void __pthread_fork_prepare(void)
{
    if (!g_did_touch_atfork.load())
        return;

    __pthread_mutex_lock(&g_atfork_list_mutex);
    for (auto entry : g_atfork_prepare_list.get())
        entry();
    __pthread_mutex_unlock(&g_atfork_list_mutex);
}

void __pthread_fork_child(void)
{
    if (!g_did_touch_atfork.load())
        return;

    __pthread_mutex_lock(&g_atfork_list_mutex);
    for (auto entry : g_atfork_child_list.get())
        entry();
    __pthread_mutex_unlock(&g_atfork_list_mutex);
}

void __pthread_fork_parent(void)
{
    if (!g_did_touch_atfork.load())
        return;

    __pthread_mutex_lock(&g_atfork_list_mutex);
    for (auto entry : g_atfork_parent_list.get())
        entry();
    __pthread_mutex_unlock(&g_atfork_list_mutex);
}

void __pthread_fork_atfork_register_prepare(void (*func)(void))
{
    g_did_touch_atfork.store(true);

    __pthread_mutex_lock(&g_atfork_list_mutex);
    g_atfork_prepare_list->append(func);
    __pthread_mutex_unlock(&g_atfork_list_mutex);
}

void __pthread_fork_atfork_register_parent(void (*func)(void))
{
    g_did_touch_atfork.store(true);

    __pthread_mutex_lock(&g_atfork_list_mutex);
    g_atfork_parent_list->append(func);
    __pthread_mutex_unlock(&g_atfork_list_mutex);
}

void __pthread_fork_atfork_register_child(void (*func)(void))
{
    g_did_touch_atfork.store(true);

    __pthread_mutex_lock(&g_atfork_list_mutex);
    g_atfork_child_list->append(func);
    __pthread_mutex_unlock(&g_atfork_list_mutex);
}
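
// A sketch (not part of this file) of how these registration hooks are
// expected to be reached: pthread_atfork() presumably forwards its three
// handlers along these lines:
//
//     int pthread_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void))
//     {
//         if (prepare)
//             __pthread_fork_atfork_register_prepare(prepare);
//         if (parent)
//             __pthread_fork_atfork_register_parent(parent);
//         if (child)
//             __pthread_fork_atfork_register_child(child);
//         return 0;
//     }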

int __pthread_self()
{
    return gettid();
}

int pthread_self() __attribute__((weak, alias("__pthread_self")));
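
// The lock word of a mutex is a futex with three states: unlocked, locked
// with no waiters (so unlock can skip the futex_wake() syscall), and locked
// with possible waiters (so unlock must issue a futex_wake()).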
static constexpr u32 MUTEX_UNLOCKED = 0;
static constexpr u32 MUTEX_LOCKED_NO_NEED_TO_WAKE = 1;
static constexpr u32 MUTEX_LOCKED_NEED_TO_WAKE = 2;

int __pthread_mutex_init(pthread_mutex_t* mutex, const pthread_mutexattr_t* attributes)
{
    mutex->lock = 0;
    mutex->owner = 0;
    mutex->level = 0;
    mutex->type = attributes ? attributes->type : __PTHREAD_MUTEX_NORMAL;
    return 0;
}

int pthread_mutex_init(pthread_mutex_t*, const pthread_mutexattr_t*) __attribute__((weak, alias("__pthread_mutex_init")));
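
// Try to acquire the mutex without blocking; returns EBUSY if another thread
// holds it. A recursive mutex may be re-acquired by its current owner, which
// bumps the recursion level instead.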
int __pthread_mutex_trylock(pthread_mutex_t* mutex)
{
    u32 expected = MUTEX_UNLOCKED;
    bool exchanged = AK::atomic_compare_exchange_strong(&mutex->lock, expected, MUTEX_LOCKED_NO_NEED_TO_WAKE, AK::memory_order_acquire);

    if (exchanged) [[likely]] {
        if (mutex->type == __PTHREAD_MUTEX_RECURSIVE)
            AK::atomic_store(&mutex->owner, __pthread_self(), AK::memory_order_relaxed);
        mutex->level = 0;
        return 0;
    } else if (mutex->type == __PTHREAD_MUTEX_RECURSIVE) {
        pthread_t owner = AK::atomic_load(&mutex->owner, AK::memory_order_relaxed);
        if (owner == __pthread_self()) {
            // We already own the mutex!
            mutex->level++;
            return 0;
        }
    }
    return EBUSY;
}

int pthread_mutex_trylock(pthread_mutex_t* mutex) __attribute__((weak, alias("__pthread_mutex_trylock")));
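
// Acquire the mutex, blocking in futex_wait() until it becomes available.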
int __pthread_mutex_lock(pthread_mutex_t* mutex)
{
    // Fast path: attempt to claim the mutex without waiting.
    u32 value = MUTEX_UNLOCKED;
    bool exchanged = AK::atomic_compare_exchange_strong(&mutex->lock, value, MUTEX_LOCKED_NO_NEED_TO_WAKE, AK::memory_order_acquire);
    if (exchanged) [[likely]] {
        if (mutex->type == __PTHREAD_MUTEX_RECURSIVE)
            AK::atomic_store(&mutex->owner, __pthread_self(), AK::memory_order_relaxed);
        mutex->level = 0;
        return 0;
    } else if (mutex->type == __PTHREAD_MUTEX_RECURSIVE) {
        pthread_t owner = AK::atomic_load(&mutex->owner, AK::memory_order_relaxed);
        if (owner == __pthread_self()) {
            // We already own the mutex!
            mutex->level++;
            return 0;
        }
    }

    // Slow path: wait, record the fact that we're going to wait, and always
    // remember to wake the next thread up once we release the mutex.
    if (value != MUTEX_LOCKED_NEED_TO_WAKE)
        value = AK::atomic_exchange(&mutex->lock, MUTEX_LOCKED_NEED_TO_WAKE, AK::memory_order_acquire);

    while (value != MUTEX_UNLOCKED) {
        futex_wait(&mutex->lock, value, nullptr, 0);
        value = AK::atomic_exchange(&mutex->lock, MUTEX_LOCKED_NEED_TO_WAKE, AK::memory_order_acquire);
    }

    if (mutex->type == __PTHREAD_MUTEX_RECURSIVE)
        AK::atomic_store(&mutex->owner, __pthread_self(), AK::memory_order_relaxed);
    mutex->level = 0;
    return 0;
}

int pthread_mutex_lock(pthread_mutex_t*) __attribute__((weak, alias("__pthread_mutex_lock")));

int __pthread_mutex_lock_pessimistic_np(pthread_mutex_t* mutex)
{
    // Same as pthread_mutex_lock(), but always set MUTEX_LOCKED_NEED_TO_WAKE,
    // and also don't bother checking for already owning the mutex recursively,
    // because we know we don't. Used in the condition variable implementation.
    u32 value = AK::atomic_exchange(&mutex->lock, MUTEX_LOCKED_NEED_TO_WAKE, AK::memory_order_acquire);
    while (value != MUTEX_UNLOCKED) {
        futex_wait(&mutex->lock, value, nullptr, 0);
        value = AK::atomic_exchange(&mutex->lock, MUTEX_LOCKED_NEED_TO_WAKE, AK::memory_order_acquire);
    }

    if (mutex->type == __PTHREAD_MUTEX_RECURSIVE)
        AK::atomic_store(&mutex->owner, __pthread_self(), AK::memory_order_relaxed);
    mutex->level = 0;
    return 0;
}
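
// Release the mutex. For a recursive mutex held more than once, this only
// drops one recursion level. futex_wake() is issued only when another thread
// may actually be blocked on the lock word.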
int __pthread_mutex_unlock(pthread_mutex_t* mutex)
{
    if (mutex->type == __PTHREAD_MUTEX_RECURSIVE && mutex->level > 0) {
        mutex->level--;
        return 0;
    }

    if (mutex->type == __PTHREAD_MUTEX_RECURSIVE)
        AK::atomic_store(&mutex->owner, 0, AK::memory_order_relaxed);

    u32 value = AK::atomic_exchange(&mutex->lock, MUTEX_UNLOCKED, AK::memory_order_release);
    if (value == MUTEX_LOCKED_NEED_TO_WAKE) [[unlikely]] {
        int rc = futex_wake(&mutex->lock, 1);
        VERIFY(rc >= 0);
    }

    return 0;
}

int pthread_mutex_unlock(pthread_mutex_t*) __attribute__((weak, alias("__pthread_mutex_unlock")));
}