// pthread_integration.cpp
  1. /*
  2. * Copyright (c) 2021, the SerenityOS developers.
  3. *
  4. * SPDX-License-Identifier: BSD-2-Clause
  5. */
  6. #include <AK/Atomic.h>
  7. #include <AK/NeverDestroyed.h>
  8. #include <AK/Types.h>
  9. #include <AK/Vector.h>
  10. #include <bits/pthread_integration.h>
  11. #include <errno.h>
  12. #include <pthread.h>
  13. #include <sched.h>
  14. #include <serenity.h>
  15. #include <unistd.h>
namespace {
// Most programs don't need this, no need to incur an extra mutex lock/unlock on them.
// Set (and never cleared) the first time any atfork handler is registered;
// lets the fork hooks below return early without touching the list mutex.
static Atomic<bool> g_did_touch_atfork { false };
// Guards all three handler lists below.
static pthread_mutex_t g_atfork_list_mutex __PTHREAD_MUTEX_INITIALIZER;
// Handlers registered via the __pthread_fork_atfork_register_* functions below;
// invoked by __pthread_fork_prepare/_child/_parent respectively.
static NeverDestroyed<Vector<void (*)(void), 4>> g_atfork_prepare_list;
static NeverDestroyed<Vector<void (*)(void), 4>> g_atfork_child_list;
static NeverDestroyed<Vector<void (*)(void), 4>> g_atfork_parent_list;
}
  24. extern "C" {
  25. void __pthread_fork_prepare(void)
  26. {
  27. if (!g_did_touch_atfork.load())
  28. return;
  29. pthread_mutex_lock(&g_atfork_list_mutex);
  30. for (auto entry : g_atfork_prepare_list.get())
  31. entry();
  32. pthread_mutex_unlock(&g_atfork_list_mutex);
  33. }
  34. void __pthread_fork_child(void)
  35. {
  36. if (!g_did_touch_atfork.load())
  37. return;
  38. pthread_mutex_lock(&g_atfork_list_mutex);
  39. for (auto entry : g_atfork_child_list.get())
  40. entry();
  41. pthread_mutex_unlock(&g_atfork_list_mutex);
  42. }
  43. void __pthread_fork_parent(void)
  44. {
  45. if (!g_did_touch_atfork.load())
  46. return;
  47. pthread_mutex_lock(&g_atfork_list_mutex);
  48. for (auto entry : g_atfork_parent_list.get())
  49. entry();
  50. pthread_mutex_unlock(&g_atfork_list_mutex);
  51. }
  52. void __pthread_fork_atfork_register_prepare(void (*func)(void))
  53. {
  54. g_did_touch_atfork.store(true);
  55. pthread_mutex_lock(&g_atfork_list_mutex);
  56. g_atfork_prepare_list->append(func);
  57. pthread_mutex_unlock(&g_atfork_list_mutex);
  58. }
  59. void __pthread_fork_atfork_register_parent(void (*func)(void))
  60. {
  61. g_did_touch_atfork.store(true);
  62. pthread_mutex_lock(&g_atfork_list_mutex);
  63. g_atfork_parent_list->append(func);
  64. pthread_mutex_unlock(&g_atfork_list_mutex);
  65. }
  66. void __pthread_fork_atfork_register_child(void (*func)(void))
  67. {
  68. g_did_touch_atfork.store(true);
  69. pthread_mutex_lock(&g_atfork_list_mutex);
  70. g_atfork_child_list->append(func);
  71. pthread_mutex_unlock(&g_atfork_list_mutex);
  72. }
  73. // https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_self.html
  74. int pthread_self()
  75. {
  76. return gettid();
  77. }
// States of the futex word (pthread_mutex_t::lock):
static constexpr u32 MUTEX_UNLOCKED = 0;
// Held, and no thread is recorded as waiting: unlock may skip futex_wake().
static constexpr u32 MUTEX_LOCKED_NO_NEED_TO_WAKE = 1;
// Held with (possible) waiters: unlock must futex_wake() the next thread.
static constexpr u32 MUTEX_LOCKED_NEED_TO_WAKE = 2;
  81. // https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutex_init.html
  82. int pthread_mutex_init(pthread_mutex_t* mutex, pthread_mutexattr_t const* attributes)
  83. {
  84. mutex->lock = 0;
  85. mutex->owner = 0;
  86. mutex->level = 0;
  87. mutex->type = attributes ? attributes->type : __PTHREAD_MUTEX_NORMAL;
  88. return 0;
  89. }
  90. // https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutex_trylock.html
  91. int pthread_mutex_trylock(pthread_mutex_t* mutex)
  92. {
  93. u32 expected = MUTEX_UNLOCKED;
  94. bool exchanged = AK::atomic_compare_exchange_strong(&mutex->lock, expected, MUTEX_LOCKED_NO_NEED_TO_WAKE, AK::memory_order_acquire);
  95. if (exchanged) [[likely]] {
  96. if (mutex->type == __PTHREAD_MUTEX_RECURSIVE)
  97. AK::atomic_store(&mutex->owner, pthread_self(), AK::memory_order_relaxed);
  98. mutex->level = 0;
  99. return 0;
  100. } else if (mutex->type == __PTHREAD_MUTEX_RECURSIVE) {
  101. pthread_t owner = AK::atomic_load(&mutex->owner, AK::memory_order_relaxed);
  102. if (owner == pthread_self()) {
  103. // We already own the mutex!
  104. mutex->level++;
  105. return 0;
  106. }
  107. }
  108. return EBUSY;
  109. }
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutex_lock.html
// Acquires the mutex, blocking on the futex word if it is contended.
// Always returns 0.
int pthread_mutex_lock(pthread_mutex_t* mutex)
{
    // Fast path: attempt to claim the mutex without waiting.
    u32 value = MUTEX_UNLOCKED;
    bool exchanged = AK::atomic_compare_exchange_strong(&mutex->lock, value, MUTEX_LOCKED_NO_NEED_TO_WAKE, AK::memory_order_acquire);
    if (exchanged) [[likely]] {
        if (mutex->type == __PTHREAD_MUTEX_RECURSIVE)
            AK::atomic_store(&mutex->owner, pthread_self(), AK::memory_order_relaxed);
        mutex->level = 0;
        return 0;
    } else if (mutex->type == __PTHREAD_MUTEX_RECURSIVE) {
        pthread_t owner = AK::atomic_load(&mutex->owner, AK::memory_order_relaxed);
        if (owner == pthread_self()) {
            // We already own the mutex!
            mutex->level++;
            return 0;
        }
    }
    // Slow path: wait, record the fact that we're going to wait, and always
    // remember to wake the next thread up once we release the mutex.
    // NOTE: on CAS failure above, `value` holds the lock word we observed;
    // if it is already NEED_TO_WAKE we can skip the extra exchange.
    if (value != MUTEX_LOCKED_NEED_TO_WAKE)
        value = AK::atomic_exchange(&mutex->lock, MUTEX_LOCKED_NEED_TO_WAKE, AK::memory_order_acquire);
    while (value != MUTEX_UNLOCKED) {
        // Block until woken; per futex semantics the wait returns immediately
        // if the lock word no longer equals `value`, so an unlock that races
        // with us is not missed. After waking, re-claim with NEED_TO_WAKE so
        // the wake-up chain continues for any other waiters.
        futex_wait(&mutex->lock, value, nullptr, 0, false);
        value = AK::atomic_exchange(&mutex->lock, MUTEX_LOCKED_NEED_TO_WAKE, AK::memory_order_acquire);
    }
    if (mutex->type == __PTHREAD_MUTEX_RECURSIVE)
        AK::atomic_store(&mutex->owner, pthread_self(), AK::memory_order_relaxed);
    mutex->level = 0;
    return 0;
}
  142. int __pthread_mutex_lock_pessimistic_np(pthread_mutex_t* mutex)
  143. {
  144. // Same as pthread_mutex_lock(), but always set MUTEX_LOCKED_NEED_TO_WAKE,
  145. // and also don't bother checking for already owning the mutex recursively,
  146. // because we know we don't. Used in the condition variable implementation.
  147. u32 value = AK::atomic_exchange(&mutex->lock, MUTEX_LOCKED_NEED_TO_WAKE, AK::memory_order_acquire);
  148. while (value != MUTEX_UNLOCKED) {
  149. futex_wait(&mutex->lock, value, nullptr, 0, false);
  150. value = AK::atomic_exchange(&mutex->lock, MUTEX_LOCKED_NEED_TO_WAKE, AK::memory_order_acquire);
  151. }
  152. if (mutex->type == __PTHREAD_MUTEX_RECURSIVE)
  153. AK::atomic_store(&mutex->owner, pthread_self(), AK::memory_order_relaxed);
  154. mutex->level = 0;
  155. return 0;
  156. }
  157. // https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutex_unlock.html
  158. int pthread_mutex_unlock(pthread_mutex_t* mutex)
  159. {
  160. if (mutex->type == __PTHREAD_MUTEX_RECURSIVE && mutex->level > 0) {
  161. mutex->level--;
  162. return 0;
  163. }
  164. if (mutex->type == __PTHREAD_MUTEX_RECURSIVE)
  165. AK::atomic_store(&mutex->owner, 0, AK::memory_order_relaxed);
  166. u32 value = AK::atomic_exchange(&mutex->lock, MUTEX_UNLOCKED, AK::memory_order_release);
  167. if (value == MUTEX_LOCKED_NEED_TO_WAKE) [[unlikely]] {
  168. int rc = futex_wake(&mutex->lock, 1, false);
  169. VERIFY(rc >= 0);
  170. }
  171. return 0;
  172. }
  173. }