pthread_integration.cpp

/*
 * Copyright (c) 2021, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Atomic.h>
#include <AK/NeverDestroyed.h>
#include <AK/Vector.h>
#include <bits/pthread_integration.h>
#include <errno.h>
#include <sched.h>
#include <unistd.h>

namespace {

// Most programs never register atfork handlers, so don't make them pay for
// an extra mutex lock/unlock around every fork.
static Atomic<bool> g_did_touch_atfork { false };
static pthread_mutex_t g_atfork_list_mutex __PTHREAD_MUTEX_INITIALIZER;
static NeverDestroyed<Vector<void (*)(void), 4>> g_atfork_prepare_list;
static NeverDestroyed<Vector<void (*)(void), 4>> g_atfork_child_list;
static NeverDestroyed<Vector<void (*)(void), 4>> g_atfork_parent_list;

}

extern "C" {

void __pthread_fork_prepare(void)
{
    if (!g_did_touch_atfork.load())
        return;

    __pthread_mutex_lock(&g_atfork_list_mutex);
    for (auto entry : g_atfork_prepare_list.get())
        entry();
    __pthread_mutex_unlock(&g_atfork_list_mutex);
}

void __pthread_fork_child(void)
{
    if (!g_did_touch_atfork.load())
        return;

    __pthread_mutex_lock(&g_atfork_list_mutex);
    for (auto entry : g_atfork_child_list.get())
        entry();
    __pthread_mutex_unlock(&g_atfork_list_mutex);
}

void __pthread_fork_parent(void)
{
    if (!g_did_touch_atfork.load())
        return;

    __pthread_mutex_lock(&g_atfork_list_mutex);
    for (auto entry : g_atfork_parent_list.get())
        entry();
    __pthread_mutex_unlock(&g_atfork_list_mutex);
}
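
// How these hooks get driven: LibC's fork() runs the prepare handlers before
// the fork syscall, then the child or parent handlers on the respective side.
// A minimal sketch, assuming a hypothetical raw_fork_syscall() stand-in (the
// real fork() lives elsewhere in LibC):
//
//     pid_t fork(void)
//     {
//         __pthread_fork_prepare();
//         pid_t pid = raw_fork_syscall(); // hypothetical; stands in for the actual syscall
//         if (pid == 0)
//             __pthread_fork_child();  // in the child
//         else if (pid > 0)
//             __pthread_fork_parent(); // in the parent
//         return pid;
//     }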

void __pthread_fork_atfork_register_prepare(void (*func)(void))
{
    g_did_touch_atfork.store(true);

    __pthread_mutex_lock(&g_atfork_list_mutex);
    g_atfork_prepare_list->append(func);
    __pthread_mutex_unlock(&g_atfork_list_mutex);
}

void __pthread_fork_atfork_register_parent(void (*func)(void))
{
    g_did_touch_atfork.store(true);

    __pthread_mutex_lock(&g_atfork_list_mutex);
    g_atfork_parent_list->append(func);
    __pthread_mutex_unlock(&g_atfork_list_mutex);
}

void __pthread_fork_atfork_register_child(void (*func)(void))
{
    g_did_touch_atfork.store(true);

    __pthread_mutex_lock(&g_atfork_list_mutex);
    g_atfork_child_list->append(func);
    __pthread_mutex_unlock(&g_atfork_list_mutex);
}
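
// These registrars back the public pthread_atfork() entry point. A minimal
// sketch of how that layering could look (an assumption for illustration;
// the actual pthread_atfork() is defined elsewhere in LibC):
//
//     int pthread_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void))
//     {
//         // POSIX allows any of the three handlers to be null.
//         if (prepare)
//             __pthread_fork_atfork_register_prepare(prepare);
//         if (parent)
//             __pthread_fork_atfork_register_parent(parent);
//         if (child)
//             __pthread_fork_atfork_register_child(child);
//         return 0;
//     }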

int __pthread_self()
{
    return gettid();
}

int pthread_self() __attribute__((weak, alias("__pthread_self")));

int __pthread_mutex_lock(pthread_mutex_t* mutex)
{
    auto& atomic = reinterpret_cast<Atomic<u32>&>(mutex->lock);
    pthread_t this_thread = __pthread_self();
    for (;;) {
        u32 expected = false;
        if (!atomic.compare_exchange_strong(expected, true, AK::memory_order_acq_rel)) {
            // The lock is already held: a recursive mutex may be re-entered
            // by its owner; everyone else spins, yielding between attempts.
            if (mutex->type == __PTHREAD_MUTEX_RECURSIVE && mutex->owner == this_thread) {
                mutex->level++;
                return 0;
            }
            sched_yield();
            continue;
        }
        mutex->owner = this_thread;
        mutex->level = 0;
        return 0;
    }
}

int pthread_mutex_lock(pthread_mutex_t*) __attribute__((weak, alias("__pthread_mutex_lock")));

int __pthread_mutex_unlock(pthread_mutex_t* mutex)
{
    if (mutex->type == __PTHREAD_MUTEX_RECURSIVE && mutex->level > 0) {
        mutex->level--;
        return 0;
    }

    mutex->owner = 0;
    // Clear the lock word atomically with release ordering, so writes made
    // inside the critical section are visible to the next thread that
    // acquires the mutex. (A plain `mutex->lock = 0` store would race with
    // the compare-exchange in the lock paths.)
    reinterpret_cast<Atomic<u32>&>(mutex->lock).store(false, AK::memory_order_release);
    return 0;
}

int pthread_mutex_unlock(pthread_mutex_t*) __attribute__((weak, alias("__pthread_mutex_unlock")));

int __pthread_mutex_trylock(pthread_mutex_t* mutex)
{
    auto& atomic = reinterpret_cast<Atomic<u32>&>(mutex->lock);
    u32 expected = false;
    if (!atomic.compare_exchange_strong(expected, true, AK::memory_order_acq_rel)) {
        // Same recursion check as in __pthread_mutex_lock(), but fail with
        // EBUSY instead of spinning when another thread holds the lock.
        if (mutex->type == __PTHREAD_MUTEX_RECURSIVE && mutex->owner == pthread_self()) {
            mutex->level++;
            return 0;
        }
        return EBUSY;
    }
    mutex->owner = pthread_self();
    mutex->level = 0;
    return 0;
}

int pthread_mutex_trylock(pthread_mutex_t* mutex) __attribute__((weak, alias("__pthread_mutex_trylock")));

int __pthread_mutex_init(pthread_mutex_t* mutex, const pthread_mutexattr_t* attributes)
{
    mutex->lock = 0;
    mutex->owner = 0;
    mutex->level = 0;
    mutex->type = attributes ? attributes->type : __PTHREAD_MUTEX_NORMAL;
    return 0;
}

int pthread_mutex_init(pthread_mutex_t*, const pthread_mutexattr_t*) __attribute__((weak, alias("__pthread_mutex_init")));

}
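
// Usage sketch (an illustration, not part of this file): exercising the
// recursive-mutex paths above through the public pthread API.
//
//     #include <pthread.h>
//     #include <stdio.h>
//
//     int main()
//     {
//         pthread_mutexattr_t attr;
//         pthread_mutexattr_init(&attr);
//         pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
//
//         pthread_mutex_t mutex;
//         pthread_mutex_init(&mutex, &attr);
//
//         pthread_mutex_lock(&mutex);
//         pthread_mutex_lock(&mutex);   // Re-entry by the owner bumps `level` instead of deadlocking.
//         pthread_mutex_unlock(&mutex); // Decrements `level`...
//         pthread_mutex_unlock(&mutex); // ...and the final unlock releases the lock word.
//
//         puts("recursive mutex ok");
//         return 0;
//     }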