pthread.cpp

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <AK/Assertions.h>
#include <AK/Atomic.h>
#include <AK/Debug.h>
#include <AK/StdLibExtras.h>
#include <Kernel/API/Syscall.h>
#include <LibSystem/syscall.h>
#include <limits.h>
#include <pthread.h>
#include <serenity.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <syscall.h>
#include <time.h>
#include <unistd.h>

namespace {
using PthreadAttrImpl = Syscall::SC_create_thread_params;

struct KeyDestroyer {
    ~KeyDestroyer() { destroy_for_current_thread(); }
    static void destroy_for_current_thread();
};
} // end anonymous namespace

constexpr size_t required_stack_alignment = 4 * MiB;
constexpr size_t highest_reasonable_guard_size = 32 * PAGE_SIZE;
constexpr size_t highest_reasonable_stack_size = 8 * MiB; // That's the default in Ubuntu?

// Create an RAII object with a global destructor to destroy pthread keys for the main thread.
// Impact of this: Any global object that wants to do something with pthread_getspecific
// in its destructor from the main thread might be in for a nasty surprise.
static KeyDestroyer s_key_destroyer;

#define __RETURN_PTHREAD_ERROR(rc) \
    return ((rc) < 0 ? -(rc) : 0)

extern "C" {

static void* pthread_create_helper(void* (*routine)(void*), void* argument)
{
    void* ret_val = routine(argument);
    pthread_exit(ret_val);
    return nullptr;
}

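// Builds the initial stack for the new thread so that pthread_create_helper()
// finds `entry` and `argument` as its two parameters above a fake (null) return
// address. The padding loop keeps the stack 16-byte aligned at the point of the
// simulated call (the usual System V calling-convention requirement), which is
// what the ASSERT below checks.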
static int create_thread(pthread_t* thread, void* (*entry)(void*), void* argument, PthreadAttrImpl* thread_params)
{
    void** stack = (void**)((uintptr_t)thread_params->m_stack_location + thread_params->m_stack_size);

    auto push_on_stack = [&](void* data) {
        stack--;
        *stack = data;
        thread_params->m_stack_size -= sizeof(void*);
    };

    // We set up the stack for pthread_create_helper.
    // Note that we need to align the stack to 16B, accounting for
    // the fact that we also push 8 bytes.
    while (((uintptr_t)stack - 8) % 16 != 0)
        push_on_stack(nullptr);

    push_on_stack(argument);
    push_on_stack((void*)entry);
    ASSERT((uintptr_t)stack % 16 == 0);

    // Push a fake return address
    push_on_stack(nullptr);

    int rc = syscall(SC_create_thread, pthread_create_helper, thread_params);
    if (rc >= 0)
        *thread = rc;
    __RETURN_PTHREAD_ERROR(rc);
}

[[noreturn]] static void exit_thread(void* code)
{
    KeyDestroyer::destroy_for_current_thread();
    syscall(SC_exit_thread, code);
    ASSERT_NOT_REACHED();
}

int pthread_self()
{
    return gettid();
}

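// If the caller passes null attributes, a default-constructed SC_create_thread_params
// is used. When no stack was provided via pthread_attr_setstack(), the requested stack
// size is rounded up to required_stack_alignment and a fresh anonymous mapping named
// "Thread stack" is allocated for it.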
int pthread_create(pthread_t* thread, pthread_attr_t* attributes, void* (*start_routine)(void*), void* argument_to_start_routine)
{
    if (!thread)
        return -EINVAL;

    PthreadAttrImpl default_attributes {};
    PthreadAttrImpl** arg_attributes = reinterpret_cast<PthreadAttrImpl**>(attributes);
    PthreadAttrImpl* used_attributes = arg_attributes ? *arg_attributes : &default_attributes;

    if (!used_attributes->m_stack_location) {
        // adjust stack size, user might have called setstacksize, which has no restrictions on size/alignment
        if (0 != (used_attributes->m_stack_size % required_stack_alignment))
            used_attributes->m_stack_size += required_stack_alignment - (used_attributes->m_stack_size % required_stack_alignment);

        used_attributes->m_stack_location = mmap_with_name(nullptr, used_attributes->m_stack_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, 0, 0, "Thread stack");
        if (!used_attributes->m_stack_location)
            return -1;
    }

#if PTHREAD_DEBUG
    dbgprintf("pthread_create: Creating thread with attributes at %p, detach state %s, priority %d, guard page size %d, stack size %d, stack location %p\n",
        used_attributes,
        (PTHREAD_CREATE_JOINABLE == used_attributes->m_detach_state) ? "joinable" : "detached",
        used_attributes->m_schedule_priority,
        used_attributes->m_guard_page_size,
        used_attributes->m_stack_size,
        used_attributes->m_stack_location);
#endif

    return create_thread(thread, start_routine, argument_to_start_routine, used_attributes);
}

void pthread_exit(void* value_ptr)
{
    exit_thread(value_ptr);
}

int pthread_join(pthread_t thread, void** exit_value_ptr)
{
    int rc = syscall(SC_join_thread, thread, exit_value_ptr);
    __RETURN_PTHREAD_ERROR(rc);
}

int pthread_detach(pthread_t thread)
{
    int rc = syscall(SC_detach_thread, thread);
    __RETURN_PTHREAD_ERROR(rc);
}

int pthread_sigmask(int how, const sigset_t* set, sigset_t* old_set)
{
    if (sigprocmask(how, set, old_set))
        return errno;
    return 0;
}

int pthread_mutex_init(pthread_mutex_t* mutex, const pthread_mutexattr_t* attributes)
{
    mutex->lock = 0;
    mutex->owner = 0;
    mutex->level = 0;
    mutex->type = attributes ? attributes->type : PTHREAD_MUTEX_NORMAL;
    return 0;
}

int pthread_mutex_destroy(pthread_mutex_t*)
{
    return 0;
}

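// The mutex is a simple userspace spinlock: lock() loops on an atomic
// compare-exchange of the `lock` word and calls sched_yield() between attempts
// rather than sleeping in the kernel. Recursive mutexes additionally track the
// owning thread id and a nesting level.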
int pthread_mutex_lock(pthread_mutex_t* mutex)
{
    auto& atomic = reinterpret_cast<Atomic<u32>&>(mutex->lock);
    pthread_t this_thread = pthread_self();
    for (;;) {
        u32 expected = false;
        if (!atomic.compare_exchange_strong(expected, true, AK::memory_order_acq_rel)) {
            if (mutex->type == PTHREAD_MUTEX_RECURSIVE && mutex->owner == this_thread) {
                mutex->level++;
                return 0;
            }
            sched_yield();
            continue;
        }
        mutex->owner = this_thread;
        mutex->level = 0;
        return 0;
    }
}

int pthread_mutex_trylock(pthread_mutex_t* mutex)
{
    auto& atomic = reinterpret_cast<Atomic<u32>&>(mutex->lock);
    u32 expected = false;
    if (!atomic.compare_exchange_strong(expected, true, AK::memory_order_acq_rel)) {
        if (mutex->type == PTHREAD_MUTEX_RECURSIVE && mutex->owner == pthread_self()) {
            mutex->level++;
            return 0;
        }
        return EBUSY;
    }
    mutex->owner = pthread_self();
    mutex->level = 0;
    return 0;
}

int pthread_mutex_unlock(pthread_mutex_t* mutex)
{
    if (mutex->type == PTHREAD_MUTEX_RECURSIVE && mutex->level > 0) {
        mutex->level--;
        return 0;
    }

    mutex->owner = 0;
    mutex->lock = 0;
    return 0;
}

int pthread_mutexattr_init(pthread_mutexattr_t* attr)
{
    attr->type = PTHREAD_MUTEX_NORMAL;
    return 0;
}

int pthread_mutexattr_destroy(pthread_mutexattr_t*)
{
    return 0;
}

int pthread_mutexattr_settype(pthread_mutexattr_t* attr, int type)
{
    if (!attr)
        return EINVAL;
    if (type != PTHREAD_MUTEX_NORMAL && type != PTHREAD_MUTEX_RECURSIVE)
        return EINVAL;
    attr->type = type;
    return 0;
}

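// A pthread_attr_t is an opaque pointer-sized handle: pthread_attr_init() heap-allocates
// a PthreadAttrImpl (i.e. Syscall::SC_create_thread_params) and stores the pointer in the
// caller's pthread_attr_t, which is why every getter/setter below reinterpret_casts the
// handle back to a PthreadAttrImpl**.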
int pthread_attr_init(pthread_attr_t* attributes)
{
    auto* impl = new PthreadAttrImpl {};
    *attributes = impl;

#if PTHREAD_DEBUG
    dbgprintf("pthread_attr_init: New thread attributes at %p, detach state %s, priority %d, guard page size %d, stack size %d, stack location %p\n",
        impl,
        (PTHREAD_CREATE_JOINABLE == impl->m_detach_state) ? "joinable" : "detached",
        impl->m_schedule_priority,
        impl->m_guard_page_size,
        impl->m_stack_size,
        impl->m_stack_location);
#endif

    return 0;
}

int pthread_attr_destroy(pthread_attr_t* attributes)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));
    delete attributes_impl;
    return 0;
}

int pthread_attr_getdetachstate(const pthread_attr_t* attributes, int* p_detach_state)
{
    auto* attributes_impl = *(reinterpret_cast<const PthreadAttrImpl* const*>(attributes));
    if (!attributes_impl || !p_detach_state)
        return EINVAL;
    *p_detach_state = attributes_impl->m_detach_state;
    return 0;
}

int pthread_attr_setdetachstate(pthread_attr_t* attributes, int detach_state)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));
    if (!attributes_impl)
        return EINVAL;
    if (detach_state != PTHREAD_CREATE_JOINABLE && detach_state != PTHREAD_CREATE_DETACHED)
        return EINVAL;
    attributes_impl->m_detach_state = detach_state;

#if PTHREAD_DEBUG
    dbgprintf("pthread_attr_setdetachstate: Thread attributes at %p, detach state %s, priority %d, guard page size %d, stack size %d, stack location %p\n",
        attributes_impl,
        (PTHREAD_CREATE_JOINABLE == attributes_impl->m_detach_state) ? "joinable" : "detached",
        attributes_impl->m_schedule_priority,
        attributes_impl->m_guard_page_size,
        attributes_impl->m_stack_size,
        attributes_impl->m_stack_location);
#endif

    return 0;
}

int pthread_attr_getguardsize(const pthread_attr_t* attributes, size_t* p_guard_size)
{
    auto* attributes_impl = *(reinterpret_cast<const PthreadAttrImpl* const*>(attributes));
    if (!attributes_impl || !p_guard_size)
        return EINVAL;
    *p_guard_size = attributes_impl->m_reported_guard_page_size;
    return 0;
}

int pthread_attr_setguardsize(pthread_attr_t* attributes, size_t guard_size)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));
    if (!attributes_impl)
        return EINVAL;

    size_t actual_guard_size = guard_size;
    // round up
    if (0 != (guard_size % PAGE_SIZE))
        actual_guard_size += PAGE_SIZE - (guard_size % PAGE_SIZE);

    // what is the user even doing?
    if (actual_guard_size > highest_reasonable_guard_size) {
        return EINVAL;
    }

    attributes_impl->m_guard_page_size = actual_guard_size;
    attributes_impl->m_reported_guard_page_size = guard_size; // POSIX, why?

#if PTHREAD_DEBUG
    dbgprintf("pthread_attr_setguardsize: Thread attributes at %p, detach state %s, priority %d, guard page size %d, stack size %d, stack location %p\n",
        attributes_impl,
        (PTHREAD_CREATE_JOINABLE == attributes_impl->m_detach_state) ? "joinable" : "detached",
        attributes_impl->m_schedule_priority,
        attributes_impl->m_guard_page_size,
        attributes_impl->m_stack_size,
        attributes_impl->m_stack_location);
#endif

    return 0;
}

int pthread_attr_getschedparam(const pthread_attr_t* attributes, struct sched_param* p_sched_param)
{
    auto* attributes_impl = *(reinterpret_cast<const PthreadAttrImpl* const*>(attributes));
    if (!attributes_impl || !p_sched_param)
        return EINVAL;
    p_sched_param->sched_priority = attributes_impl->m_schedule_priority;
    return 0;
}

int pthread_attr_setschedparam(pthread_attr_t* attributes, const struct sched_param* p_sched_param)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));
    if (!attributes_impl || !p_sched_param)
        return EINVAL;
    if (p_sched_param->sched_priority < THREAD_PRIORITY_MIN || p_sched_param->sched_priority > THREAD_PRIORITY_MAX)
        return ENOTSUP;

    attributes_impl->m_schedule_priority = p_sched_param->sched_priority;

#if PTHREAD_DEBUG
    dbgprintf("pthread_attr_setschedparam: Thread attributes at %p, detach state %s, priority %d, guard page size %d, stack size %d, stack location %p\n",
        attributes_impl,
        (PTHREAD_CREATE_JOINABLE == attributes_impl->m_detach_state) ? "joinable" : "detached",
        attributes_impl->m_schedule_priority,
        attributes_impl->m_guard_page_size,
        attributes_impl->m_stack_size,
        attributes_impl->m_stack_location);
#endif

    return 0;
}

int pthread_attr_getstack(const pthread_attr_t* attributes, void** p_stack_ptr, size_t* p_stack_size)
{
    auto* attributes_impl = *(reinterpret_cast<const PthreadAttrImpl* const*>(attributes));
    if (!attributes_impl || !p_stack_ptr || !p_stack_size)
        return EINVAL;

    *p_stack_ptr = attributes_impl->m_stack_location;
    *p_stack_size = attributes_impl->m_stack_size;

    return 0;
}

int pthread_attr_setstack(pthread_attr_t* attributes, void* p_stack, size_t stack_size)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));
    if (!attributes_impl || !p_stack)
        return EINVAL;

    // Check for required alignment on size
    if (0 != (stack_size % required_stack_alignment))
        return EINVAL;

    // FIXME: Check for required alignment on pointer?

    // FIXME: "[EACCES] The stack page(s) described by stackaddr and stacksize are not both readable and writable by the thread."
    // Have to check that the whole range is mapped to this process/thread? Can we defer this to create_thread?

    attributes_impl->m_stack_size = stack_size;
    attributes_impl->m_stack_location = p_stack;

#if PTHREAD_DEBUG
    dbgprintf("pthread_attr_setstack: Thread attributes at %p, detach state %s, priority %d, guard page size %d, stack size %d, stack location %p\n",
        attributes_impl,
        (PTHREAD_CREATE_JOINABLE == attributes_impl->m_detach_state) ? "joinable" : "detached",
        attributes_impl->m_schedule_priority,
        attributes_impl->m_guard_page_size,
        attributes_impl->m_stack_size,
        attributes_impl->m_stack_location);
#endif

    return 0;
}

int pthread_attr_getstacksize(const pthread_attr_t* attributes, size_t* p_stack_size)
{
    auto* attributes_impl = *(reinterpret_cast<const PthreadAttrImpl* const*>(attributes));
    if (!attributes_impl || !p_stack_size)
        return EINVAL;
    *p_stack_size = attributes_impl->m_stack_size;
    return 0;
}

int pthread_attr_setstacksize(pthread_attr_t* attributes, size_t stack_size)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));
    if (!attributes_impl)
        return EINVAL;

    if ((stack_size < PTHREAD_STACK_MIN) || stack_size > highest_reasonable_stack_size)
        return EINVAL;

    attributes_impl->m_stack_size = stack_size;

#if PTHREAD_DEBUG
    dbgprintf("pthread_attr_setstacksize: Thread attributes at %p, detach state %s, priority %d, guard page size %d, stack size %d, stack location %p\n",
        attributes_impl,
        (PTHREAD_CREATE_JOINABLE == attributes_impl->m_detach_state) ? "joinable" : "detached",
        attributes_impl->m_schedule_priority,
        attributes_impl->m_guard_page_size,
        attributes_impl->m_stack_size,
        attributes_impl->m_stack_location);
#endif

    return 0;
}

int pthread_getschedparam([[maybe_unused]] pthread_t thread, [[maybe_unused]] int* policy, [[maybe_unused]] struct sched_param* param)
{
    return 0;
}

int pthread_setschedparam([[maybe_unused]] pthread_t thread, [[maybe_unused]] int policy, [[maybe_unused]] const struct sched_param* param)
{
    return 0;
}

int pthread_cond_init(pthread_cond_t* cond, const pthread_condattr_t* attr)
{
    cond->value = 0;
    cond->previous = 0;
    cond->clockid = attr ? attr->clockid : CLOCK_MONOTONIC_COARSE;
    return 0;
}

int pthread_cond_destroy(pthread_cond_t*)
{
    return 0;
}

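// Condition variables are built on the kernel's futex(): a waiter snapshots
// cond->value, releases the mutex, and futex-waits on that snapshot, while
// pthread_cond_signal()/pthread_cond_broadcast() bump the value and FUTEX_WAKE
// one or all waiters. As with any POSIX condition variable, callers should
// still re-check their predicate in a loop; an illustrative caller (the `ready`
// flag and `mutex` below are hypothetical) looks like:
//
//     pthread_mutex_lock(&mutex);
//     while (!ready)
//         pthread_cond_wait(&cond, &mutex);
//     pthread_mutex_unlock(&mutex);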
static int futex_wait(uint32_t& futex_addr, uint32_t value, const struct timespec* abstime)
{
    int saved_errno = errno;
    // NOTE: FUTEX_WAIT takes a relative timeout, so use FUTEX_WAIT_BITSET instead!
    int rc = futex(&futex_addr, FUTEX_WAIT_BITSET, value, abstime, nullptr, FUTEX_BITSET_MATCH_ANY);
    if (rc < 0 && errno == EAGAIN) {
        // If we didn't wait, that's not an error
        errno = saved_errno;
        rc = 0;
    }
    return rc;
}

static int cond_wait(pthread_cond_t* cond, pthread_mutex_t* mutex, const struct timespec* abstime)
{
    u32 value = cond->value;
    cond->previous = value;
    pthread_mutex_unlock(mutex);
    int rc = futex_wait(cond->value, value, abstime);
    pthread_mutex_lock(mutex);
    return rc;
}

int pthread_cond_wait(pthread_cond_t* cond, pthread_mutex_t* mutex)
{
    int rc = cond_wait(cond, mutex, nullptr);
    ASSERT(rc == 0);
    return 0;
}

int pthread_condattr_init(pthread_condattr_t* attr)
{
    attr->clockid = CLOCK_MONOTONIC_COARSE;
    return 0;
}

int pthread_condattr_destroy(pthread_condattr_t*)
{
    return 0;
}

int pthread_condattr_setclock(pthread_condattr_t* attr, clockid_t clock)
{
    attr->clockid = clock;
    return 0;
}

int pthread_cond_timedwait(pthread_cond_t* cond, pthread_mutex_t* mutex, const struct timespec* abstime)
{
    return cond_wait(cond, mutex, abstime);
}

int pthread_cond_signal(pthread_cond_t* cond)
{
    u32 value = cond->previous + 1;
    cond->value = value;
    int rc = futex(&cond->value, FUTEX_WAKE, 1, nullptr, nullptr, 0);
    ASSERT(rc >= 0);
    return 0;
}

int pthread_cond_broadcast(pthread_cond_t* cond)
{
    u32 value = cond->previous + 1;
    cond->value = value;
    int rc = futex(&cond->value, FUTEX_WAKE, INT32_MAX, nullptr, nullptr, 0);
    ASSERT(rc >= 0);
    return 0;
}

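// Thread-specific storage: key metadata (the destructor table and the next free
// slot) is global and guarded by s_keys.mutex, while the stored values live in a
// per-thread __thread array (t_specifics). An illustrative use of the API (the
// key name and stored buffer are hypothetical) looks like:
//
//     static pthread_key_t s_buffer_key;
//     pthread_key_create(&s_buffer_key, free);
//     pthread_setspecific(s_buffer_key, malloc(128));
//     void* buffer = pthread_getspecific(s_buffer_key);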
static constexpr int max_keys = PTHREAD_KEYS_MAX;

typedef void (*KeyDestructor)(void*);

struct KeyTable {
    KeyDestructor destructors[max_keys] { nullptr };
    int next { 0 };
    pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
};

struct SpecificTable {
    void* values[max_keys] { nullptr };
};

static KeyTable s_keys;

__thread SpecificTable t_specifics;

int pthread_key_create(pthread_key_t* key, KeyDestructor destructor)
{
    int ret = 0;
    pthread_mutex_lock(&s_keys.mutex);
    if (s_keys.next >= max_keys) {
        ret = EAGAIN;
    } else {
        *key = s_keys.next++;
        s_keys.destructors[*key] = destructor;
        ret = 0;
    }
    pthread_mutex_unlock(&s_keys.mutex);
    return ret;
}

int pthread_key_delete(pthread_key_t key)
{
    if (key < 0 || key >= max_keys)
        return EINVAL;
    pthread_mutex_lock(&s_keys.mutex);
    s_keys.destructors[key] = nullptr;
    pthread_mutex_unlock(&s_keys.mutex);
    return 0;
}

void* pthread_getspecific(pthread_key_t key)
{
    if (key < 0)
        return nullptr;
    if (key >= max_keys)
        return nullptr;
    return t_specifics.values[key];
}

int pthread_setspecific(pthread_key_t key, const void* value)
{
    if (key < 0)
        return EINVAL;
    if (key >= max_keys)
        return EINVAL;

    t_specifics.values[key] = const_cast<void*>(value);

    return 0;
}

void KeyDestroyer::destroy_for_current_thread()
{
    // This function will either be called during exit_thread, for a pthread, or
    // during global program shutdown for the main thread.
    pthread_mutex_lock(&s_keys.mutex);
    size_t num_used_keys = s_keys.next;

    // Dr. POSIX accounts for weird key destructors setting their own key again.
    // Or even, setting other unrelated keys? Odd, but whatever the Doc says goes.
    for (size_t destruct_iteration = 0; destruct_iteration < PTHREAD_DESTRUCTOR_ITERATIONS; ++destruct_iteration) {
        bool any_nonnull_destructors = false;
        for (size_t key_index = 0; key_index < num_used_keys; ++key_index) {
            void* value = exchange(t_specifics.values[key_index], nullptr);
            if (value && s_keys.destructors[key_index]) {
                any_nonnull_destructors = true;
                (*s_keys.destructors[key_index])(value);
            }
        }
        if (!any_nonnull_destructors)
            break;
    }
    pthread_mutex_unlock(&s_keys.mutex);
}

int pthread_setname_np(pthread_t thread, const char* name)
{
    if (!name)
        return EFAULT;
    int rc = syscall(SC_set_thread_name, thread, name, strlen(name));
    __RETURN_PTHREAD_ERROR(rc);
}

int pthread_getname_np(pthread_t thread, char* buffer, size_t buffer_size)
{
    int rc = syscall(SC_get_thread_name, thread, buffer, buffer_size);
    __RETURN_PTHREAD_ERROR(rc);
}

int pthread_setcancelstate([[maybe_unused]] int state, [[maybe_unused]] int* oldstate)
{
    TODO();
}

int pthread_setcanceltype([[maybe_unused]] int type, [[maybe_unused]] int* oldtype)
{
    TODO();
}

int pthread_equal(pthread_t t1, pthread_t t2)
{
    return t1 == t2;
}

int pthread_rwlock_destroy(pthread_rwlock_t* rl)
{
    if (!rl)
        return 0;
    ASSERT_NOT_REACHED();
}

int pthread_rwlock_init(pthread_rwlock_t* __restrict, const pthread_rwlockattr_t* __restrict)
{
    ASSERT_NOT_REACHED();
}

int pthread_rwlock_rdlock(pthread_rwlock_t*)
{
    ASSERT_NOT_REACHED();
}

int pthread_rwlock_timedrdlock(pthread_rwlock_t* __restrict, const struct timespec* __restrict)
{
    ASSERT_NOT_REACHED();
}

int pthread_rwlock_timedwrlock(pthread_rwlock_t* __restrict, const struct timespec* __restrict)
{
    ASSERT_NOT_REACHED();
}

int pthread_rwlock_tryrdlock(pthread_rwlock_t*)
{
    ASSERT_NOT_REACHED();
}

int pthread_rwlock_trywrlock(pthread_rwlock_t*)
{
    ASSERT_NOT_REACHED();
}

int pthread_rwlock_unlock(pthread_rwlock_t*)
{
    ASSERT_NOT_REACHED();
}

int pthread_rwlock_wrlock(pthread_rwlock_t*)
{
    ASSERT_NOT_REACHED();
}

int pthread_rwlockattr_destroy(pthread_rwlockattr_t*)
{
    ASSERT_NOT_REACHED();
}

int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t* __restrict, int* __restrict)
{
    ASSERT_NOT_REACHED();
}

int pthread_rwlockattr_init(pthread_rwlockattr_t*)
{
    ASSERT_NOT_REACHED();
}

int pthread_rwlockattr_setpshared(pthread_rwlockattr_t*, int)
{
    ASSERT_NOT_REACHED();
}

int pthread_atfork(void (*)(void), void (*)(void), void (*)(void))
{
    ASSERT_NOT_REACHED();
}

} // extern "C"