pthread.cpp

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <AK/Assertions.h>
#include <AK/Atomic.h>
#include <AK/StdLibExtras.h>
#include <Kernel/API/Syscall.h>
#include <limits.h>
#include <pthread.h>
#include <serenity.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <time.h>
#include <unistd.h>

//#define PTHREAD_DEBUG
namespace {

using PthreadAttrImpl = Syscall::SC_create_thread_params;

struct KeyDestroyer {
    ~KeyDestroyer() { destroy_for_current_thread(); }
    static void destroy_for_current_thread();
};

} // end anonymous namespace
constexpr size_t required_stack_alignment = 4 * MiB;
constexpr size_t highest_reasonable_guard_size = 32 * PAGE_SIZE;
constexpr size_t highest_reasonable_stack_size = 8 * MiB; // That's the default in Ubuntu?

// Create an RAII object with a global destructor to destroy pthread keys for the main thread.
// Impact of this: Any global object that wants to do something with pthread_getspecific
// in its destructor from the main thread might be in for a nasty surprise.
static KeyDestroyer s_key_destroyer;
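// Serenity syscalls report failure as a negative errno value, while the pthread
// API reports errors as positive errno values (and 0 on success), so every
// syscall-backed function below funnels its result through this macro.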
#define __RETURN_PTHREAD_ERROR(rc) \
    return ((rc) < 0 ? -(rc) : 0)
extern "C" {

static void* pthread_create_helper(void* (*routine)(void*), void* argument)
{
    void* ret_val = routine(argument);
    pthread_exit(ret_val);
    return nullptr;
}
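// The new thread starts executing in pthread_create_helper() on a freshly
// prepared stack. On i386, where arguments are passed on the stack, the top of
// that stack as built by create_thread() looks like this (highest address first):
//
//     ...16-byte alignment padding...
//     argument    <- argument for the user's start routine
//     entry       <- the user's start routine
//     nullptr     <- fake return address; the helper never returns, it calls pthread_exit()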
static int create_thread(pthread_t* thread, void* (*entry)(void*), void* argument, PthreadAttrImpl* thread_params)
{
    void** stack = (void**)((uintptr_t)thread_params->m_stack_location + thread_params->m_stack_size);

    auto push_on_stack = [&](void* data) {
        stack--;
        *stack = data;
        thread_params->m_stack_size -= sizeof(void*);
    };

    // We set up the stack for pthread_create_helper.
    // Note that we need to align the stack to 16B, accounting for
    // the fact that we also push 8 bytes.
    while (((uintptr_t)stack - 8) % 16 != 0)
        push_on_stack(nullptr);

    push_on_stack(argument);
    push_on_stack((void*)entry);
    ASSERT((uintptr_t)stack % 16 == 0);

    // Push a fake return address
    push_on_stack(nullptr);

    int rc = syscall(SC_create_thread, pthread_create_helper, thread_params);
    if (rc >= 0)
        *thread = rc;
    __RETURN_PTHREAD_ERROR(rc);
}
[[noreturn]] static void exit_thread(void* code)
{
    KeyDestroyer::destroy_for_current_thread();
    syscall(SC_exit_thread, code);
    ASSERT_NOT_REACHED();
}
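// On Serenity a pthread_t is simply the kernel thread id: pthread_self() returns
// gettid(), and create_thread() stores the tid returned by SC_create_thread.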
int pthread_self()
{
    return gettid();
}

int pthread_create(pthread_t* thread, pthread_attr_t* attributes, void* (*start_routine)(void*), void* argument_to_start_routine)
{
    if (!thread)
        return -EINVAL;

    PthreadAttrImpl default_attributes {};
    PthreadAttrImpl** arg_attributes = reinterpret_cast<PthreadAttrImpl**>(attributes);
    PthreadAttrImpl* used_attributes = arg_attributes ? *arg_attributes : &default_attributes;

    if (!used_attributes->m_stack_location) {
        // adjust stack size, user might have called setstacksize, which has no restrictions on size/alignment
        if (0 != (used_attributes->m_stack_size % required_stack_alignment))
            used_attributes->m_stack_size += required_stack_alignment - (used_attributes->m_stack_size % required_stack_alignment);

        used_attributes->m_stack_location = mmap_with_name(nullptr, used_attributes->m_stack_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, 0, 0, "Thread stack");
        if (!used_attributes->m_stack_location)
            return -1;
    }

#ifdef PTHREAD_DEBUG
    dbgprintf("pthread_create: Creating thread with attributes at %p, detach state %s, priority %d, guard page size %d, stack size %d, stack location %p\n",
        used_attributes,
        (PTHREAD_CREATE_JOINABLE == used_attributes->m_detach_state) ? "joinable" : "detached",
        used_attributes->m_schedule_priority,
        used_attributes->m_guard_page_size,
        used_attributes->m_stack_size,
        used_attributes->m_stack_location);
#endif

    return create_thread(thread, start_routine, argument_to_start_routine, used_attributes);
}
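// A minimal usage sketch of the create/join pair (not part of this file; names
// are illustrative):
//
//     static void* worker(void* arg) { return arg; }
//
//     pthread_t tid;
//     if (pthread_create(&tid, nullptr, worker, nullptr) == 0) {
//         void* result = nullptr;
//         pthread_join(tid, &result);
//     }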
void pthread_exit(void* value_ptr)
{
    exit_thread(value_ptr);
}

int pthread_join(pthread_t thread, void** exit_value_ptr)
{
    int rc = syscall(SC_join_thread, thread, exit_value_ptr);
    __RETURN_PTHREAD_ERROR(rc);
}

int pthread_detach(pthread_t thread)
{
    int rc = syscall(SC_detach_thread, thread);
    __RETURN_PTHREAD_ERROR(rc);
}

int pthread_sigmask(int how, const sigset_t* set, sigset_t* old_set)
{
    if (sigprocmask(how, set, old_set))
        return errno;
    return 0;
}

int pthread_mutex_init(pthread_mutex_t* mutex, const pthread_mutexattr_t* attributes)
{
    mutex->lock = 0;
    mutex->owner = 0;
    mutex->level = 0;
    mutex->type = attributes ? attributes->type : PTHREAD_MUTEX_NORMAL;
    return 0;
}

int pthread_mutex_destroy(pthread_mutex_t*)
{
    return 0;
}
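// The mutex is a simple userspace spinlock: lock() loops on an atomic
// compare-exchange of mutex->lock from 0 to 1, calling sched_yield() between
// attempts instead of blocking in the kernel. Recursive mutexes additionally
// record the owning thread and a nesting level so the owner can re-lock.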
int pthread_mutex_lock(pthread_mutex_t* mutex)
{
    auto& atomic = reinterpret_cast<Atomic<u32>&>(mutex->lock);
    pthread_t this_thread = pthread_self();
    for (;;) {
        u32 expected = false;
        if (!atomic.compare_exchange_strong(expected, true, AK::memory_order_acq_rel)) {
            if (mutex->type == PTHREAD_MUTEX_RECURSIVE && mutex->owner == this_thread) {
                mutex->level++;
                return 0;
            }
            sched_yield();
            continue;
        }
        mutex->owner = this_thread;
        mutex->level = 0;
        return 0;
    }
}

int pthread_mutex_trylock(pthread_mutex_t* mutex)
{
    auto& atomic = reinterpret_cast<Atomic<u32>&>(mutex->lock);
    u32 expected = false;
    if (!atomic.compare_exchange_strong(expected, true, AK::memory_order_acq_rel)) {
        if (mutex->type == PTHREAD_MUTEX_RECURSIVE && mutex->owner == pthread_self()) {
            mutex->level++;
            return 0;
        }
        return EBUSY;
    }
    mutex->owner = pthread_self();
    mutex->level = 0;
    return 0;
}
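// For recursive mutexes, unlock() pops one nesting level per call and only
// releases the lock word once the outermost lock has been undone (level == 0).
// Ownership is not verified; callers are trusted to unlock what they locked.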
int pthread_mutex_unlock(pthread_mutex_t* mutex)
{
    if (mutex->type == PTHREAD_MUTEX_RECURSIVE && mutex->level > 0) {
        mutex->level--;
        return 0;
    }
    mutex->owner = 0;
    mutex->lock = 0;
    return 0;
}

int pthread_mutexattr_init(pthread_mutexattr_t* attr)
{
    attr->type = PTHREAD_MUTEX_NORMAL;
    return 0;
}

int pthread_mutexattr_destroy(pthread_mutexattr_t*)
{
    return 0;
}

int pthread_mutexattr_settype(pthread_mutexattr_t* attr, int type)
{
    if (!attr)
        return EINVAL;
    if (type != PTHREAD_MUTEX_NORMAL && type != PTHREAD_MUTEX_RECURSIVE)
        return EINVAL;
    attr->type = type;
    return 0;
}
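// A pthread_attr_t is an opaque pointer to a heap-allocated PthreadAttrImpl
// (the kernel's SC_create_thread_params), which is why pthread_attr_init()
// allocates with new and pthread_attr_destroy() deletes it.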
int pthread_attr_init(pthread_attr_t* attributes)
{
    auto* impl = new PthreadAttrImpl {};
    *attributes = impl;

#ifdef PTHREAD_DEBUG
    dbgprintf("pthread_attr_init: New thread attributes at %p, detach state %s, priority %d, guard page size %d, stack size %d, stack location %p\n",
        impl,
        (PTHREAD_CREATE_JOINABLE == impl->m_detach_state) ? "joinable" : "detached",
        impl->m_schedule_priority,
        impl->m_guard_page_size,
        impl->m_stack_size,
        impl->m_stack_location);
#endif

    return 0;
}

int pthread_attr_destroy(pthread_attr_t* attributes)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));
    delete attributes_impl;
    return 0;
}
int pthread_attr_getdetachstate(const pthread_attr_t* attributes, int* p_detach_state)
{
    auto* attributes_impl = *(reinterpret_cast<const PthreadAttrImpl* const*>(attributes));
    if (!attributes_impl || !p_detach_state)
        return EINVAL;
    *p_detach_state = attributes_impl->m_detach_state;
    return 0;
}

int pthread_attr_setdetachstate(pthread_attr_t* attributes, int detach_state)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));
    if (!attributes_impl)
        return EINVAL;
    if (detach_state != PTHREAD_CREATE_JOINABLE && detach_state != PTHREAD_CREATE_DETACHED)
        return EINVAL;
    attributes_impl->m_detach_state = detach_state;

#ifdef PTHREAD_DEBUG
    dbgprintf("pthread_attr_setdetachstate: Thread attributes at %p, detach state %s, priority %d, guard page size %d, stack size %d, stack location %p\n",
        attributes_impl,
        (PTHREAD_CREATE_JOINABLE == attributes_impl->m_detach_state) ? "joinable" : "detached",
        attributes_impl->m_schedule_priority,
        attributes_impl->m_guard_page_size,
        attributes_impl->m_stack_size,
        attributes_impl->m_stack_location);
#endif

    return 0;
}
int pthread_attr_getguardsize(const pthread_attr_t* attributes, size_t* p_guard_size)
{
    auto* attributes_impl = *(reinterpret_cast<const PthreadAttrImpl* const*>(attributes));
    if (!attributes_impl || !p_guard_size)
        return EINVAL;
    *p_guard_size = attributes_impl->m_reported_guard_page_size;
    return 0;
}

int pthread_attr_setguardsize(pthread_attr_t* attributes, size_t guard_size)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));
    if (!attributes_impl)
        return EINVAL;

    size_t actual_guard_size = guard_size;
    // round up
    if (0 != (guard_size % PAGE_SIZE))
        actual_guard_size += PAGE_SIZE - (guard_size % PAGE_SIZE);

    // what is the user even doing?
    if (actual_guard_size > highest_reasonable_guard_size) {
        return EINVAL;
    }

    attributes_impl->m_guard_page_size = actual_guard_size;
    attributes_impl->m_reported_guard_page_size = guard_size; // POSIX, why?

#ifdef PTHREAD_DEBUG
    dbgprintf("pthread_attr_setguardsize: Thread attributes at %p, detach state %s, priority %d, guard page size %d, stack size %d, stack location %p\n",
        attributes_impl,
        (PTHREAD_CREATE_JOINABLE == attributes_impl->m_detach_state) ? "joinable" : "detached",
        attributes_impl->m_schedule_priority,
        attributes_impl->m_guard_page_size,
        attributes_impl->m_stack_size,
        attributes_impl->m_stack_location);
#endif

    return 0;
}
int pthread_attr_getschedparam(const pthread_attr_t* attributes, struct sched_param* p_sched_param)
{
    auto* attributes_impl = *(reinterpret_cast<const PthreadAttrImpl* const*>(attributes));
    if (!attributes_impl || !p_sched_param)
        return EINVAL;
    p_sched_param->sched_priority = attributes_impl->m_schedule_priority;
    return 0;
}

int pthread_attr_setschedparam(pthread_attr_t* attributes, const struct sched_param* p_sched_param)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));
    if (!attributes_impl || !p_sched_param)
        return EINVAL;
    if (p_sched_param->sched_priority < THREAD_PRIORITY_MIN || p_sched_param->sched_priority > THREAD_PRIORITY_MAX)
        return ENOTSUP;
    attributes_impl->m_schedule_priority = p_sched_param->sched_priority;

#ifdef PTHREAD_DEBUG
    dbgprintf("pthread_attr_setschedparam: Thread attributes at %p, detach state %s, priority %d, guard page size %d, stack size %d, stack location %p\n",
        attributes_impl,
        (PTHREAD_CREATE_JOINABLE == attributes_impl->m_detach_state) ? "joinable" : "detached",
        attributes_impl->m_schedule_priority,
        attributes_impl->m_guard_page_size,
        attributes_impl->m_stack_size,
        attributes_impl->m_stack_location);
#endif

    return 0;
}
int pthread_attr_getstack(const pthread_attr_t* attributes, void** p_stack_ptr, size_t* p_stack_size)
{
    auto* attributes_impl = *(reinterpret_cast<const PthreadAttrImpl* const*>(attributes));
    if (!attributes_impl || !p_stack_ptr || !p_stack_size)
        return EINVAL;
    *p_stack_ptr = attributes_impl->m_stack_location;
    *p_stack_size = attributes_impl->m_stack_size;
    return 0;
}

int pthread_attr_setstack(pthread_attr_t* attributes, void* p_stack, size_t stack_size)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));
    if (!attributes_impl || !p_stack)
        return EINVAL;

    // Check for required alignment on size
    if (0 != (stack_size % required_stack_alignment))
        return EINVAL;

    // FIXME: Check for required alignment on pointer?

    // FIXME: "[EACCES] The stack page(s) described by stackaddr and stacksize are not both readable and writable by the thread."
    // Have to check that the whole range is mapped to this process/thread? Can we defer this to create_thread?

    attributes_impl->m_stack_size = stack_size;
    attributes_impl->m_stack_location = p_stack;

#ifdef PTHREAD_DEBUG
    dbgprintf("pthread_attr_setstack: Thread attributes at %p, detach state %s, priority %d, guard page size %d, stack size %d, stack location %p\n",
        attributes_impl,
        (PTHREAD_CREATE_JOINABLE == attributes_impl->m_detach_state) ? "joinable" : "detached",
        attributes_impl->m_schedule_priority,
        attributes_impl->m_guard_page_size,
        attributes_impl->m_stack_size,
        attributes_impl->m_stack_location);
#endif

    return 0;
}
int pthread_attr_getstacksize(const pthread_attr_t* attributes, size_t* p_stack_size)
{
    auto* attributes_impl = *(reinterpret_cast<const PthreadAttrImpl* const*>(attributes));
    if (!attributes_impl || !p_stack_size)
        return EINVAL;
    *p_stack_size = attributes_impl->m_stack_size;
    return 0;
}

int pthread_attr_setstacksize(pthread_attr_t* attributes, size_t stack_size)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));
    if (!attributes_impl)
        return EINVAL;
    if ((stack_size < PTHREAD_STACK_MIN) || stack_size > highest_reasonable_stack_size)
        return EINVAL;
    attributes_impl->m_stack_size = stack_size;

#ifdef PTHREAD_DEBUG
    dbgprintf("pthread_attr_setstacksize: Thread attributes at %p, detach state %s, priority %d, guard page size %d, stack size %d, stack location %p\n",
        attributes_impl,
        (PTHREAD_CREATE_JOINABLE == attributes_impl->m_detach_state) ? "joinable" : "detached",
        attributes_impl->m_schedule_priority,
        attributes_impl->m_guard_page_size,
        attributes_impl->m_stack_size,
        attributes_impl->m_stack_location);
#endif

    return 0;
}
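// FIXME: These scheduling-parameter functions are stubs; they ignore their
// arguments and report success.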
int pthread_getschedparam([[maybe_unused]] pthread_t thread, [[maybe_unused]] int* policy, [[maybe_unused]] struct sched_param* param)
{
    return 0;
}

int pthread_setschedparam([[maybe_unused]] pthread_t thread, [[maybe_unused]] int policy, [[maybe_unused]] const struct sched_param* param)
{
    return 0;
}
int pthread_cond_init(pthread_cond_t* cond, const pthread_condattr_t* attr)
{
    cond->value = 0;
    cond->previous = 0;
    cond->clockid = attr ? attr->clockid : CLOCK_MONOTONIC_COARSE;
    return 0;
}

int pthread_cond_destroy(pthread_cond_t*)
{
    return 0;
}
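// Condition variables are built on the futex syscall: cond->value is a wake
// counter that waiters sleep on with FUTEX_WAIT_BITSET, while signal/broadcast
// bump the counter and issue FUTEX_WAKE for one or all waiters. cond->previous
// remembers the value the most recent waiter observed, so a signal can publish
// a value (previous + 1) that differs from what the waiters went to sleep on.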
static int futex_wait(uint32_t& futex_addr, uint32_t value, const struct timespec* abstime)
{
    int saved_errno = errno;
    // NOTE: FUTEX_WAIT takes a relative timeout, so use FUTEX_WAIT_BITSET instead!
    int rc = futex(&futex_addr, FUTEX_WAIT_BITSET, value, abstime, nullptr, FUTEX_BITSET_MATCH_ANY);
    if (rc < 0 && errno == EAGAIN) {
        // If we didn't wait, that's not an error
        errno = saved_errno;
        rc = 0;
    }
    return rc;
}

static int cond_wait(pthread_cond_t* cond, pthread_mutex_t* mutex, const struct timespec* abstime)
{
    u32 value = cond->value;
    cond->previous = value;
    pthread_mutex_unlock(mutex);
    int rc = futex_wait(cond->value, value, abstime);
    pthread_mutex_lock(mutex);
    return rc;
}
int pthread_cond_wait(pthread_cond_t* cond, pthread_mutex_t* mutex)
{
    int rc = cond_wait(cond, mutex, nullptr);
    ASSERT(rc == 0);
    return 0;
}

int pthread_condattr_init(pthread_condattr_t* attr)
{
    attr->clockid = CLOCK_MONOTONIC_COARSE;
    return 0;
}

int pthread_condattr_destroy(pthread_condattr_t*)
{
    return 0;
}

int pthread_condattr_setclock(pthread_condattr_t* attr, clockid_t clock)
{
    attr->clockid = clock;
    return 0;
}

int pthread_cond_timedwait(pthread_cond_t* cond, pthread_mutex_t* mutex, const struct timespec* abstime)
{
    return cond_wait(cond, mutex, abstime);
}
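// As with any condition variable, waits can wake spuriously, so callers should
// re-check their predicate in a loop. A minimal usage sketch (names are
// illustrative):
//
//     pthread_mutex_lock(&mutex);
//     while (!ready)
//         pthread_cond_wait(&cond, &mutex);
//     pthread_mutex_unlock(&mutex);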
int pthread_cond_signal(pthread_cond_t* cond)
{
    u32 value = cond->previous + 1;
    cond->value = value;
    int rc = futex(&cond->value, FUTEX_WAKE, 1, nullptr, nullptr, 0);
    ASSERT(rc >= 0);
    return 0;
}

int pthread_cond_broadcast(pthread_cond_t* cond)
{
    u32 value = cond->previous + 1;
    cond->value = value;
    int rc = futex(&cond->value, FUTEX_WAKE, INT32_MAX, nullptr, nullptr, 0);
    ASSERT(rc >= 0);
    return 0;
}
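// Thread-specific storage: the process-wide KeyTable maps key indices to
// destructors and is guarded by a mutex, while the per-thread SpecificTable
// holds the values themselves in __thread storage, so reads and writes of a
// thread's own values need no locking.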
static constexpr int max_keys = PTHREAD_KEYS_MAX;

typedef void (*KeyDestructor)(void*);

struct KeyTable {
    KeyDestructor destructors[max_keys] { nullptr };
    int next { 0 };
    pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
};

struct SpecificTable {
    void* values[max_keys] { nullptr };
};

static KeyTable s_keys;

__thread SpecificTable t_specifics;
int pthread_key_create(pthread_key_t* key, KeyDestructor destructor)
{
    int ret = 0;
    pthread_mutex_lock(&s_keys.mutex);
    if (s_keys.next >= max_keys) {
        ret = EAGAIN;
    } else {
        *key = s_keys.next++;
        s_keys.destructors[*key] = destructor;
        ret = 0;
    }
    pthread_mutex_unlock(&s_keys.mutex);
    return ret;
}

int pthread_key_delete(pthread_key_t key)
{
    if (key < 0 || key >= max_keys)
        return EINVAL;
    pthread_mutex_lock(&s_keys.mutex);
    s_keys.destructors[key] = nullptr;
    pthread_mutex_unlock(&s_keys.mutex);
    return 0;
}
void* pthread_getspecific(pthread_key_t key)
{
    if (key < 0)
        return nullptr;
    if (key >= max_keys)
        return nullptr;
    return t_specifics.values[key];
}

int pthread_setspecific(pthread_key_t key, const void* value)
{
    if (key < 0)
        return EINVAL;
    if (key >= max_keys)
        return EINVAL;
    t_specifics.values[key] = const_cast<void*>(value);
    return 0;
}
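// A minimal usage sketch (names are illustrative): create a key once, then each
// thread stores its own value; the destructor runs when that thread exits.
//
//     static pthread_key_t s_buffer_key;
//     pthread_key_create(&s_buffer_key, free);
//     pthread_setspecific(s_buffer_key, malloc(64));
//     void* my_buffer = pthread_getspecific(s_buffer_key);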
void KeyDestroyer::destroy_for_current_thread()
{
    // This function will either be called during exit_thread, for a pthread, or
    // during global program shutdown for the main thread.
    pthread_mutex_lock(&s_keys.mutex);
    size_t num_used_keys = s_keys.next;

    // Dr. POSIX accounts for weird key destructors setting their own key again.
    // Or even, setting other unrelated keys? Odd, but whatever the Doc says goes.
    for (size_t destruct_iteration = 0; destruct_iteration < PTHREAD_DESTRUCTOR_ITERATIONS; ++destruct_iteration) {
        bool any_nonnull_destructors = false;
        for (size_t key_index = 0; key_index < num_used_keys; ++key_index) {
            void* value = exchange(t_specifics.values[key_index], nullptr);
            if (value && s_keys.destructors[key_index]) {
                any_nonnull_destructors = true;
                (*s_keys.destructors[key_index])(value);
            }
        }
        if (!any_nonnull_destructors)
            break;
    }
    pthread_mutex_unlock(&s_keys.mutex);
}
int pthread_setname_np(pthread_t thread, const char* name)
{
    if (!name)
        return EFAULT;
    int rc = syscall(SC_set_thread_name, thread, name, strlen(name));
    __RETURN_PTHREAD_ERROR(rc);
}

int pthread_getname_np(pthread_t thread, char* buffer, size_t buffer_size)
{
    int rc = syscall(SC_get_thread_name, thread, buffer, buffer_size);
    __RETURN_PTHREAD_ERROR(rc);
}

int pthread_setcancelstate([[maybe_unused]] int state, [[maybe_unused]] int* oldstate)
{
    TODO();
}

int pthread_setcanceltype([[maybe_unused]] int type, [[maybe_unused]] int* oldtype)
{
    TODO();
}

int pthread_equal(pthread_t t1, pthread_t t2)
{
    return t1 == t2;
}

} // extern "C"