pthread.cpp

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <AK/Assertions.h>
#include <AK/Atomic.h>
#include <AK/Debug.h>
#include <AK/StdLibExtras.h>
#include <Kernel/API/Syscall.h>
#include <LibSystem/syscall.h>
#include <bits/pthread_integration.h>
#include <errno.h>
#include <limits.h>
#include <pthread.h>
#include <serenity.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <syscall.h>
#include <time.h>
#include <unistd.h>

namespace {

using PthreadAttrImpl = Syscall::SC_create_thread_params;

struct KeyDestroyer {
    ~KeyDestroyer() { destroy_for_current_thread(); }
    static void destroy_for_current_thread();
};

} // end anonymous namespace

constexpr size_t required_stack_alignment = 4 * MiB;
constexpr size_t highest_reasonable_guard_size = 32 * PAGE_SIZE;
constexpr size_t highest_reasonable_stack_size = 8 * MiB; // That's the default in Ubuntu?

// Create an RAII object with a global destructor to destroy pthread keys for the main thread.
// Impact of this: Any global object that wants to do something with pthread_getspecific
// in its destructor from the main thread might be in for a nasty surprise.
static KeyDestroyer s_key_destroyer;
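
// NOTE: Serenity system calls report failure by returning a negative errno value, while
// pthread functions are expected to return the (positive) error code itself and 0 on
// success; the macro below converts between the two conventions.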

#define __RETURN_PTHREAD_ERROR(rc) \
    return ((rc) < 0 ? -(rc) : 0)

extern "C" {

static void* pthread_create_helper(void* (*routine)(void*), void* argument)
{
    void* ret_val = routine(argument);
    pthread_exit(ret_val);
    return nullptr;
}
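
// NOTE: create_thread() below prepares the new thread's stack so that, when the kernel starts
// the thread at pthread_create_helper, the stack top looks like an ordinary call frame: a fake
// return address, then the entry routine, then its argument. This assumes the 32-bit SysV
// calling convention used by this port; the alignment loop keeps the frame 16-byte aligned as
// that ABI expects.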

static int create_thread(pthread_t* thread, void* (*entry)(void*), void* argument, PthreadAttrImpl* thread_params)
{
    void** stack = (void**)((uintptr_t)thread_params->m_stack_location + thread_params->m_stack_size);

    auto push_on_stack = [&](void* data) {
        stack--;
        *stack = data;
        thread_params->m_stack_size -= sizeof(void*);
    };

    // We set up the stack for pthread_create_helper.
    // Note that we need to align the stack to 16B, accounting for
    // the fact that we also push 8 bytes.
    while (((uintptr_t)stack - 8) % 16 != 0)
        push_on_stack(nullptr);

    push_on_stack(argument);
    push_on_stack((void*)entry);
    ASSERT((uintptr_t)stack % 16 == 0);

    // Push a fake return address
    push_on_stack(nullptr);

    int rc = syscall(SC_create_thread, pthread_create_helper, thread_params);
    if (rc >= 0)
        *thread = rc;
    __RETURN_PTHREAD_ERROR(rc);
}

[[noreturn]] static void exit_thread(void* code)
{
    KeyDestroyer::destroy_for_current_thread();
    syscall(SC_exit_thread, code);
    ASSERT_NOT_REACHED();
}

int pthread_self()
{
    return __pthread_self();
}

int pthread_create(pthread_t* thread, pthread_attr_t* attributes, void* (*start_routine)(void*), void* argument_to_start_routine)
{
    if (!thread)
        return -EINVAL;

    PthreadAttrImpl default_attributes {};
    PthreadAttrImpl** arg_attributes = reinterpret_cast<PthreadAttrImpl**>(attributes);
    PthreadAttrImpl* used_attributes = arg_attributes ? *arg_attributes : &default_attributes;

    if (!used_attributes->m_stack_location) {
        // Adjust the stack size; the user might have called pthread_attr_setstacksize(),
        // which has no restrictions on size/alignment.
        if (0 != (used_attributes->m_stack_size % required_stack_alignment))
            used_attributes->m_stack_size += required_stack_alignment - (used_attributes->m_stack_size % required_stack_alignment);

        used_attributes->m_stack_location = mmap_with_name(nullptr, used_attributes->m_stack_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, 0, 0, "Thread stack");
        if (!used_attributes->m_stack_location)
            return -1;
    }

#if PTHREAD_DEBUG
    dbgprintf("pthread_create: Creating thread with attributes at %p, detach state %s, priority %d, guard page size %d, stack size %d, stack location %p\n",
        used_attributes,
        (PTHREAD_CREATE_JOINABLE == used_attributes->m_detach_state) ? "joinable" : "detached",
        used_attributes->m_schedule_priority,
        used_attributes->m_guard_page_size,
        used_attributes->m_stack_size,
        used_attributes->m_stack_location);
#endif

    return create_thread(thread, start_routine, argument_to_start_routine, used_attributes);
}

void pthread_exit(void* value_ptr)
{
    exit_thread(value_ptr);
}

int pthread_join(pthread_t thread, void** exit_value_ptr)
{
    int rc = syscall(SC_join_thread, thread, exit_value_ptr);
    __RETURN_PTHREAD_ERROR(rc);
}

int pthread_detach(pthread_t thread)
{
    int rc = syscall(SC_detach_thread, thread);
    __RETURN_PTHREAD_ERROR(rc);
}

int pthread_sigmask(int how, const sigset_t* set, sigset_t* old_set)
{
    if (sigprocmask(how, set, old_set))
        return errno;
    return 0;
}

int pthread_mutex_init(pthread_mutex_t* mutex, const pthread_mutexattr_t* attributes)
{
    return __pthread_mutex_init(mutex, attributes);
}

int pthread_mutex_destroy(pthread_mutex_t*)
{
    return 0;
}

int pthread_mutex_lock(pthread_mutex_t* mutex)
{
    return __pthread_mutex_lock(mutex);
}
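
// NOTE: Unlike lock/unlock, trylock is implemented here rather than forwarded to the
// __pthread_mutex_* helpers: a single compare-and-swap either takes the lock or reports
// EBUSY, with an extra check so a recursive mutex can be re-entered by its current owner.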

int pthread_mutex_trylock(pthread_mutex_t* mutex)
{
    auto& atomic = reinterpret_cast<Atomic<u32>&>(mutex->lock);
    u32 expected = false;
    if (!atomic.compare_exchange_strong(expected, true, AK::memory_order_acq_rel)) {
        if (mutex->type == PTHREAD_MUTEX_RECURSIVE && mutex->owner == pthread_self()) {
            mutex->level++;
            return 0;
        }
        return EBUSY;
    }
    mutex->owner = pthread_self();
    mutex->level = 0;
    return 0;
}

int pthread_mutex_unlock(pthread_mutex_t* mutex)
{
    return __pthread_mutex_unlock(mutex);
}

int pthread_mutexattr_init(pthread_mutexattr_t* attr)
{
    attr->type = PTHREAD_MUTEX_NORMAL;
    return 0;
}

int pthread_mutexattr_destroy(pthread_mutexattr_t*)
{
    return 0;
}

int pthread_mutexattr_settype(pthread_mutexattr_t* attr, int type)
{
    if (!attr)
        return EINVAL;
    if (type != PTHREAD_MUTEX_NORMAL && type != PTHREAD_MUTEX_RECURSIVE)
        return EINVAL;
    attr->type = type;
    return 0;
}

int pthread_attr_init(pthread_attr_t* attributes)
{
    auto* impl = new PthreadAttrImpl {};
    *attributes = impl;

#if PTHREAD_DEBUG
    dbgprintf("pthread_attr_init: New thread attributes at %p, detach state %s, priority %d, guard page size %d, stack size %d, stack location %p\n",
        impl,
        (PTHREAD_CREATE_JOINABLE == impl->m_detach_state) ? "joinable" : "detached",
        impl->m_schedule_priority,
        impl->m_guard_page_size,
        impl->m_stack_size,
        impl->m_stack_location);
#endif

    return 0;
}

int pthread_attr_destroy(pthread_attr_t* attributes)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));
    delete attributes_impl;
    return 0;
}

int pthread_attr_getdetachstate(const pthread_attr_t* attributes, int* p_detach_state)
{
    auto* attributes_impl = *(reinterpret_cast<const PthreadAttrImpl* const*>(attributes));
    if (!attributes_impl || !p_detach_state)
        return EINVAL;
    *p_detach_state = attributes_impl->m_detach_state;
    return 0;
}

int pthread_attr_setdetachstate(pthread_attr_t* attributes, int detach_state)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));
    if (!attributes_impl)
        return EINVAL;

    if (detach_state != PTHREAD_CREATE_JOINABLE && detach_state != PTHREAD_CREATE_DETACHED)
        return EINVAL;

    attributes_impl->m_detach_state = detach_state;

#if PTHREAD_DEBUG
    dbgprintf("pthread_attr_setdetachstate: Thread attributes at %p, detach state %s, priority %d, guard page size %d, stack size %d, stack location %p\n",
        attributes_impl,
        (PTHREAD_CREATE_JOINABLE == attributes_impl->m_detach_state) ? "joinable" : "detached",
        attributes_impl->m_schedule_priority,
        attributes_impl->m_guard_page_size,
        attributes_impl->m_stack_size,
        attributes_impl->m_stack_location);
#endif

    return 0;
}

int pthread_attr_getguardsize(const pthread_attr_t* attributes, size_t* p_guard_size)
{
    auto* attributes_impl = *(reinterpret_cast<const PthreadAttrImpl* const*>(attributes));
    if (!attributes_impl || !p_guard_size)
        return EINVAL;
    *p_guard_size = attributes_impl->m_reported_guard_page_size;
    return 0;
}

int pthread_attr_setguardsize(pthread_attr_t* attributes, size_t guard_size)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));
    if (!attributes_impl)
        return EINVAL;

    size_t actual_guard_size = guard_size;
    // round up
    if (0 != (guard_size % PAGE_SIZE))
        actual_guard_size += PAGE_SIZE - (guard_size % PAGE_SIZE);

    // what is the user even doing?
    if (actual_guard_size > highest_reasonable_guard_size) {
        return EINVAL;
    }

    attributes_impl->m_guard_page_size = actual_guard_size;
    attributes_impl->m_reported_guard_page_size = guard_size; // POSIX, why?

#if PTHREAD_DEBUG
    dbgprintf("pthread_attr_setguardsize: Thread attributes at %p, detach state %s, priority %d, guard page size %d, stack size %d, stack location %p\n",
        attributes_impl,
        (PTHREAD_CREATE_JOINABLE == attributes_impl->m_detach_state) ? "joinable" : "detached",
        attributes_impl->m_schedule_priority,
        attributes_impl->m_guard_page_size,
        attributes_impl->m_stack_size,
        attributes_impl->m_stack_location);
#endif

    return 0;
}

int pthread_attr_getschedparam(const pthread_attr_t* attributes, struct sched_param* p_sched_param)
{
    auto* attributes_impl = *(reinterpret_cast<const PthreadAttrImpl* const*>(attributes));
    if (!attributes_impl || !p_sched_param)
        return EINVAL;
    p_sched_param->sched_priority = attributes_impl->m_schedule_priority;
    return 0;
}

int pthread_attr_setschedparam(pthread_attr_t* attributes, const struct sched_param* p_sched_param)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));
    if (!attributes_impl || !p_sched_param)
        return EINVAL;

    if (p_sched_param->sched_priority < THREAD_PRIORITY_MIN || p_sched_param->sched_priority > THREAD_PRIORITY_MAX)
        return ENOTSUP;

    attributes_impl->m_schedule_priority = p_sched_param->sched_priority;

#if PTHREAD_DEBUG
    dbgprintf("pthread_attr_setschedparam: Thread attributes at %p, detach state %s, priority %d, guard page size %d, stack size %d, stack location %p\n",
        attributes_impl,
        (PTHREAD_CREATE_JOINABLE == attributes_impl->m_detach_state) ? "joinable" : "detached",
        attributes_impl->m_schedule_priority,
        attributes_impl->m_guard_page_size,
        attributes_impl->m_stack_size,
        attributes_impl->m_stack_location);
#endif

    return 0;
}

int pthread_attr_getstack(const pthread_attr_t* attributes, void** p_stack_ptr, size_t* p_stack_size)
{
    auto* attributes_impl = *(reinterpret_cast<const PthreadAttrImpl* const*>(attributes));
    if (!attributes_impl || !p_stack_ptr || !p_stack_size)
        return EINVAL;
    *p_stack_ptr = attributes_impl->m_stack_location;
    *p_stack_size = attributes_impl->m_stack_size;
    return 0;
}

int pthread_attr_setstack(pthread_attr_t* attributes, void* p_stack, size_t stack_size)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));
    if (!attributes_impl || !p_stack)
        return EINVAL;

    // Check for required alignment on size
    if (0 != (stack_size % required_stack_alignment))
        return EINVAL;

    // FIXME: Check for required alignment on pointer?

    // FIXME: "[EACCES] The stack page(s) described by stackaddr and stacksize are not both readable and writable by the thread."
    // Have to check that the whole range is mapped to this process/thread? Can we defer this to create_thread?

    attributes_impl->m_stack_size = stack_size;
    attributes_impl->m_stack_location = p_stack;

#if PTHREAD_DEBUG
    dbgprintf("pthread_attr_setstack: Thread attributes at %p, detach state %s, priority %d, guard page size %d, stack size %d, stack location %p\n",
        attributes_impl,
        (PTHREAD_CREATE_JOINABLE == attributes_impl->m_detach_state) ? "joinable" : "detached",
        attributes_impl->m_schedule_priority,
        attributes_impl->m_guard_page_size,
        attributes_impl->m_stack_size,
        attributes_impl->m_stack_location);
#endif

    return 0;
}

int pthread_attr_getstacksize(const pthread_attr_t* attributes, size_t* p_stack_size)
{
    auto* attributes_impl = *(reinterpret_cast<const PthreadAttrImpl* const*>(attributes));
    if (!attributes_impl || !p_stack_size)
        return EINVAL;
    *p_stack_size = attributes_impl->m_stack_size;
    return 0;
}

int pthread_attr_setstacksize(pthread_attr_t* attributes, size_t stack_size)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));
    if (!attributes_impl)
        return EINVAL;

    if ((stack_size < PTHREAD_STACK_MIN) || stack_size > highest_reasonable_stack_size)
        return EINVAL;

    attributes_impl->m_stack_size = stack_size;

#if PTHREAD_DEBUG
    dbgprintf("pthread_attr_setstacksize: Thread attributes at %p, detach state %s, priority %d, guard page size %d, stack size %d, stack location %p\n",
        attributes_impl,
        (PTHREAD_CREATE_JOINABLE == attributes_impl->m_detach_state) ? "joinable" : "detached",
        attributes_impl->m_schedule_priority,
        attributes_impl->m_guard_page_size,
        attributes_impl->m_stack_size,
        attributes_impl->m_stack_location);
#endif

    return 0;
}

int pthread_getschedparam([[maybe_unused]] pthread_t thread, [[maybe_unused]] int* policy, [[maybe_unused]] struct sched_param* param)
{
    return 0;
}

int pthread_setschedparam([[maybe_unused]] pthread_t thread, [[maybe_unused]] int policy, [[maybe_unused]] const struct sched_param* param)
{
    return 0;
}

int pthread_cond_init(pthread_cond_t* cond, const pthread_condattr_t* attr)
{
    cond->value = 0;
    cond->previous = 0;
    cond->clockid = attr ? attr->clockid : CLOCK_MONOTONIC_COARSE;
    return 0;
}

int pthread_cond_destroy(pthread_cond_t*)
{
    return 0;
}

static int futex_wait(uint32_t& futex_addr, uint32_t value, const struct timespec* abstime)
{
    int saved_errno = errno;
    // NOTE: FUTEX_WAIT takes a relative timeout, so use FUTEX_WAIT_BITSET instead!
    int rc = futex(&futex_addr, FUTEX_WAIT_BITSET, value, abstime, nullptr, FUTEX_BITSET_MATCH_ANY);
    if (rc < 0 && errno == EAGAIN) {
        // If we didn't wait, that's not an error
        errno = saved_errno;
        rc = 0;
    }
    return rc;
}
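
// NOTE: cond_wait() below snapshots cond->value before dropping the mutex and then
// futex-waits until the value changes from that snapshot. Because pthread_cond_signal()
// and pthread_cond_broadcast() bump the value before waking, a signal that lands between
// the unlock and the futex call simply makes the wait return immediately, so the classic
// lost-wakeup window is avoided.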

static int cond_wait(pthread_cond_t* cond, pthread_mutex_t* mutex, const struct timespec* abstime)
{
    u32 value = cond->value;
    cond->previous = value;
    pthread_mutex_unlock(mutex);
    int rc = futex_wait(cond->value, value, abstime);
    pthread_mutex_lock(mutex);
    return rc;
}

int pthread_cond_wait(pthread_cond_t* cond, pthread_mutex_t* mutex)
{
    int rc = cond_wait(cond, mutex, nullptr);
    ASSERT(rc == 0);
    return 0;
}

int pthread_condattr_init(pthread_condattr_t* attr)
{
    attr->clockid = CLOCK_MONOTONIC_COARSE;
    return 0;
}

int pthread_condattr_destroy(pthread_condattr_t*)
{
    return 0;
}

int pthread_condattr_setclock(pthread_condattr_t* attr, clockid_t clock)
{
    attr->clockid = clock;
    return 0;
}

int pthread_cond_timedwait(pthread_cond_t* cond, pthread_mutex_t* mutex, const struct timespec* abstime)
{
    return cond_wait(cond, mutex, abstime);
}

int pthread_cond_signal(pthread_cond_t* cond)
{
    u32 value = cond->previous + 1;
    cond->value = value;
    int rc = futex(&cond->value, FUTEX_WAKE, 1, nullptr, nullptr, 0);
    ASSERT(rc >= 0);
    return 0;
}

int pthread_cond_broadcast(pthread_cond_t* cond)
{
    u32 value = cond->previous + 1;
    cond->value = value;
    int rc = futex(&cond->value, FUTEX_WAKE, INT32_MAX, nullptr, nullptr, 0);
    ASSERT(rc >= 0);
    return 0;
}

static constexpr int max_keys = PTHREAD_KEYS_MAX;

typedef void (*KeyDestructor)(void*);

struct KeyTable {
    KeyDestructor destructors[max_keys] { nullptr };
    int next { 0 };
    pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
};

struct SpecificTable {
    void* values[max_keys] { nullptr };
};
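
// NOTE: Thread-specific storage is split into one process-wide table of destructors
// (s_keys, shared by all threads and guarded by its mutex) and one per-thread table of
// values (t_specifics, living in TLS), both indexed by the same key.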

static KeyTable s_keys;

__thread SpecificTable t_specifics;

int pthread_key_create(pthread_key_t* key, KeyDestructor destructor)
{
    int ret = 0;
    pthread_mutex_lock(&s_keys.mutex);
    if (s_keys.next >= max_keys) {
        ret = EAGAIN;
    } else {
        *key = s_keys.next++;
        s_keys.destructors[*key] = destructor;
        ret = 0;
    }
    pthread_mutex_unlock(&s_keys.mutex);
    return ret;
}

int pthread_key_delete(pthread_key_t key)
{
    if (key < 0 || key >= max_keys)
        return EINVAL;
    pthread_mutex_lock(&s_keys.mutex);
    s_keys.destructors[key] = nullptr;
    pthread_mutex_unlock(&s_keys.mutex);
    return 0;
}

void* pthread_getspecific(pthread_key_t key)
{
    if (key < 0)
        return nullptr;
    if (key >= max_keys)
        return nullptr;
    return t_specifics.values[key];
}

int pthread_setspecific(pthread_key_t key, const void* value)
{
    if (key < 0)
        return EINVAL;
    if (key >= max_keys)
        return EINVAL;

    t_specifics.values[key] = const_cast<void*>(value);
    return 0;
}

void KeyDestroyer::destroy_for_current_thread()
{
    // This function will either be called during exit_thread, for a pthread, or
    // during global program shutdown for the main thread.
    pthread_mutex_lock(&s_keys.mutex);
    size_t num_used_keys = s_keys.next;

    // Dr. POSIX accounts for weird key destructors setting their own key again.
    // Or even, setting other unrelated keys? Odd, but whatever the Doc says goes.
    for (size_t destruct_iteration = 0; destruct_iteration < PTHREAD_DESTRUCTOR_ITERATIONS; ++destruct_iteration) {
        bool any_nonnull_destructors = false;
        for (size_t key_index = 0; key_index < num_used_keys; ++key_index) {
            void* value = exchange(t_specifics.values[key_index], nullptr);
            if (value && s_keys.destructors[key_index]) {
                any_nonnull_destructors = true;
                (*s_keys.destructors[key_index])(value);
            }
        }
        if (!any_nonnull_destructors)
            break;
    }
    pthread_mutex_unlock(&s_keys.mutex);
}

int pthread_setname_np(pthread_t thread, const char* name)
{
    if (!name)
        return EFAULT;
    int rc = syscall(SC_set_thread_name, thread, name, strlen(name));
    __RETURN_PTHREAD_ERROR(rc);
}

int pthread_getname_np(pthread_t thread, char* buffer, size_t buffer_size)
{
    int rc = syscall(SC_get_thread_name, thread, buffer, buffer_size);
    __RETURN_PTHREAD_ERROR(rc);
}

int pthread_setcancelstate([[maybe_unused]] int state, [[maybe_unused]] int* oldstate)
{
    TODO();
}

int pthread_setcanceltype([[maybe_unused]] int type, [[maybe_unused]] int* oldtype)
{
    TODO();
}

int pthread_equal(pthread_t t1, pthread_t t2)
{
    return t1 == t2;
}

// FIXME: Use the fancy futex mechanism above to write an rw lock.
// For the time being, let's just use a less-than-good lock to get things working.
int pthread_rwlock_destroy(pthread_rwlock_t* rl)
{
    if (!rl)
        return 0;
    return 0;
}

// In a very non-straightforward way, this value is composed of two 32-bit integers:
// the top 32 bits are reserved for the ID of the write-locking thread (if any),
// and the bottom 32 bits are:
//     top 2 bits (30,31): reader wake mask, writer wake mask
//     middle 16 bits: information
//         bit 16: someone is waiting to write
//         bit 17: locked for write
//     bottom 16 bits (0..15): reader count
constexpr static u32 reader_wake_mask = 1 << 30;
constexpr static u32 writer_wake_mask = 1 << 31;
constexpr static u32 writer_locked_mask = 1 << 17;
constexpr static u32 writer_intent_mask = 1 << 16;
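
// NOTE: For illustration: with this layout, a lower word of 0x0000'0003 means three readers
// hold the lock, while 0x0003'0000 means it is write-locked (bit 17) with write intent noted
// (bit 16), and the writer's thread ID sits in the upper 32 bits of the lock value.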

int pthread_rwlock_init(pthread_rwlock_t* __restrict lockp, const pthread_rwlockattr_t* __restrict attr)
{
    // Just ignore the attributes. use defaults for now.
    (void)attr;

    // No readers, no writer, not locked at all.
    *lockp = 0;
    return 0;
}

// Note that this function does not care about the top 32 bits at all.
static int rwlock_rdlock_maybe_timed(u32* lockp, const struct timespec* timeout = nullptr, bool only_once = false, int value_if_timeout = -1, int value_if_okay = -2)
{
    auto current = AK::atomic_load(lockp);
    for (; !only_once;) {
        // First, see if this is locked for writing
        // if it's not, try to add to the counter.
        // If someone is waiting to write, and there is one or no other readers, let them have the lock.
        if (!(current & writer_locked_mask)) {
            auto count = (u16)current;
            if (!(current & writer_intent_mask) || count > 1) {
                ++count;
                // Keep the flag bits intact; only the reader count in the low 16 bits changes.
                auto desired = (current & 0xffff0000u) | count;
                auto did_exchange = AK::atomic_compare_exchange_strong(lockp, current, desired, AK::MemoryOrder::memory_order_acquire);
                if (!did_exchange)
                    continue; // tough luck, try again.
                return value_if_okay;
            }
        }

        // If no one else is waiting for the read wake bit, set it.
        if (!(current & reader_wake_mask)) {
            auto desired = current | reader_wake_mask;
            auto did_exchange = AK::atomic_compare_exchange_strong(lockp, current, desired, AK::MemoryOrder::memory_order_acquire);
            if (!did_exchange)
                continue; // Something interesting happened!
            current = desired;
        }

        // Seems like someone is writing (or is interested in writing and we let them have the lock)
        // wait until they're done.
        auto rc = futex(lockp, FUTEX_WAIT_BITSET, current, timeout, nullptr, reader_wake_mask);
        if (rc < 0 && errno == ETIMEDOUT && timeout) {
            return value_if_timeout;
        }
        if (rc < 0 && errno != EAGAIN) {
            // Something broke. let's just bail out.
            return errno;
        }
        errno = 0;
        // Reload the 'current' value
        current = AK::atomic_load(lockp);
    }

    return value_if_timeout;
}

static int rwlock_wrlock_maybe_timed(pthread_rwlock_t* lockval_p, const struct timespec* timeout = nullptr, bool only_once = false, int value_if_timeout = -1, int value_if_okay = -2)
{
    u32* lockp = reinterpret_cast<u32*>(lockval_p);
    auto current = AK::atomic_load(lockp);
    for (; !only_once;) {
        // First, see if this is locked for writing, and if there are any readers.
        // if not, lock it.
        // If someone is waiting to write, let them have the lock.
        if (!(current & writer_locked_mask) && ((u16)current) == 0) {
            if (!(current & writer_intent_mask)) {
                auto desired = current | writer_locked_mask | writer_intent_mask;
                auto did_exchange = AK::atomic_compare_exchange_strong(lockp, current, desired, AK::MemoryOrder::memory_order_acquire);
                if (!did_exchange)
                    continue;

                // Now that we've locked the value, it's safe to set our thread ID.
                AK::atomic_store(reinterpret_cast<i32*>(lockval_p) + 1, pthread_self());
                return value_if_okay;
            }
        }

        // That didn't work, if no one else is waiting for the write bit, set it.
        if (!(current & writer_wake_mask)) {
            auto desired = current | writer_wake_mask | writer_intent_mask;
            auto did_exchange = AK::atomic_compare_exchange_strong(lockp, current, desired, AK::MemoryOrder::memory_order_acquire);
            if (!did_exchange)
                continue; // Something interesting happened!
            current = desired;
        }

        // Seems like someone is writing (or is interested in writing and we let them have the lock)
        // wait until they're done.
        auto rc = futex(lockp, FUTEX_WAIT_BITSET, current, timeout, nullptr, writer_wake_mask);
        if (rc < 0 && errno == ETIMEDOUT && timeout) {
            return value_if_timeout;
        }
        if (rc < 0 && errno != EAGAIN) {
            // Something broke. let's just bail out.
            return errno;
        }
        errno = 0;
        // Reload the 'current' value
        current = AK::atomic_load(lockp);
    }

    return value_if_timeout;
}

int pthread_rwlock_rdlock(pthread_rwlock_t* lockp)
{
    if (!lockp)
        return EINVAL;

    return rwlock_rdlock_maybe_timed(reinterpret_cast<u32*>(lockp), nullptr, false, 0, 0);
}

int pthread_rwlock_timedrdlock(pthread_rwlock_t* __restrict lockp, const struct timespec* __restrict timespec)
{
    if (!lockp)
        return EINVAL;

    auto rc = rwlock_rdlock_maybe_timed(reinterpret_cast<u32*>(lockp), timespec);
    if (rc == -2) // Acquired the lock.
        return 0;
    if (rc == -1) // Timed out.
        return ETIMEDOUT;
    return rc;
}

int pthread_rwlock_timedwrlock(pthread_rwlock_t* __restrict lockp, const struct timespec* __restrict timespec)
{
    if (!lockp)
        return EINVAL;

    auto rc = rwlock_wrlock_maybe_timed(lockp, timespec);
    if (rc == -2) // Acquired the lock.
        return 0;
    if (rc == -1) // Timed out.
        return ETIMEDOUT;
    return rc;
}

int pthread_rwlock_tryrdlock(pthread_rwlock_t* lockp)
{
    if (!lockp)
        return EINVAL;

    return rwlock_rdlock_maybe_timed(reinterpret_cast<u32*>(lockp), nullptr, true, EBUSY, 0);
}

int pthread_rwlock_trywrlock(pthread_rwlock_t* lockp)
{
    if (!lockp)
        return EINVAL;

    return rwlock_wrlock_maybe_timed(lockp, nullptr, true, EBUSY, 0);
}

int pthread_rwlock_unlock(pthread_rwlock_t* lockval_p)
{
    if (!lockval_p)
        return EINVAL;

    // This is a weird API, we don't really know whether we're unlocking write or read...
    auto lockp = reinterpret_cast<u32*>(lockval_p);
    auto current = AK::atomic_load(lockp, AK::MemoryOrder::memory_order_relaxed);
    if (current & writer_locked_mask) {
        // If this lock is locked for writing, its owner better be us!
        auto owner_id = AK::atomic_load(reinterpret_cast<i32*>(lockval_p) + 1);
        auto my_id = pthread_self();
        if (owner_id != my_id)
            return EINVAL; // you don't own this lock, silly.

        // Now just unlock it.
        auto desired = current & ~(writer_locked_mask | writer_intent_mask);
        AK::atomic_store(lockp, desired, AK::MemoryOrder::memory_order_release);

        // Then wake both readers and writers, if any.
        auto rc = futex(lockp, FUTEX_WAKE_BITSET, current, nullptr, nullptr, (current & writer_wake_mask) | reader_wake_mask);
        if (rc < 0)
            return errno;
        return 0;
    }

    for (;;) {
        auto count = (u16)current;
        if (!count) {
            // Are you crazy? this isn't even locked!
            return EINVAL;
        }
        --count;
        // Keep the flag bits intact; only drop one reader from the low 16 bits.
        auto desired = (current & 0xffff0000u) | count;
        auto did_exchange = AK::atomic_compare_exchange_strong(lockp, current, desired, AK::MemoryOrder::memory_order_release);
        if (!did_exchange)
            continue; // tough luck, try again.
        break;
    }

    // Finally, unlocked at last!
    return 0;
}

int pthread_rwlock_wrlock(pthread_rwlock_t* lockp)
{
    if (!lockp)
        return EINVAL;

    return rwlock_wrlock_maybe_timed(lockp, nullptr, false, 0, 0);
}

int pthread_rwlockattr_destroy(pthread_rwlockattr_t*)
{
    return 0;
}

int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t* __restrict, int* __restrict)
{
    ASSERT_NOT_REACHED();
}

int pthread_rwlockattr_init(pthread_rwlockattr_t*)
{
    ASSERT_NOT_REACHED();
}

int pthread_rwlockattr_setpshared(pthread_rwlockattr_t*, int)
{
    ASSERT_NOT_REACHED();
}

int pthread_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void))
{
    if (prepare)
        __pthread_fork_atfork_register_prepare(prepare);
    if (parent)
        __pthread_fork_atfork_register_parent(parent);
    if (child)
        __pthread_fork_atfork_register_child(child);
    return 0;
}

} // extern "C"