EventLoopImplementationUnix.cpp

/*
 * Copyright (c) 2023, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/BinaryHeap.h>
#include <AK/Singleton.h>
#include <AK/TemporaryChange.h>
#include <AK/Time.h>
#include <AK/WeakPtr.h>
#include <LibCore/Event.h>
#include <LibCore/EventLoopImplementationUnix.h>
#include <LibCore/EventReceiver.h>
#include <LibCore/Notifier.h>
#include <LibCore/Socket.h>
#include <LibCore/System.h>
#include <LibCore/ThreadEventQueue.h>
#include <pthread.h>
#include <sys/select.h>
#include <unistd.h>

namespace Core {

namespace {

struct ThreadData;
class TimeoutSet;

HashMap<pthread_t, ThreadData*> s_thread_data;
pthread_key_t s_thread_key;
static pthread_rwlock_t s_thread_data_lock_impl;
static pthread_rwlock_t* s_thread_data_lock = nullptr;
thread_local pthread_t s_thread_id;
thread_local OwnPtr<ThreadData> s_this_thread_data;

short notification_type_to_poll_events(NotificationType type)
{
    short events = 0;
    if (has_flag(type, NotificationType::Read))
        events |= POLLIN;
    if (has_flag(type, NotificationType::Write))
        events |= POLLOUT;
    return events;
}

bool has_flag(int value, int flag)
{
    return (value & flag) == flag;
}

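// A timeout stores either a relative duration or an absolute fire time in the union below:
// timeouts queued via TimeoutSet::schedule_relative() hold a duration until absolutize()
// converts it into a fire time, while timeouts queued via schedule_absolute() already hold a
// fire time. m_index records the timeout's position within the owning TimeoutSet
// (INVALID_INDEX when unscheduled).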
class EventLoopTimeout {
public:
    static constexpr ssize_t INVALID_INDEX = NumericLimits<ssize_t>::max();

    EventLoopTimeout() { }
    virtual ~EventLoopTimeout() = default;

    virtual void fire(TimeoutSet& timeout_set, MonotonicTime time) = 0;

    MonotonicTime fire_time() const { return m_fire_time; }

    void absolutize(Badge<TimeoutSet>, MonotonicTime current_time)
    {
        m_fire_time = current_time + m_duration;
    }

    ssize_t& index(Badge<TimeoutSet>) { return m_index; }
    void set_index(Badge<TimeoutSet>, ssize_t index) { m_index = index; }

    bool is_scheduled() const { return m_index != INVALID_INDEX; }

protected:
    union {
        AK::Duration m_duration;
        MonotonicTime m_fire_time;
    };

private:
    ssize_t m_index = INVALID_INDEX;
};

class TimeoutSet {
public:
    TimeoutSet() = default;

    Optional<MonotonicTime> next_timer_expiration()
    {
        if (!m_heap.is_empty()) {
            return m_heap.peek_min()->fire_time();
        } else {
            return {};
        }
    }

    void absolutize_relative_timeouts(MonotonicTime current_time)
    {
        for (auto timeout : m_scheduled_timeouts) {
            timeout->absolutize({}, current_time);
            m_heap.insert(timeout);
        }
        m_scheduled_timeouts.clear();
    }

    size_t fire_expired(MonotonicTime current_time)
    {
        size_t fired_count = 0;
        while (!m_heap.is_empty()) {
            auto& timeout = *m_heap.peek_min();
            if (timeout.fire_time() <= current_time) {
                ++fired_count;
                m_heap.pop_min();
                timeout.set_index({}, EventLoopTimeout::INVALID_INDEX);
                timeout.fire(*this, current_time);
            } else {
                break;
            }
        }
        return fired_count;
    }

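    // A timeout's index encodes where it currently lives: values >= 0 are positions in the
    // binary heap (kept up to date by the heap's index callback below), while a relative
    // timeout at position i in m_scheduled_timeouts is stored as -1 - i. unschedule() uses the
    // sign to pick the right container, and INVALID_INDEX means the timeout is not scheduled.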
    void schedule_relative(EventLoopTimeout* timeout)
    {
        timeout->set_index({}, -1 - static_cast<ssize_t>(m_scheduled_timeouts.size()));
        m_scheduled_timeouts.append(timeout);
    }

    void schedule_absolute(EventLoopTimeout* timeout)
    {
        m_heap.insert(timeout);
    }

    void unschedule(EventLoopTimeout* timeout)
    {
        if (timeout->index({}) < 0) {
            size_t i = -1 - timeout->index({});
            size_t j = m_scheduled_timeouts.size() - 1;
            VERIFY(m_scheduled_timeouts[i] == timeout);
            swap(m_scheduled_timeouts[i], m_scheduled_timeouts[j]);
            swap(m_scheduled_timeouts[i]->index({}), m_scheduled_timeouts[j]->index({}));
            (void)m_scheduled_timeouts.take_last();
        } else {
            m_heap.pop(timeout->index({}));
        }
        timeout->set_index({}, EventLoopTimeout::INVALID_INDEX);
    }

    void clear()
    {
        for (auto* timeout : m_heap.nodes_in_arbitrary_order())
            timeout->set_index({}, EventLoopTimeout::INVALID_INDEX);
        m_heap.clear();
        for (auto* timeout : m_scheduled_timeouts)
            timeout->set_index({}, EventLoopTimeout::INVALID_INDEX);
        m_scheduled_timeouts.clear();
    }

private:
    IntrusiveBinaryHeap<
        EventLoopTimeout*,
        decltype([](EventLoopTimeout* a, EventLoopTimeout* b) {
            return a->fire_time() < b->fire_time();
        }),
        decltype([](EventLoopTimeout* timeout, size_t index) {
            timeout->set_index({}, static_cast<ssize_t>(index));
        }),
        8>
        m_heap;
    Vector<EventLoopTimeout*, 8> m_scheduled_timeouts;
};

class EventLoopTimer final : public EventLoopTimeout {
public:
    EventLoopTimer() = default;

    void reload(MonotonicTime const& now) { m_fire_time = now + interval; }

    virtual void fire(TimeoutSet& timeout_set, MonotonicTime current_time) override
    {
        auto strong_owner = owner.strong_ref();

        if (!strong_owner)
            return;

        if (should_reload) {
            MonotonicTime next_fire_time = m_fire_time + interval;
            if (next_fire_time <= current_time) {
                next_fire_time = current_time + interval;
            }
            m_fire_time = next_fire_time;
            if (next_fire_time != current_time) {
                timeout_set.schedule_absolute(this);
            } else {
                // NOTE: Unfortunately we need to treat timeouts with a zero interval in a
                //       special way. TimeoutSet::schedule_absolute for them would result in an
                //       infinite loop. TimeoutSet::schedule_relative, on the other hand, does the
                //       correct thing and schedules them for the next iteration of the loop.
                m_duration = {};
                timeout_set.schedule_relative(this);
            }
        }

        // FIXME: While TimerShouldFireWhenNotVisible::Yes prevents the timer callback from being
        //        called, it doesn't allow the event loop to sleep, since it needs to constantly check
        //        whether is_visible_for_timer_purposes has changed. A better solution would be to
        //        unregister a timer and register it again when needed. This also has the added benefit
        //        of making fire_when_not_visible and is_visible_for_timer_purposes obsolete.
        if (fire_when_not_visible == TimerShouldFireWhenNotVisible::Yes || strong_owner->is_visible_for_timer_purposes())
            ThreadEventQueue::current().post_event(*strong_owner, make<TimerEvent>());
    }

    AK::Duration interval;
    bool should_reload { false };
    TimerShouldFireWhenNotVisible fire_when_not_visible { TimerShouldFireWhenNotVisible::No };
    WeakPtr<EventReceiver> owner;
    pthread_t owner_thread { 0 };
    Atomic<bool> is_being_deleted { false };
};

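// Per-thread event loop state. Each thread lazily creates its own ThreadData (owned by the
// thread_local OwnPtr above and torn down via the pthread key destructor), and registers it in
// s_thread_data so that cross-thread operations such as unregister_timer() and
// unregister_notifier() can look up the owning thread's data under s_thread_data_lock.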
struct ThreadData {
    static ThreadData& the()
    {
        if (!s_thread_data_lock) {
            pthread_rwlock_init(&s_thread_data_lock_impl, nullptr);
            s_thread_data_lock = &s_thread_data_lock_impl;
            pthread_key_create(&s_thread_key, [](void*) {
                s_this_thread_data.clear();
            });
        }
        if (s_thread_id == 0)
            s_thread_id = pthread_self();
        ThreadData* data = nullptr;
        if (!s_this_thread_data) {
            data = new ThreadData;
            s_this_thread_data = adopt_own(*data);

            pthread_rwlock_wrlock(&*s_thread_data_lock);
            s_thread_data.set(s_thread_id, s_this_thread_data.ptr());
            pthread_rwlock_unlock(&*s_thread_data_lock);
        } else {
            data = s_this_thread_data.ptr();
        }
        return *data;
    }

    static ThreadData* for_thread(pthread_t thread_id)
    {
        pthread_rwlock_rdlock(&*s_thread_data_lock);
        auto result = s_thread_data.get(thread_id).value_or(nullptr);
        pthread_rwlock_unlock(&*s_thread_data_lock);
        return result;
    }

    ThreadData()
    {
        pid = getpid();
        initialize_wake_pipe();
    }

    ~ThreadData()
    {
        pthread_rwlock_wrlock(&*s_thread_data_lock);
        s_thread_data.remove(s_thread_id);
        pthread_rwlock_unlock(&*s_thread_data_lock);
    }

    void initialize_wake_pipe()
    {
        if (wake_pipe_fds[0] != -1)
            close(wake_pipe_fds[0]);
        if (wake_pipe_fds[1] != -1)
            close(wake_pipe_fds[1]);

        wake_pipe_fds = MUST(Core::System::pipe2(O_CLOEXEC));

        // The wake pipe informs us of POSIX signals as well as manual calls to wake()
        VERIFY(poll_fds.size() == 0);
        poll_fds.append({ .fd = wake_pipe_fds[0], .events = POLLIN, .revents = 0 });
        notifier_by_index.append(nullptr);
    }

    // Each thread has its own timers, notifiers and a wake pipe.
    TimeoutSet timeouts;

    Vector<pollfd> poll_fds;
    HashMap<Notifier*, size_t> notifier_by_ptr;
    Vector<Notifier*> notifier_by_index;

    // The wake pipe is used to notify another event loop that someone has called wake(), or a signal has been received.
    // wake() writes 0i32 into the pipe, signals write the signal number (guaranteed non-zero).
    Array<int, 2> wake_pipe_fds { -1, -1 };

    pid_t pid { 0 };
};

}

EventLoopImplementationUnix::EventLoopImplementationUnix()
    : m_wake_pipe_fds(ThreadData::the().wake_pipe_fds)
{
}

EventLoopImplementationUnix::~EventLoopImplementationUnix() = default;

int EventLoopImplementationUnix::exec()
{
    for (;;) {
        if (m_exit_requested)
            return m_exit_code;
        pump(PumpMode::WaitForEvents);
    }
    VERIFY_NOT_REACHED();
}

size_t EventLoopImplementationUnix::pump(PumpMode mode)
{
    static_cast<EventLoopManagerUnix&>(EventLoopManager::the()).wait_for_events(mode);
    return ThreadEventQueue::current().process();
}

void EventLoopImplementationUnix::quit(int code)
{
    m_exit_requested = true;
    m_exit_code = code;
}

void EventLoopImplementationUnix::unquit()
{
    m_exit_requested = false;
    m_exit_code = 0;
}

bool EventLoopImplementationUnix::was_exit_requested() const
{
    return m_exit_requested;
}

void EventLoopImplementationUnix::post_event(EventReceiver& receiver, NonnullOwnPtr<Event>&& event)
{
    m_thread_event_queue.post_event(receiver, move(event));
    if (&m_thread_event_queue != &ThreadEventQueue::current())
        wake();
}

void EventLoopImplementationUnix::wake()
{
    int wake_event = 0;
    MUST(Core::System::write(m_wake_pipe_fds[1], { &wake_event, sizeof(wake_event) }));
}

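// On the read side (wait_for_events() below), each int drained from the wake pipe is interpreted
// per the protocol documented in ThreadData: a zero value is a wake() request, and a non-zero
// value is a POSIX signal number to dispatch.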
void EventLoopManagerUnix::wait_for_events(EventLoopImplementation::PumpMode mode)
{
    auto& thread_data = ThreadData::the();

retry:
    bool has_pending_events = ThreadEventQueue::current().has_pending_events();

    auto time_at_iteration_start = MonotonicTime::now_coarse();
    thread_data.timeouts.absolutize_relative_timeouts(time_at_iteration_start);

    // Figure out how long to wait at maximum.
    // This mainly depends on the PumpMode and whether we have pending events, but also the next expiring timer.
    int timeout = 0;
    bool should_wait_forever = false;
    if (mode == EventLoopImplementation::PumpMode::WaitForEvents && !has_pending_events) {
        auto next_timer_expiration = thread_data.timeouts.next_timer_expiration();
        if (next_timer_expiration.has_value()) {
            auto computed_timeout = next_timer_expiration.value() - time_at_iteration_start;
            if (computed_timeout.is_negative())
                computed_timeout = AK::Duration::zero();
            i64 true_timeout = computed_timeout.to_milliseconds();
            timeout = static_cast<i32>(min<i64>(AK::NumericLimits<i32>::max(), true_timeout));
        } else {
            should_wait_forever = true;
        }
    }

try_select_again:
    // poll() and wait for file system events, calls to wake(), POSIX signals, or timer expirations.
    ErrorOr<int> error_or_marked_fd_count = System::poll(thread_data.poll_fds, should_wait_forever ? -1 : timeout);
    auto time_after_poll = MonotonicTime::now_coarse();
    // Because POSIX, we might spuriously return from poll() with EINTR; just poll again.
    if (error_or_marked_fd_count.is_error()) {
        if (error_or_marked_fd_count.error().code() == EINTR)
            goto try_select_again;
        dbgln("EventLoopImplementationUnix::wait_for_events: {}", error_or_marked_fd_count.error());
        VERIFY_NOT_REACHED();
    }

    // We woke up due to a call to wake() or a POSIX signal.
    // Handle signals and see whether we need to handle events as well.
    if (has_flag(thread_data.poll_fds[0].revents, POLLIN)) {
        int wake_events[8];
        ssize_t nread;
        // We might receive another signal while read()ing here. That signal will still reach
        // handle_signal() properly, but the read() gets interrupted, so just retry if we were interrupted.
        do {
            errno = 0;
            nread = read(thread_data.wake_pipe_fds[0], wake_events, sizeof(wake_events));
            if (nread == 0)
                break;
        } while (nread < 0 && errno == EINTR);
        if (nread < 0) {
            perror("EventLoopImplementationUnix::wait_for_events: read from wake pipe");
            VERIFY_NOT_REACHED();
        }
        VERIFY(nread > 0);
        bool wake_requested = false;
        int event_count = nread / sizeof(wake_events[0]);
        for (int i = 0; i < event_count; i++) {
            if (wake_events[i] != 0)
                dispatch_signal(wake_events[i]);
            else
                wake_requested = true;
        }

        if (!wake_requested && nread == sizeof(wake_events))
            goto retry;
    }

    if (error_or_marked_fd_count.value() != 0) {
        // Handle file system notifiers by making them normal events.
        for (size_t i = 1; i < thread_data.poll_fds.size(); ++i) {
            // FIXME: Make the check work under Android, perhaps use ALooper.
#ifdef AK_OS_ANDROID
            auto& notifier = *thread_data.notifier_by_index[i];
            ThreadEventQueue::current().post_event(notifier, make<NotifierActivationEvent>(notifier.fd(), notifier.type()));
#else
            auto& revents = thread_data.poll_fds[i].revents;
            auto& notifier = *thread_data.notifier_by_index[i];

            NotificationType type = NotificationType::None;
            if (has_flag(revents, POLLIN))
                type |= NotificationType::Read;
            if (has_flag(revents, POLLOUT))
                type |= NotificationType::Write;
            if (has_flag(revents, POLLHUP))
                type |= NotificationType::HangUp;
            if (has_flag(revents, POLLERR))
                type |= NotificationType::Error;

            type &= notifier.type();

            if (type != NotificationType::None)
                ThreadEventQueue::current().post_event(notifier, make<NotifierActivationEvent>(notifier.fd(), type));
#endif
        }
    }

    // Handle expired timers.
    thread_data.timeouts.fire_expired(time_after_poll);
}

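// SignalHandlers collects all callbacks registered for a single signal number. Because a handler
// may register or unregister handlers for the same signal while dispatch() is iterating, changes
// made during dispatch are staged in m_handlers_pending (an empty Function value marks a pending
// removal) and applied once all handlers have run.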
class SignalHandlers : public RefCounted<SignalHandlers> {
    AK_MAKE_NONCOPYABLE(SignalHandlers);
    AK_MAKE_NONMOVABLE(SignalHandlers);

public:
    SignalHandlers(int signal_number, void (*handle_signal)(int));
    ~SignalHandlers();

    void dispatch();
    int add(Function<void(int)>&& handler);
    bool remove(int handler_id);

    bool is_empty() const
    {
        if (m_calling_handlers) {
            for (auto& handler : m_handlers_pending) {
                if (handler.value)
                    return false; // an add is pending
            }
        }
        return m_handlers.is_empty();
    }

    bool have(int handler_id) const
    {
        if (m_calling_handlers) {
            auto it = m_handlers_pending.find(handler_id);
            if (it != m_handlers_pending.end()) {
                if (!it->value)
                    return false; // a deletion is pending
            }
        }
        return m_handlers.contains(handler_id);
    }

    int m_signal_number;
    void (*m_original_handler)(int); // TODO: can't use sighandler_t?
    HashMap<int, Function<void(int)>> m_handlers;
    HashMap<int, Function<void(int)>> m_handlers_pending;
    bool m_calling_handlers { false };
};

struct SignalHandlersInfo {
    HashMap<int, NonnullRefPtr<SignalHandlers>> signal_handlers;
    int next_signal_id { 0 };
};

static Singleton<SignalHandlersInfo> s_signals;
template<bool create_if_null = true>
inline SignalHandlersInfo* signals_info()
{
    return s_signals.ptr();
}

void EventLoopManagerUnix::dispatch_signal(int signal_number)
{
    auto& info = *signals_info();
    auto handlers = info.signal_handlers.find(signal_number);
    if (handlers != info.signal_handlers.end()) {
        // Make sure we bump the ref count while dispatching the handlers!
        // This allows a handler to unregister/register while the handlers
        // are being called!
        auto handler = handlers->value;
        handler->dispatch();
    }
}

void EventLoopImplementationUnix::notify_forked_and_in_child()
{
    auto& thread_data = ThreadData::the();
    thread_data.timeouts.clear();
    thread_data.poll_fds.clear();
    thread_data.notifier_by_ptr.clear();
    thread_data.notifier_by_index.clear();
    thread_data.initialize_wake_pipe();
    if (auto* info = signals_info<false>()) {
        info->signal_handlers.clear();
        info->next_signal_id = 0;
    }
    thread_data.pid = getpid();
}

SignalHandlers::SignalHandlers(int signal_number, void (*handle_signal)(int))
    : m_signal_number(signal_number)
    , m_original_handler(signal(signal_number, handle_signal))
{
}

SignalHandlers::~SignalHandlers()
{
    signal(m_signal_number, m_original_handler);
}

void SignalHandlers::dispatch()
{
    TemporaryChange change(m_calling_handlers, true);
    for (auto& handler : m_handlers)
        handler.value(m_signal_number);
    if (!m_handlers_pending.is_empty()) {
        // Apply pending adds/removes
        for (auto& handler : m_handlers_pending) {
            if (handler.value) {
                auto result = m_handlers.set(handler.key, move(handler.value));
                VERIFY(result == AK::HashSetResult::InsertedNewEntry);
            } else {
                m_handlers.remove(handler.key);
            }
        }
        m_handlers_pending.clear();
    }
}

int SignalHandlers::add(Function<void(int)>&& handler)
{
    int id = ++signals_info()->next_signal_id; // TODO: worry about wrapping and duplicates?
    if (m_calling_handlers)
        m_handlers_pending.set(id, move(handler));
    else
        m_handlers.set(id, move(handler));
    return id;
}

bool SignalHandlers::remove(int handler_id)
{
    VERIFY(handler_id != 0);
    if (m_calling_handlers) {
        auto it = m_handlers.find(handler_id);
        if (it != m_handlers.end()) {
            // Mark pending remove
            m_handlers_pending.set(handler_id, {});
            return true;
        }
        it = m_handlers_pending.find(handler_id);
        if (it != m_handlers_pending.end()) {
            if (!it->value)
                return false; // already was marked as deleted
            it->value = nullptr;
            return true;
        }
        return false;
    }
    return m_handlers.remove(handler_id);
}

void EventLoopManagerUnix::handle_signal(int signal_number)
{
    VERIFY(signal_number != 0);
    auto& thread_data = ThreadData::the();
    // We MUST check if the current pid still matches, because there
    // is a window between fork() and exec() where a signal delivered
    // to our fork could be inadvertently routed to the parent process!
    if (getpid() == thread_data.pid) {
        int nwritten = write(thread_data.wake_pipe_fds[1], &signal_number, sizeof(signal_number));
        if (nwritten < 0) {
            perror("EventLoopImplementationUnix::register_signal: write");
            VERIFY_NOT_REACHED();
        }
    } else {
        // We're a fork who received a signal, reset thread_data.pid.
        thread_data.pid = getpid();
    }
}

int EventLoopManagerUnix::register_signal(int signal_number, Function<void(int)> handler)
{
    VERIFY(signal_number != 0);
    auto& info = *signals_info();
    auto handlers = info.signal_handlers.find(signal_number);
    if (handlers == info.signal_handlers.end()) {
        auto signal_handlers = adopt_ref(*new SignalHandlers(signal_number, EventLoopManagerUnix::handle_signal));
        auto handler_id = signal_handlers->add(move(handler));
        info.signal_handlers.set(signal_number, move(signal_handlers));
        return handler_id;
    } else {
        return handlers->value->add(move(handler));
    }
}

void EventLoopManagerUnix::unregister_signal(int handler_id)
{
    VERIFY(handler_id != 0);
    int remove_signal_number = 0;
    auto& info = *signals_info();
    for (auto& h : info.signal_handlers) {
        auto& handlers = *h.value;
        if (handlers.remove(handler_id)) {
            if (handlers.is_empty())
                remove_signal_number = handlers.m_signal_number;
            break;
        }
    }
    if (remove_signal_number != 0)
        info.signal_handlers.remove(remove_signal_number);
}

intptr_t EventLoopManagerUnix::register_timer(EventReceiver& object, int milliseconds, bool should_reload, TimerShouldFireWhenNotVisible fire_when_not_visible)
{
    VERIFY(milliseconds >= 0);
    auto& thread_data = ThreadData::the();
    auto timer = new EventLoopTimer;
    timer->owner_thread = s_thread_id;
    timer->owner = object;
    timer->interval = AK::Duration::from_milliseconds(milliseconds);
    timer->reload(MonotonicTime::now_coarse());
    timer->should_reload = should_reload;
    timer->fire_when_not_visible = fire_when_not_visible;
    thread_data.timeouts.schedule_absolute(timer);
    return bit_cast<intptr_t>(timer);
}

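// The timer id returned above is simply the EventLoopTimer pointer bit_cast to an intptr_t, and
// unregister_timer() below casts it back. A minimal sketch of the expected call pattern
// (hypothetical caller; assumes these entry points are reachable through EventLoopManager::the()):
//
//     auto timer_id = EventLoopManager::the().register_timer(receiver, 250, /* should_reload */ true, TimerShouldFireWhenNotVisible::No);
//     // ... later:
//     EventLoopManager::the().unregister_timer(timer_id);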
void EventLoopManagerUnix::unregister_timer(intptr_t timer_id)
{
    auto* timer = bit_cast<EventLoopTimer*>(timer_id);
    auto thread_data_ptr = ThreadData::for_thread(timer->owner_thread);
    if (!thread_data_ptr)
        return;
    auto& thread_data = *thread_data_ptr;
    auto expected = false;
    if (timer->is_being_deleted.compare_exchange_strong(expected, true, AK::MemoryOrder::memory_order_acq_rel)) {
        if (timer->is_scheduled())
            thread_data.timeouts.unschedule(timer);
        delete timer;
    }
}

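// Notifier bookkeeping: poll_fds[0] is always the wake pipe, and for i >= 1 poll_fds[i] belongs
// to notifier_by_index[i], with notifier_by_ptr mapping each Notifier back to its index.
// Unregistering swaps the removed entry with the last one in both vectors (and updates the map)
// so that removal stays O(1).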
void EventLoopManagerUnix::register_notifier(Notifier& notifier)
{
    auto& thread_data = ThreadData::the();

    thread_data.notifier_by_ptr.set(&notifier, thread_data.poll_fds.size());
    thread_data.notifier_by_index.append(&notifier);
    thread_data.poll_fds.append({
        .fd = notifier.fd(),
        .events = notification_type_to_poll_events(notifier.type()),
        .revents = 0,
    });

    notifier.set_owner_thread(s_thread_id);
}

void EventLoopManagerUnix::unregister_notifier(Notifier& notifier)
{
    auto thread_data_ptr = ThreadData::for_thread(notifier.owner_thread());
    if (!thread_data_ptr)
        return;

    auto& thread_data = *thread_data_ptr;
    auto it = thread_data.notifier_by_ptr.find(&notifier);
    VERIFY(it != thread_data.notifier_by_ptr.end());

    size_t notifier_index = it->value;
    thread_data.notifier_by_ptr.remove(it);

    if (notifier_index + 1 != thread_data.poll_fds.size()) {
        swap(thread_data.poll_fds[notifier_index], thread_data.poll_fds.last());
        swap(thread_data.notifier_by_index[notifier_index], thread_data.notifier_by_index.last());
        thread_data.notifier_by_ptr.set(thread_data.notifier_by_index[notifier_index], notifier_index);
    }

    thread_data.poll_fds.take_last();
    thread_data.notifier_by_index.take_last();
}

void EventLoopManagerUnix::did_post_event()
{
}

EventLoopManagerUnix::~EventLoopManagerUnix() = default;

NonnullOwnPtr<EventLoopImplementation> EventLoopManagerUnix::make_implementation()
{
    return adopt_own(*new EventLoopImplementationUnix);
}

}