EventLoopImplementationUnix.cpp 23 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701
  1. /*
  2. * Copyright (c) 2023, Andreas Kling <kling@serenityos.org>
  3. *
  4. * SPDX-License-Identifier: BSD-2-Clause
  5. */
  6. #include <AK/BinaryHeap.h>
  7. #include <AK/Singleton.h>
  8. #include <AK/TemporaryChange.h>
  9. #include <AK/Time.h>
  10. #include <AK/WeakPtr.h>
  11. #include <LibCore/Event.h>
  12. #include <LibCore/EventLoopImplementationUnix.h>
  13. #include <LibCore/EventReceiver.h>
  14. #include <LibCore/Notifier.h>
  15. #include <LibCore/Socket.h>
  16. #include <LibCore/System.h>
  17. #include <LibCore/ThreadEventQueue.h>
  18. #include <pthread.h>
  19. #include <sys/select.h>
  20. #include <unistd.h>
  21. namespace Core {
  22. namespace {
  23. struct ThreadData;
  24. class TimeoutSet;
  25. HashMap<pthread_t, OwnPtr<ThreadData>> s_thread_data;
  26. static pthread_rwlock_t s_thread_data_lock_impl;
  27. static pthread_rwlock_t* s_thread_data_lock = nullptr;
  28. thread_local pthread_t s_thread_id;
  29. short notification_type_to_poll_events(NotificationType type)
  30. {
  31. short events = 0;
  32. if (has_flag(type, NotificationType::Read))
  33. events |= POLLIN;
  34. if (has_flag(type, NotificationType::Write))
  35. events |= POLLOUT;
  36. return events;
  37. }
  38. bool has_flag(int value, int flag)
  39. {
  40. return (value & flag) == flag;
  41. }
// Base class for anything schedulable on a TimeoutSet. A timeout is either
// "relative" (the union holds m_duration, a delay) or "absolute" (the union
// holds m_fire_time, a deadline); absolutize() converts the former into the
// latter. m_index records where the TimeoutSet currently stores this object.
class EventLoopTimeout {
public:
    // Sentinel meaning "not currently scheduled anywhere".
    static constexpr ssize_t INVALID_INDEX = NumericLimits<ssize_t>::max();

    EventLoopTimeout() { }
    virtual ~EventLoopTimeout() = default;

    // Invoked by TimeoutSet::fire_expired() once the deadline has passed.
    virtual void fire(TimeoutSet& timeout_set, MonotonicTime time) = 0;

    MonotonicTime fire_time() const { return m_fire_time; }

    // Convert the stored relative duration into an absolute deadline.
    // Only meaningful while the union holds m_duration.
    void absolutize(Badge<TimeoutSet>, MonotonicTime current_time)
    {
        m_fire_time = current_time + m_duration;
    }

    // Index bookkeeping for TimeoutSet: negative values encode a slot in the
    // pending (relative) list, non-negative values a slot in the heap.
    ssize_t& index(Badge<TimeoutSet>) { return m_index; }
    void set_index(Badge<TimeoutSet>, ssize_t index) { m_index = index; }

    bool is_scheduled() const { return m_index != INVALID_INDEX; }

protected:
    union {
        Duration m_duration;
        MonotonicTime m_fire_time;
    };

private:
    ssize_t m_index = INVALID_INDEX;
};
// Per-thread collection of pending timeouts. Absolute timeouts live in a
// min-heap ordered by fire time; freshly scheduled relative timeouts are
// parked in m_scheduled_timeouts until absolutize_relative_timeouts() moves
// them into the heap at the start of the next loop iteration.
class TimeoutSet {
public:
    TimeoutSet() = default;

    // Deadline of the soonest absolute timeout, if any.
    Optional<MonotonicTime> next_timer_expiration()
    {
        if (!m_heap.is_empty()) {
            return m_heap.peek_min()->fire_time();
        } else {
            return {};
        }
    }

    // Convert every parked relative timeout into an absolute one and insert it
    // into the heap.
    void absolutize_relative_timeouts(MonotonicTime current_time)
    {
        for (auto timeout : m_scheduled_timeouts) {
            timeout->absolutize({}, current_time);
            m_heap.insert(timeout);
        }
        m_scheduled_timeouts.clear();
    }

    // Pop and fire every timeout whose deadline is <= current_time. Returns
    // how many fired. Note that fire() may reschedule the timeout onto this
    // set again (see EventLoopTimer::fire).
    size_t fire_expired(MonotonicTime current_time)
    {
        size_t fired_count = 0;
        while (!m_heap.is_empty()) {
            auto& timeout = *m_heap.peek_min();
            if (timeout.fire_time() <= current_time) {
                ++fired_count;
                m_heap.pop_min();
                timeout.set_index({}, EventLoopTimeout::INVALID_INDEX);
                timeout.fire(*this, current_time);
            } else {
                break;
            }
        }
        return fired_count;
    }

    // Park a relative timeout until the next absolutize_relative_timeouts().
    // Its index is encoded as (-1 - position) so unschedule() can locate it.
    void schedule_relative(EventLoopTimeout* timeout)
    {
        timeout->set_index({}, -1 - static_cast<ssize_t>(m_scheduled_timeouts.size()));
        m_scheduled_timeouts.append(timeout);
    }

    // Insert a timeout whose deadline is already absolute.
    void schedule_absolute(EventLoopTimeout* timeout)
    {
        m_heap.insert(timeout);
    }

    // Remove a timeout from wherever it currently lives (pending list or heap).
    void unschedule(EventLoopTimeout* timeout)
    {
        if (timeout->index({}) < 0) {
            // Swap-remove from the pending list, fixing up the moved entry's index.
            size_t i = -1 - timeout->index({});
            size_t j = m_scheduled_timeouts.size() - 1;
            VERIFY(m_scheduled_timeouts[i] == timeout);
            swap(m_scheduled_timeouts[i], m_scheduled_timeouts[j]);
            swap(m_scheduled_timeouts[i]->index({}), m_scheduled_timeouts[j]->index({}));
            (void)m_scheduled_timeouts.take_last();
        } else {
            m_heap.pop(timeout->index({}));
        }
        timeout->set_index({}, EventLoopTimeout::INVALID_INDEX);
    }

    // Drop all timeouts (used after fork), marking each as unscheduled.
    void clear()
    {
        for (auto* timeout : m_heap.nodes_in_arbitrary_order())
            timeout->set_index({}, EventLoopTimeout::INVALID_INDEX);
        m_heap.clear();
        for (auto* timeout : m_scheduled_timeouts)
            timeout->set_index({}, EventLoopTimeout::INVALID_INDEX);
        m_scheduled_timeouts.clear();
    }

private:
    // Intrusive min-heap; the second lambda writes each node's heap slot back
    // into the timeout so unschedule() can remove it in O(log n).
    IntrusiveBinaryHeap<
        EventLoopTimeout*,
        decltype([](EventLoopTimeout* a, EventLoopTimeout* b) {
            return a->fire_time() < b->fire_time();
        }),
        decltype([](EventLoopTimeout* timeout, size_t index) {
            timeout->set_index({}, static_cast<ssize_t>(index));
        }),
        8>
        m_heap;
    Vector<EventLoopTimeout*, 8> m_scheduled_timeouts;
};
// A one-shot or repeating timer bound to an EventReceiver. fire() posts a
// TimerEvent to the current thread's event queue and, for repeating timers,
// reschedules itself on the TimeoutSet.
class EventLoopTimer final : public EventLoopTimeout {
public:
    EventLoopTimer() = default;

    // Reset the deadline to `now` + interval.
    void reload(MonotonicTime const& now) { m_fire_time = now + interval; }

    virtual void fire(TimeoutSet& timeout_set, MonotonicTime current_time) override
    {
        // If the owning object is gone, silently drop the tick (and do not reschedule).
        auto strong_owner = owner.strong_ref();
        if (!strong_owner)
            return;
        if (should_reload) {
            MonotonicTime next_fire_time = m_fire_time + interval;
            if (next_fire_time <= current_time) {
                // We missed one or more periods; schedule relative to now instead.
                next_fire_time = current_time + interval;
            }
            m_fire_time = next_fire_time;
            if (next_fire_time != current_time) {
                timeout_set.schedule_absolute(this);
            } else {
                // NOTE: Unfortunately we need to treat timeouts with the zero interval in a
                // special way. TimeoutSet::schedule_absolute for them will result in an
                // infinite loop. TimeoutSet::schedule_relative, on the other hand, will do a
                // correct thing of scheduling them for the next iteration of the loop.
                m_duration = {};
                timeout_set.schedule_relative(this);
            }
        }
        // FIXME: While TimerShouldFireWhenNotVisible::Yes prevents the timer callback from being
        // called, it doesn't allow event loop to sleep since it needs to constantly check if
        // is_visible_for_timer_purposes changed. A better solution will be to unregister a
        // timer and register it back again when needed. This also has an added benefit of
        // making fire_when_not_visible and is_visible_for_timer_purposes obsolete.
        if (fire_when_not_visible == TimerShouldFireWhenNotVisible::Yes || strong_owner->is_visible_for_timer_purposes())
            ThreadEventQueue::current().post_event(*strong_owner, make<TimerEvent>());
    }

    Duration interval;
    bool should_reload { false };
    TimerShouldFireWhenNotVisible fire_when_not_visible { TimerShouldFireWhenNotVisible::No };
    WeakPtr<EventReceiver> owner;
    pthread_t owner_thread { 0 };
    // Guards against concurrent double-delete in unregister_timer().
    Atomic<bool> is_being_deleted { false };
};
// Per-thread event loop state (timers, notifiers, wake pipe). Instances are
// created lazily on first access and stored in the global s_thread_data map,
// guarded by s_thread_data_lock.
struct ThreadData {
    static ThreadData& the()
    {
        // NOTE(review): this lazy rwlock initialization is itself unsynchronized;
        // it appears to assume the first call happens before any concurrent
        // access — confirm.
        if (!s_thread_data_lock) {
            pthread_rwlock_init(&s_thread_data_lock_impl, nullptr);
            s_thread_data_lock = &s_thread_data_lock_impl;
        }
        if (s_thread_id == 0)
            s_thread_id = pthread_self();
        ThreadData* data = nullptr;
        pthread_rwlock_rdlock(&*s_thread_data_lock);
        if (!s_thread_data.contains(s_thread_id)) {
            data = new ThreadData;
            // Upgrade to a write lock for insertion. No re-check is needed
            // after re-locking: the key is our own thread id, so no other
            // thread can insert it in the unlocked window.
            pthread_rwlock_unlock(&*s_thread_data_lock);
            pthread_rwlock_wrlock(&*s_thread_data_lock);
            s_thread_data.set(s_thread_id, adopt_own(*data));
        } else {
            data = s_thread_data.get(s_thread_id).value();
        }
        pthread_rwlock_unlock(&*s_thread_data_lock);
        return *data;
    }

    // Look up another thread's ThreadData; the entry must already exist.
    static ThreadData& for_thread(pthread_t thread_id)
    {
        pthread_rwlock_rdlock(&*s_thread_data_lock);
        auto& result = *s_thread_data.get(thread_id).value();
        pthread_rwlock_unlock(&*s_thread_data_lock);
        return result;
    }

    ThreadData()
    {
        pid = getpid();
        initialize_wake_pipe();
    }

    // (Re-)create the wake pipe, closing any previous fds (used after fork),
    // and register its read end as poll_fds slot 0.
    void initialize_wake_pipe()
    {
        if (wake_pipe_fds[0] != -1)
            close(wake_pipe_fds[0]);
        if (wake_pipe_fds[1] != -1)
            close(wake_pipe_fds[1]);
        wake_pipe_fds = MUST(Core::System::pipe2(O_CLOEXEC));
        // The wake pipe informs us of POSIX signals as well as manual calls to wake()
        VERIFY(poll_fds.size() == 0);
        poll_fds.append({ .fd = wake_pipe_fds[0], .events = POLLIN, .revents = 0 });
        // Slot 0 has no Notifier; keep the two vectors index-aligned.
        notifier_by_index.append(nullptr);
    }

    // Each thread has its own timers, notifiers and a wake pipe.
    TimeoutSet timeouts;
    // poll_fds[i] and notifier_by_index[i] always describe the same notifier.
    Vector<pollfd> poll_fds;
    HashMap<Notifier*, size_t> notifier_by_ptr;
    Vector<Notifier*> notifier_by_index;
    // The wake pipe is used to notify another event loop that someone has called wake(), or a signal has been received.
    // wake() writes 0i32 into the pipe, signals write the signal number (guaranteed non-zero).
    Array<int, 2> wake_pipe_fds { -1, -1 };
    pid_t pid { 0 };
};
  241. }
// Bind this loop instance to the calling thread's wake pipe, so wake() can
// write without re-looking-up ThreadData.
EventLoopImplementationUnix::EventLoopImplementationUnix()
    : m_wake_pipe_fds(ThreadData::the().wake_pipe_fds)
{
}

EventLoopImplementationUnix::~EventLoopImplementationUnix() = default;
  247. int EventLoopImplementationUnix::exec()
  248. {
  249. for (;;) {
  250. if (m_exit_requested)
  251. return m_exit_code;
  252. pump(PumpMode::WaitForEvents);
  253. }
  254. VERIFY_NOT_REACHED();
  255. }
// Wait for events according to `mode`, then process everything queued on the
// current thread's event queue. Returns the number of events processed.
size_t EventLoopImplementationUnix::pump(PumpMode mode)
{
    static_cast<EventLoopManagerUnix&>(EventLoopManager::the()).wait_for_events(mode);
    return ThreadEventQueue::current().process();
}
  261. void EventLoopImplementationUnix::quit(int code)
  262. {
  263. m_exit_requested = true;
  264. m_exit_code = code;
  265. }
  266. void EventLoopImplementationUnix::unquit()
  267. {
  268. m_exit_requested = false;
  269. m_exit_code = 0;
  270. }
// Whether quit() has been called without a subsequent unquit().
bool EventLoopImplementationUnix::was_exit_requested() const
{
    return m_exit_requested;
}
// Queue an event for `receiver` on this loop's event queue. If the posting
// thread is not the loop's own thread, wake the loop so it notices the event.
void EventLoopImplementationUnix::post_event(EventReceiver& receiver, NonnullOwnPtr<Event>&& event)
{
    m_thread_event_queue.post_event(receiver, move(event));
    if (&m_thread_event_queue != &ThreadEventQueue::current())
        wake();
}
// Wake the loop by writing a zero (i.e. "not a signal number") into the wake
// pipe; the poll() in wait_for_events() will see POLLIN on the read end.
void EventLoopImplementationUnix::wake()
{
    int wake_event = 0;
    MUST(Core::System::write(m_wake_pipe_fds[1], { &wake_event, sizeof(wake_event) }));
}
// One iteration of the poll-based wait: compute the poll timeout from the next
// timer deadline, poll all registered fds plus the wake pipe, drain the wake
// pipe (dispatching any signal numbers it carried), turn fd activity into
// NotifierActivationEvents, and finally fire expired timers.
void EventLoopManagerUnix::wait_for_events(EventLoopImplementation::PumpMode mode)
{
    auto& thread_data = ThreadData::the();
retry:
    bool has_pending_events = ThreadEventQueue::current().has_pending_events();
    auto time_at_iteration_start = MonotonicTime::now_coarse();
    thread_data.timeouts.absolutize_relative_timeouts(time_at_iteration_start);
    // Figure out how long to wait at maximum.
    // This mainly depends on the PumpMode and whether we have pending events, but also the next expiring timer.
    int timeout = 0;
    bool should_wait_forever = false;
    if (mode == EventLoopImplementation::PumpMode::WaitForEvents && !has_pending_events) {
        auto next_timer_expiration = thread_data.timeouts.next_timer_expiration();
        if (next_timer_expiration.has_value()) {
            // Clamp the wait into [0, INT32_MAX] milliseconds for poll().
            auto computed_timeout = next_timer_expiration.value() - time_at_iteration_start;
            if (computed_timeout.is_negative())
                computed_timeout = Duration::zero();
            i64 true_timeout = computed_timeout.to_milliseconds();
            timeout = static_cast<i32>(min<i64>(AK::NumericLimits<i32>::max(), true_timeout));
        } else {
            // No timers pending: block indefinitely until an fd or wake-up.
            should_wait_forever = true;
        }
    }
try_select_again:
    // select() and wait for file system events, calls to wake(), POSIX signals, or timer expirations.
    ErrorOr<int> error_or_marked_fd_count = System::poll(thread_data.poll_fds, should_wait_forever ? -1 : timeout);
    auto time_after_poll = MonotonicTime::now_coarse();
    // Because POSIX, we might spuriously return from select() with EINTR; just select again.
    if (error_or_marked_fd_count.is_error()) {
        if (error_or_marked_fd_count.error().code() == EINTR)
            goto try_select_again;
        dbgln("EventLoopImplementationUnix::wait_for_events: {}", error_or_marked_fd_count.error());
        VERIFY_NOT_REACHED();
    }
    // We woke up due to a call to wake() or a POSIX signal.
    // Handle signals and see whether we need to handle events as well.
    if (has_flag(thread_data.poll_fds[0].revents, POLLIN)) {
        int wake_events[8];
        ssize_t nread;
        // We might receive another signal while read()ing here. The signal will go to the handle_signal properly,
        // but we get interrupted. Therefore, just retry while we were interrupted.
        do {
            errno = 0;
            nread = read(thread_data.wake_pipe_fds[0], wake_events, sizeof(wake_events));
            if (nread == 0)
                break;
        } while (nread < 0 && errno == EINTR);
        if (nread < 0) {
            perror("EventLoopImplementationUnix::wait_for_events: read from wake pipe");
            VERIFY_NOT_REACHED();
        }
        VERIFY(nread > 0);
        bool wake_requested = false;
        int event_count = nread / sizeof(wake_events[0]);
        for (int i = 0; i < event_count; i++) {
            // A zero entry is a plain wake(); anything else is a signal number.
            if (wake_events[i] != 0)
                dispatch_signal(wake_events[i]);
            else
                wake_requested = true;
        }
        // If the buffer came back completely full of signal numbers, the pipe
        // may hold more; go around again to drain it.
        if (!wake_requested && nread == sizeof(wake_events))
            goto retry;
    }
    if (error_or_marked_fd_count.value() != 0) {
        // Handle file system notifiers by making them normal events.
        // Index 0 is the wake pipe and is skipped here.
        for (size_t i = 1; i < thread_data.poll_fds.size(); ++i) {
            auto& revents = thread_data.poll_fds[i].revents;
            auto& notifier = *thread_data.notifier_by_index[i];
            NotificationType type = NotificationType::None;
            if (has_flag(revents, POLLIN))
                type |= NotificationType::Read;
            if (has_flag(revents, POLLOUT))
                type |= NotificationType::Write;
            if (has_flag(revents, POLLHUP))
                type |= NotificationType::HangUp;
            if (has_flag(revents, POLLERR))
                type |= NotificationType::Error;
            // Only deliver the notification kinds the notifier asked for.
            type &= notifier.type();
            if (type != NotificationType::None)
                ThreadEventQueue::current().post_event(notifier, make<NotifierActivationEvent>(notifier.fd(), type));
        }
    }
    // Handle expired timers.
    thread_data.timeouts.fire_expired(time_after_poll);
}
// Bookkeeping for all callbacks registered against a single POSIX signal.
// While dispatch() is running (m_calling_handlers == true), adds and removes
// are deferred into m_handlers_pending — a non-null value is a pending add, a
// null value a pending removal — and applied once dispatch finishes.
class SignalHandlers : public RefCounted<SignalHandlers> {
    AK_MAKE_NONCOPYABLE(SignalHandlers);
    AK_MAKE_NONMOVABLE(SignalHandlers);
public:
    SignalHandlers(int signal_number, void (*handle_signal)(int));
    ~SignalHandlers();

    void dispatch();
    int add(Function<void(int)>&& handler);
    bool remove(int handler_id);

    // True when no handlers remain, taking pending adds into account.
    bool is_empty() const
    {
        if (m_calling_handlers) {
            for (auto& handler : m_handlers_pending) {
                if (handler.value)
                    return false; // an add is pending
            }
        }
        return m_handlers.is_empty();
    }

    // True when handler_id is registered, taking pending removals into account.
    bool have(int handler_id) const
    {
        if (m_calling_handlers) {
            auto it = m_handlers_pending.find(handler_id);
            if (it != m_handlers_pending.end()) {
                if (!it->value)
                    return false; // a deletion is pending
            }
        }
        return m_handlers.contains(handler_id);
    }

    int m_signal_number;
    void (*m_original_handler)(int); // TODO: can't use sighandler_t?
    HashMap<int, Function<void(int)>> m_handlers;
    HashMap<int, Function<void(int)>> m_handlers_pending;
    bool m_calling_handlers { false };
};
// Global registry: one SignalHandlers per signal number, plus the counter used
// to mint unique handler ids.
struct SignalHandlersInfo {
    HashMap<int, NonnullRefPtr<SignalHandlers>> signal_handlers;
    int next_signal_id { 0 };
};
static Singleton<SignalHandlersInfo> s_signals;

// Accessor for the global signal-handler table.
// NOTE(review): the create_if_null template parameter is currently ignored —
// s_signals.ptr() is returned unconditionally, so signals_info<false>() callers
// (see notify_forked_and_in_child) still get a pointer; confirm whether a
// non-creating lookup was intended here.
template<bool create_if_null = true>
inline SignalHandlersInfo* signals_info()
{
    return s_signals.ptr();
}
// Run every handler registered for `signal_number`, if any. Called from the
// event loop (after the wake pipe is drained), not from signal context.
void EventLoopManagerUnix::dispatch_signal(int signal_number)
{
    auto& info = *signals_info();
    auto handlers = info.signal_handlers.find(signal_number);
    if (handlers != info.signal_handlers.end()) {
        // Make sure we bump the ref count while dispatching the handlers!
        // This allows a handler to unregister/register while the handlers
        // are being called!
        auto handler = handlers->value;
        handler->dispatch();
    }
}
// Reset all inherited event loop state in a freshly fork()ed child: timers,
// notifiers, the wake pipe (old fds closed, new pipe created), any registered
// signal handlers, and the cached pid.
void EventLoopImplementationUnix::notify_forked_and_in_child()
{
    auto& thread_data = ThreadData::the();
    thread_data.timeouts.clear();
    // poll_fds must be emptied before initialize_wake_pipe(), which VERIFYs it.
    thread_data.poll_fds.clear();
    thread_data.notifier_by_ptr.clear();
    thread_data.notifier_by_index.clear();
    thread_data.initialize_wake_pipe();
    if (auto* info = signals_info<false>()) {
        info->signal_handlers.clear();
        info->next_signal_id = 0;
    }
    thread_data.pid = getpid();
}
// Install `handle_signal` as the process-wide handler for signal_number,
// remembering the previous handler so the destructor can restore it.
SignalHandlers::SignalHandlers(int signal_number, void (*handle_signal)(int))
    : m_signal_number(signal_number)
    , m_original_handler(signal(signal_number, handle_signal))
{
}
// Restore the handler that was in place before ours was installed.
SignalHandlers::~SignalHandlers()
{
    signal(m_signal_number, m_original_handler);
}
// Invoke every registered handler for this signal. Setting m_calling_handlers
// makes add()/remove() during iteration defer into m_handlers_pending, which
// is applied afterwards (non-null value = add, null value = remove).
void SignalHandlers::dispatch()
{
    TemporaryChange change(m_calling_handlers, true);
    for (auto& handler : m_handlers)
        handler.value(m_signal_number);
    if (!m_handlers_pending.is_empty()) {
        // Apply pending adds/removes
        for (auto& handler : m_handlers_pending) {
            if (handler.value) {
                auto result = m_handlers.set(handler.key, move(handler.value));
                VERIFY(result == AK::HashSetResult::InsertedNewEntry);
            } else {
                m_handlers.remove(handler.key);
            }
        }
        m_handlers_pending.clear();
    }
}
// Register a handler; returns a process-unique id for later removal.
// Deferred into m_handlers_pending if a dispatch is currently in progress.
int SignalHandlers::add(Function<void(int)>&& handler)
{
    int id = ++signals_info()->next_signal_id; // TODO: worry about wrapping and duplicates?
    if (m_calling_handlers)
        m_handlers_pending.set(id, move(handler));
    else
        m_handlers.set(id, move(handler));
    return id;
}
// Unregister a handler by id; returns true if it existed. During dispatch,
// removal is deferred: an active handler is marked for deletion by storing a
// null value under its id in m_handlers_pending, and a pending add is
// cancelled the same way.
bool SignalHandlers::remove(int handler_id)
{
    VERIFY(handler_id != 0);
    if (m_calling_handlers) {
        auto it = m_handlers.find(handler_id);
        if (it != m_handlers.end()) {
            // Mark pending remove
            m_handlers_pending.set(handler_id, {});
            return true;
        }
        it = m_handlers_pending.find(handler_id);
        if (it != m_handlers_pending.end()) {
            if (!it->value)
                return false; // already was marked as deleted
            it->value = nullptr;
            return true;
        }
        return false;
    }
    return m_handlers.remove(handler_id);
}
  500. void EventLoopManagerUnix::handle_signal(int signal_number)
  501. {
  502. VERIFY(signal_number != 0);
  503. auto& thread_data = ThreadData::the();
  504. // We MUST check if the current pid still matches, because there
  505. // is a window between fork() and exec() where a signal delivered
  506. // to our fork could be inadvertently routed to the parent process!
  507. if (getpid() == thread_data.pid) {
  508. int nwritten = write(thread_data.wake_pipe_fds[1], &signal_number, sizeof(signal_number));
  509. if (nwritten < 0) {
  510. perror("EventLoopImplementationUnix::register_signal: write");
  511. VERIFY_NOT_REACHED();
  512. }
  513. } else {
  514. // We're a fork who received a signal, reset thread_data.pid.
  515. thread_data.pid = getpid();
  516. }
  517. }
// Register `handler` for signal_number, creating the per-signal SignalHandlers
// entry (which installs the raw handler) on first use. Returns the handler id.
int EventLoopManagerUnix::register_signal(int signal_number, Function<void(int)> handler)
{
    VERIFY(signal_number != 0);
    auto& info = *signals_info();
    auto handlers = info.signal_handlers.find(signal_number);
    if (handlers == info.signal_handlers.end()) {
        auto signal_handlers = adopt_ref(*new SignalHandlers(signal_number, EventLoopManagerUnix::handle_signal));
        auto handler_id = signal_handlers->add(move(handler));
        info.signal_handlers.set(signal_number, move(signal_handlers));
        return handler_id;
    } else {
        return handlers->value->add(move(handler));
    }
}
// Remove the handler with this id (searching all signals). If it was the last
// handler for its signal, drop the whole SignalHandlers entry, whose destructor
// restores the original signal disposition.
void EventLoopManagerUnix::unregister_signal(int handler_id)
{
    VERIFY(handler_id != 0);
    int remove_signal_number = 0;
    auto& info = *signals_info();
    for (auto& h : info.signal_handlers) {
        auto& handlers = *h.value;
        if (handlers.remove(handler_id)) {
            if (handlers.is_empty())
                remove_signal_number = handlers.m_signal_number;
            break;
        }
    }
    if (remove_signal_number != 0)
        info.signal_handlers.remove(remove_signal_number);
}
// Create and schedule a timer for `object`. Returns an opaque id (the timer
// pointer, via bit_cast) that unregister_timer() accepts. The timer is
// heap-allocated and owned by this subsystem until unregistered.
intptr_t EventLoopManagerUnix::register_timer(EventReceiver& object, int milliseconds, bool should_reload, TimerShouldFireWhenNotVisible fire_when_not_visible)
{
    VERIFY(milliseconds >= 0);
    auto& thread_data = ThreadData::the();
    auto timer = new EventLoopTimer;
    timer->owner_thread = s_thread_id;
    timer->owner = object;
    timer->interval = Duration::from_milliseconds(milliseconds);
    // First deadline: now + interval.
    timer->reload(MonotonicTime::now_coarse());
    timer->should_reload = should_reload;
    timer->fire_when_not_visible = fire_when_not_visible;
    thread_data.timeouts.schedule_absolute(timer);
    return bit_cast<intptr_t>(timer);
}
// Tear down a timer created by register_timer(). The compare-exchange on
// is_being_deleted ensures only one caller performs the unschedule + delete
// if unregistration races with another deleter.
void EventLoopManagerUnix::unregister_timer(intptr_t timer_id)
{
    auto* timer = bit_cast<EventLoopTimer*>(timer_id);
    auto& thread_data = ThreadData::for_thread(timer->owner_thread);
    auto expected = false;
    if (timer->is_being_deleted.compare_exchange_strong(expected, true, AK::MemoryOrder::memory_order_acq_rel)) {
        if (timer->is_scheduled())
            thread_data.timeouts.unschedule(timer);
        delete timer;
    }
}
// Start polling `notifier`'s fd. The notifier's slot in poll_fds matches its
// slot in notifier_by_index (slot 0 is reserved for the wake pipe), and
// notifier_by_ptr maps the notifier back to that slot for O(1) removal.
void EventLoopManagerUnix::register_notifier(Notifier& notifier)
{
    auto& thread_data = ThreadData::the();
    thread_data.notifier_by_ptr.set(&notifier, thread_data.poll_fds.size());
    thread_data.notifier_by_index.append(&notifier);
    thread_data.poll_fds.append({
        .fd = notifier.fd(),
        .events = notification_type_to_poll_events(notifier.type()),
        .revents = 0,
    });
    notifier.set_owner_thread(s_thread_id);
}
// Stop polling `notifier`'s fd. Uses swap-remove: the last poll_fds /
// notifier_by_index entry is moved into the vacated slot and the index map is
// fixed up, keeping removal O(1).
void EventLoopManagerUnix::unregister_notifier(Notifier& notifier)
{
    auto& thread_data = ThreadData::for_thread(notifier.owner_thread());
    auto it = thread_data.notifier_by_ptr.find(&notifier);
    VERIFY(it != thread_data.notifier_by_ptr.end());
    size_t notifier_index = it->value;
    thread_data.notifier_by_ptr.remove(it);
    if (notifier_index + 1 != thread_data.poll_fds.size()) {
        swap(thread_data.poll_fds[notifier_index], thread_data.poll_fds.last());
        swap(thread_data.notifier_by_index[notifier_index], thread_data.notifier_by_index.last());
        thread_data.notifier_by_ptr.set(thread_data.notifier_by_index[notifier_index], notifier_index);
    }
    thread_data.poll_fds.take_last();
    thread_data.notifier_by_index.take_last();
}
// No-op on Unix: cross-thread wake-ups are handled directly by
// EventLoopImplementationUnix::post_event(), which calls wake() itself.
void EventLoopManagerUnix::did_post_event()
{
}

EventLoopManagerUnix::~EventLoopManagerUnix() = default;
// Factory: create a fresh per-event-loop implementation object.
NonnullOwnPtr<EventLoopImplementation> EventLoopManagerUnix::make_implementation()
{
    return adopt_own(*new EventLoopImplementationUnix);
}
  608. }