EventLoopImplementationUnix.cpp 23 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702
  1. /*
  2. * Copyright (c) 2023, Andreas Kling <kling@serenityos.org>
  3. *
  4. * SPDX-License-Identifier: BSD-2-Clause
  5. */
  6. #include <AK/BinaryHeap.h>
  7. #include <AK/Singleton.h>
  8. #include <AK/TemporaryChange.h>
  9. #include <AK/Time.h>
  10. #include <AK/WeakPtr.h>
  11. #include <LibCore/Event.h>
  12. #include <LibCore/EventLoopImplementationUnix.h>
  13. #include <LibCore/EventReceiver.h>
  14. #include <LibCore/Notifier.h>
  15. #include <LibCore/Socket.h>
  16. #include <LibCore/System.h>
  17. #include <LibCore/ThreadEventQueue.h>
  18. #include <pthread.h>
  19. #include <sys/select.h>
  20. #include <unistd.h>
  21. namespace Core {
  22. namespace {
// Forward declarations for the per-thread event loop state defined below.
struct ThreadData;
class TimeoutSet;

// Global registry mapping each thread to its event loop state.
// Guarded by s_thread_data_lock; see ThreadData::the() for the locking protocol.
HashMap<pthread_t, ThreadData*> s_thread_data;
static pthread_rwlock_t s_thread_data_lock_impl;
// Points at s_thread_data_lock_impl once initialized; nullptr until the first
// call to ThreadData::the(). NOTE(review): the lazy initialization itself is
// unsynchronized — presumably the first event loop is created before any other
// threads exist; verify against callers.
static pthread_rwlock_t* s_thread_data_lock = nullptr;
// Cached pthread_self() for the current thread (0 until first use).
thread_local pthread_t s_thread_id;
  29. short notification_type_to_poll_events(NotificationType type)
  30. {
  31. short events = 0;
  32. if (has_flag(type, NotificationType::Read))
  33. events |= POLLIN;
  34. if (has_flag(type, NotificationType::Write))
  35. events |= POLLOUT;
  36. return events;
  37. }
  38. bool has_flag(int value, int flag)
  39. {
  40. return (value & flag) == flag;
  41. }
// Base class for anything schedulable on a TimeoutSet (currently only timers).
// A timeout is either "relative" (m_duration holds a delta until it is
// absolutized) or "absolute" (m_fire_time holds a deadline); the two states
// share storage via the union below, and TimeoutSet tracks which is active.
class EventLoopTimeout {
public:
    // Sentinel index meaning "not scheduled anywhere".
    static constexpr ssize_t INVALID_INDEX = NumericLimits<ssize_t>::max();

    EventLoopTimeout() { }
    virtual ~EventLoopTimeout() = default;

    // Called by TimeoutSet once the deadline has passed.
    virtual void fire(TimeoutSet& timeout_set, MonotonicTime time) = 0;

    // Only meaningful once the timeout is absolute (scheduled absolute, or
    // relative and already absolutized).
    MonotonicTime fire_time() const { return m_fire_time; }

    // Convert a relative duration into an absolute deadline. Note this switches
    // the active union member from m_duration to m_fire_time.
    void absolutize(Badge<TimeoutSet>, MonotonicTime current_time)
    {
        m_fire_time = current_time + m_duration;
    }

    // Bookkeeping index maintained by TimeoutSet: non-negative values are heap
    // positions, negative values encode slots in the pending (not-yet-
    // absolutized) list, and INVALID_INDEX means unscheduled.
    ssize_t& index(Badge<TimeoutSet>) { return m_index; }
    void set_index(Badge<TimeoutSet>, ssize_t index) { m_index = index; }
    bool is_scheduled() const { return m_index != INVALID_INDEX; }

protected:
    union {
        Duration m_duration;
        MonotonicTime m_fire_time;
    };

private:
    ssize_t m_index = INVALID_INDEX;
};
// Priority queue of EventLoopTimeouts ordered by fire time. Relative timeouts
// are parked in m_scheduled_timeouts until the next event loop iteration
// absolutizes them against the current time and moves them into the heap.
class TimeoutSet {
public:
    TimeoutSet() = default;

    // Deadline of the soonest-expiring absolute timeout, if any.
    Optional<MonotonicTime> next_timer_expiration()
    {
        if (!m_heap.is_empty()) {
            return m_heap.peek_min()->fire_time();
        } else {
            return {};
        }
    }

    // Convert all parked relative timeouts into absolute deadlines and enqueue
    // them. Inserting into the heap overwrites each timeout's (negative)
    // pending-list index with its heap index.
    void absolutize_relative_timeouts(MonotonicTime current_time)
    {
        for (auto timeout : m_scheduled_timeouts) {
            timeout->absolutize({}, current_time);
            m_heap.insert(timeout);
        }
        m_scheduled_timeouts.clear();
    }

    // Pop and fire every timeout whose deadline is at or before current_time;
    // returns how many fired. Each timeout is unscheduled (index invalidated)
    // before fire() runs, so fire() may safely reschedule it.
    size_t fire_expired(MonotonicTime current_time)
    {
        size_t fired_count = 0;
        while (!m_heap.is_empty()) {
            auto& timeout = *m_heap.peek_min();
            if (timeout.fire_time() <= current_time) {
                ++fired_count;
                m_heap.pop_min();
                timeout.set_index({}, EventLoopTimeout::INVALID_INDEX);
                timeout.fire(*this, current_time);
            } else {
                break;
            }
        }
        return fired_count;
    }

    // Park a relative timeout until the next absolutize_relative_timeouts().
    // Pending entries get negative indices (-1, -2, ...) so unschedule() can
    // distinguish them from heap entries.
    void schedule_relative(EventLoopTimeout* timeout)
    {
        timeout->set_index({}, -1 - static_cast<ssize_t>(m_scheduled_timeouts.size()));
        m_scheduled_timeouts.append(timeout);
    }

    // Enqueue a timeout whose m_fire_time is already an absolute deadline.
    void schedule_absolute(EventLoopTimeout* timeout)
    {
        m_heap.insert(timeout);
    }

    // Remove a timeout from wherever it currently lives (pending list or heap).
    void unschedule(EventLoopTimeout* timeout)
    {
        if (timeout->index({}) < 0) {
            // Pending list: decode the negative index, then swap-remove while
            // keeping the displaced element's stored index in sync.
            size_t i = -1 - timeout->index({});
            size_t j = m_scheduled_timeouts.size() - 1;
            VERIFY(m_scheduled_timeouts[i] == timeout);
            swap(m_scheduled_timeouts[i], m_scheduled_timeouts[j]);
            swap(m_scheduled_timeouts[i]->index({}), m_scheduled_timeouts[j]->index({}));
            (void)m_scheduled_timeouts.take_last();
        } else {
            m_heap.pop(timeout->index({}));
        }
        timeout->set_index({}, EventLoopTimeout::INVALID_INDEX);
    }

    // Drop all timeouts (used after fork()), marking each as unscheduled.
    void clear()
    {
        for (auto* timeout : m_heap.nodes_in_arbitrary_order())
            timeout->set_index({}, EventLoopTimeout::INVALID_INDEX);
        m_heap.clear();
        for (auto* timeout : m_scheduled_timeouts)
            timeout->set_index({}, EventLoopTimeout::INVALID_INDEX);
        m_scheduled_timeouts.clear();
    }

private:
    // Min-heap keyed on fire time; the second lambda keeps each timeout's
    // m_index in sync with its position in the heap's backing storage.
    IntrusiveBinaryHeap<
        EventLoopTimeout*,
        decltype([](EventLoopTimeout* a, EventLoopTimeout* b) {
            return a->fire_time() < b->fire_time();
        }),
        decltype([](EventLoopTimeout* timeout, size_t index) {
            timeout->set_index({}, static_cast<ssize_t>(index));
        }),
        8>
        m_heap;
    Vector<EventLoopTimeout*, 8> m_scheduled_timeouts;
};
// A one-shot or repeating timer firing TimerEvents at an EventReceiver.
class EventLoopTimer final : public EventLoopTimeout {
public:
    EventLoopTimer() = default;

    // Set the absolute deadline to now + interval.
    void reload(MonotonicTime const& now) { m_fire_time = now + interval; }

    virtual void fire(TimeoutSet& timeout_set, MonotonicTime current_time) override
    {
        // The owner may already be gone; a dead timer silently does nothing.
        auto strong_owner = owner.strong_ref();

        if (!strong_owner)
            return;

        if (should_reload) {
            MonotonicTime next_fire_time = m_fire_time + interval;
            if (next_fire_time <= current_time) {
                // We missed one or more periods (the loop was busy); skip ahead
                // instead of firing repeatedly to catch up.
                next_fire_time = current_time + interval;
            }
            m_fire_time = next_fire_time;
            if (next_fire_time != current_time) {
                timeout_set.schedule_absolute(this);
            } else {
                // NOTE: Unfortunately we need to treat timeouts with the zero interval in a
                //       special way. TimeoutSet::schedule_absolute for them will result in an
                //       infinite loop. TimeoutSet::schedule_relative, on the other hand, will do a
                //       correct thing of scheduling them for the next iteration of the loop.
                m_duration = {};
                timeout_set.schedule_relative(this);
            }
        }

        // FIXME: While TimerShouldFireWhenNotVisible::Yes prevents the timer callback from being
        //        called, it doesn't allow event loop to sleep since it needs to constantly check if
        //        is_visible_for_timer_purposes changed. A better solution will be to unregister a
        //        timer and register it back again when needed. This also has an added benefit of
        //        making fire_when_not_visible and is_visible_for_timer_purposes obsolete.
        if (fire_when_not_visible == TimerShouldFireWhenNotVisible::Yes || strong_owner->is_visible_for_timer_purposes())
            ThreadEventQueue::current().post_event(*strong_owner, make<TimerEvent>());
    }

    Duration interval;
    bool should_reload { false };
    TimerShouldFireWhenNotVisible fire_when_not_visible { TimerShouldFireWhenNotVisible::No };
    WeakPtr<EventReceiver> owner;
    // Thread that registered this timer; used to find the right ThreadData on unregister.
    pthread_t owner_thread { 0 };
    // Guards against double-delete if unregistration races from two threads.
    Atomic<bool> is_being_deleted { false };
};
// Per-thread event loop state: timers, notifiers, and the wake pipe.
struct ThreadData {
    // Fetch (lazily creating) the calling thread's ThreadData.
    static ThreadData& the()
    {
        // NOTE(review): this lazy lock initialization is itself unsynchronized —
        // presumably the first event loop is constructed before any secondary
        // threads exist; verify against callers.
        if (!s_thread_data_lock) {
            pthread_rwlock_init(&s_thread_data_lock_impl, nullptr);
            s_thread_data_lock = &s_thread_data_lock_impl;
        }
        if (s_thread_id == 0)
            s_thread_id = pthread_self();
        ThreadData* data = nullptr;
        pthread_rwlock_rdlock(&*s_thread_data_lock);
        if (!s_thread_data.contains(s_thread_id)) {
            // FIXME: Don't leak this.
            data = new ThreadData;
            // Upgrade to a write lock for the insertion. Releasing the read
            // lock first is fine for this key since only the current thread
            // ever inserts under its own thread id; the write lock protects
            // the map structure (rehashing) against concurrent readers.
            pthread_rwlock_unlock(&*s_thread_data_lock);
            pthread_rwlock_wrlock(&*s_thread_data_lock);
            s_thread_data.set(s_thread_id, data);
        } else {
            data = s_thread_data.get(s_thread_id).value();
        }
        pthread_rwlock_unlock(&*s_thread_data_lock);
        return *data;
    }

    // Look up another thread's data (e.g. to unregister a timer or notifier it
    // owns). The entry must already exist.
    static ThreadData& for_thread(pthread_t thread_id)
    {
        pthread_rwlock_rdlock(&*s_thread_data_lock);
        auto& result = *s_thread_data.get(thread_id).value();
        pthread_rwlock_unlock(&*s_thread_data_lock);
        return result;
    }

    ThreadData()
    {
        pid = getpid();
        initialize_wake_pipe();
    }

    // (Re)create the wake pipe and register its read end as poll_fds[0].
    // Also called in a forked child so it stops sharing the parent's pipe.
    void initialize_wake_pipe()
    {
        if (wake_pipe_fds[0] != -1)
            close(wake_pipe_fds[0]);
        if (wake_pipe_fds[1] != -1)
            close(wake_pipe_fds[1]);

        wake_pipe_fds = MUST(Core::System::pipe2(O_CLOEXEC));

        // The wake pipe informs us of POSIX signals as well as manual calls to wake()
        VERIFY(poll_fds.size() == 0);
        poll_fds.append({ .fd = wake_pipe_fds[0], .events = POLLIN, .revents = 0 });
        notifier_by_index.append(nullptr);
    }

    // Each thread has its own timers, notifiers and a wake pipe.
    TimeoutSet timeouts;

    // poll_fds[0] is always the wake pipe; entries 1..n belong to registered
    // notifiers, with notifier_by_index[i] naming the owner of poll_fds[i]
    // (index 0 holds nullptr as a placeholder for the wake pipe).
    Vector<pollfd> poll_fds;
    HashMap<Notifier*, size_t> notifier_by_ptr;
    Vector<Notifier*> notifier_by_index;

    // The wake pipe is used to notify another event loop that someone has called wake(), or a signal has been received.
    // wake() writes 0i32 into the pipe, signals write the signal number (guaranteed non-zero).
    Array<int, 2> wake_pipe_fds { -1, -1 };

    // Snapshot of getpid(), used to detect running in a forked child.
    pid_t pid { 0 };
};
  242. }
// Cache this thread's wake pipe fds so wake() can write without a ThreadData lookup.
EventLoopImplementationUnix::EventLoopImplementationUnix()
    : m_wake_pipe_fds(ThreadData::the().wake_pipe_fds)
{
}

EventLoopImplementationUnix::~EventLoopImplementationUnix() = default;
  248. int EventLoopImplementationUnix::exec()
  249. {
  250. for (;;) {
  251. if (m_exit_requested)
  252. return m_exit_code;
  253. pump(PumpMode::WaitForEvents);
  254. }
  255. VERIFY_NOT_REACHED();
  256. }
// Wait for events (or just poll, depending on mode), then drain this thread's
// event queue. Returns the number of events processed.
size_t EventLoopImplementationUnix::pump(PumpMode mode)
{
    static_cast<EventLoopManagerUnix&>(EventLoopManager::the()).wait_for_events(mode);
    return ThreadEventQueue::current().process();
}
// Ask exec() to return `code` once the current pump iteration completes.
void EventLoopImplementationUnix::quit(int code)
{
    m_exit_requested = true;
    m_exit_code = code;
}

// Cancel a previous quit() so the loop keeps running.
void EventLoopImplementationUnix::unquit()
{
    m_exit_requested = false;
    m_exit_code = 0;
}

bool EventLoopImplementationUnix::was_exit_requested() const
{
    return m_exit_requested;
}
// Queue an event for `receiver` on this loop's thread. When posting from a
// different thread, poke the wake pipe so the owning loop's poll() returns
// promptly instead of sleeping until its next timeout.
void EventLoopImplementationUnix::post_event(EventReceiver& receiver, NonnullOwnPtr<Event>&& event)
{
    m_thread_event_queue.post_event(receiver, move(event));
    if (&m_thread_event_queue != &ThreadEventQueue::current())
        wake();
}
// Interrupt a (possibly) blocked poll() by writing a zero word into the wake
// pipe; zero distinguishes a wake() request from a signal number (non-zero).
void EventLoopImplementationUnix::wake()
{
    int wake_event = 0;
    MUST(Core::System::write(m_wake_pipe_fds[1], { &wake_event, sizeof(wake_event) }));
}
// Core of the event loop: poll() on the wake pipe and all registered notifier
// fds, dispatch any signals delivered via the wake pipe, convert ready
// notifiers into queued events, and fire expired timers.
void EventLoopManagerUnix::wait_for_events(EventLoopImplementation::PumpMode mode)
{
    auto& thread_data = ThreadData::the();

retry:
    bool has_pending_events = ThreadEventQueue::current().has_pending_events();

    auto time_at_iteration_start = MonotonicTime::now_coarse();
    thread_data.timeouts.absolutize_relative_timeouts(time_at_iteration_start);

    // Figure out how long to wait at maximum.
    // This mainly depends on the PumpMode and whether we have pending events, but also the next expiring timer.
    int timeout = 0;
    bool should_wait_forever = false;
    if (mode == EventLoopImplementation::PumpMode::WaitForEvents && !has_pending_events) {
        auto next_timer_expiration = thread_data.timeouts.next_timer_expiration();
        if (next_timer_expiration.has_value()) {
            // Sleep at most until the next timer deadline, clamped to INT32_MAX
            // milliseconds since poll() takes an int.
            auto computed_timeout = next_timer_expiration.value() - time_at_iteration_start;
            if (computed_timeout.is_negative())
                computed_timeout = Duration::zero();
            i64 true_timeout = computed_timeout.to_milliseconds();
            timeout = static_cast<i32>(min<i64>(AK::NumericLimits<i32>::max(), true_timeout));
        } else {
            should_wait_forever = true;
        }
    }

try_select_again:
    // poll() and wait for file descriptor events, calls to wake(), POSIX signals, or timer expirations.
    ErrorOr<int> error_or_marked_fd_count = System::poll(thread_data.poll_fds, should_wait_forever ? -1 : timeout);
    auto time_after_poll = MonotonicTime::now_coarse();
    // Because POSIX, we might spuriously return from poll() with EINTR; just poll again.
    if (error_or_marked_fd_count.is_error()) {
        if (error_or_marked_fd_count.error().code() == EINTR)
            goto try_select_again;
        dbgln("EventLoopImplementationUnix::wait_for_events: {}", error_or_marked_fd_count.error());
        VERIFY_NOT_REACHED();
    }

    // We woke up due to a call to wake() or a POSIX signal.
    // Handle signals and see whether we need to handle events as well.
    if (has_flag(thread_data.poll_fds[0].revents, POLLIN)) {
        int wake_events[8];
        ssize_t nread;
        // We might receive another signal while read()ing here. The signal will go to the handle_signal properly,
        // but we get interrupted. Therefore, just retry while we were interrupted.
        do {
            errno = 0;
            nread = read(thread_data.wake_pipe_fds[0], wake_events, sizeof(wake_events));
            if (nread == 0)
                break;
        } while (nread < 0 && errno == EINTR);
        if (nread < 0) {
            perror("EventLoopImplementationUnix::wait_for_events: read from wake pipe");
            VERIFY_NOT_REACHED();
        }
        VERIFY(nread > 0);
        bool wake_requested = false;
        int event_count = nread / sizeof(wake_events[0]);
        for (int i = 0; i < event_count; i++) {
            // A zero word is a wake() request; any other value is a signal number.
            if (wake_events[i] != 0)
                dispatch_signal(wake_events[i]);
            else
                wake_requested = true;
        }

        // If the buffer came back completely full and nobody asked to wake,
        // there may be more signal words still queued in the pipe — go around
        // again (with a fresh poll) to drain them.
        if (!wake_requested && nread == sizeof(wake_events))
            goto retry;
    }

    if (error_or_marked_fd_count.value() != 0) {
        // Handle file system notifiers by making them normal events.
        for (size_t i = 1; i < thread_data.poll_fds.size(); ++i) {
            auto& revents = thread_data.poll_fds[i].revents;
            auto& notifier = *thread_data.notifier_by_index[i];
            NotificationType type = NotificationType::None;
            if (has_flag(revents, POLLIN))
                type |= NotificationType::Read;
            if (has_flag(revents, POLLOUT))
                type |= NotificationType::Write;
            if (has_flag(revents, POLLHUP))
                type |= NotificationType::HangUp;
            if (has_flag(revents, POLLERR))
                type |= NotificationType::Error;
            // Only deliver the notification kinds this notifier subscribed to.
            type &= notifier.type();

            if (type != NotificationType::None)
                ThreadEventQueue::current().post_event(notifier, make<NotifierActivationEvent>(notifier.fd(), type));
        }
    }

    // Handle expired timers.
    thread_data.timeouts.fire_expired(time_after_poll);
}
// Bookkeeping for all registered handlers of one POSIX signal. While handlers
// are being dispatched, adds/removes are staged in m_handlers_pending (a
// non-empty value means "add", an empty value means "remove") and applied once
// dispatch finishes, so a handler may (un)register from within a handler.
class SignalHandlers : public RefCounted<SignalHandlers> {
    AK_MAKE_NONCOPYABLE(SignalHandlers);
    AK_MAKE_NONMOVABLE(SignalHandlers);

public:
    SignalHandlers(int signal_number, void (*handle_signal)(int));
    ~SignalHandlers();

    void dispatch();
    int add(Function<void(int)>&& handler);
    bool remove(int handler_id);

    // True if no handlers remain, taking pending adds into account.
    bool is_empty() const
    {
        if (m_calling_handlers) {
            for (auto& handler : m_handlers_pending) {
                if (handler.value)
                    return false; // an add is pending
            }
        }
        return m_handlers.is_empty();
    }

    // True if the given handler id is registered, taking pending removes into account.
    bool have(int handler_id) const
    {
        if (m_calling_handlers) {
            auto it = m_handlers_pending.find(handler_id);
            if (it != m_handlers_pending.end()) {
                if (!it->value)
                    return false; // a deletion is pending
            }
        }
        return m_handlers.contains(handler_id);
    }

    int m_signal_number;
    void (*m_original_handler)(int); // TODO: can't use sighandler_t?
    HashMap<int, Function<void(int)>> m_handlers;
    HashMap<int, Function<void(int)>> m_handlers_pending;
    bool m_calling_handlers { false };
};
// Process-global registry of per-signal handler lists, plus the id counter
// used to hand out handler ids across all signals.
struct SignalHandlersInfo {
    HashMap<int, NonnullRefPtr<SignalHandlers>> signal_handlers;
    int next_signal_id { 0 };
};

static Singleton<SignalHandlersInfo> s_signals;
// NOTE(review): create_if_null is currently ignored — s_signals.ptr() is
// called unconditionally, which presumably instantiates the singleton on
// first use, so signals_info<false>() never actually returns nullptr;
// callers treating it as optional should verify this.
template<bool create_if_null = true>
inline SignalHandlersInfo* signals_info()
{
    return s_signals.ptr();
}
  418. void EventLoopManagerUnix::dispatch_signal(int signal_number)
  419. {
  420. auto& info = *signals_info();
  421. auto handlers = info.signal_handlers.find(signal_number);
  422. if (handlers != info.signal_handlers.end()) {
  423. // Make sure we bump the ref count while dispatching the handlers!
  424. // This allows a handler to unregister/register while the handlers
  425. // are being called!
  426. auto handler = handlers->value;
  427. handler->dispatch();
  428. }
  429. }
// Called in the child process after fork(): discard all inherited event loop
// state (timers, notifiers, signal handlers) and create a fresh wake pipe so
// the child stops sharing the parent's.
void EventLoopImplementationUnix::notify_forked_and_in_child()
{
    auto& thread_data = ThreadData::the();
    thread_data.timeouts.clear();
    thread_data.poll_fds.clear();
    thread_data.notifier_by_ptr.clear();
    thread_data.notifier_by_index.clear();
    thread_data.initialize_wake_pipe();
    if (auto* info = signals_info<false>()) {
        info->signal_handlers.clear();
        info->next_signal_id = 0;
    }
    // Adopt the child's pid so handle_signal() no longer treats us as a
    // not-yet-exec'ed fork of the parent.
    thread_data.pid = getpid();
}
// Install handle_signal for this signal, remembering the previous disposition
// so it can be restored once the last handler is unregistered.
SignalHandlers::SignalHandlers(int signal_number, void (*handle_signal)(int))
    : m_signal_number(signal_number)
    , m_original_handler(signal(signal_number, handle_signal))
{
}

SignalHandlers::~SignalHandlers()
{
    // Restore whatever disposition was in place before we took over.
    signal(m_signal_number, m_original_handler);
}
// Invoke every registered handler for this signal, then apply any adds and
// removes that were staged while the handlers were running.
void SignalHandlers::dispatch()
{
    // While this flag is set, add()/remove() stage into m_handlers_pending
    // instead of mutating m_handlers mid-iteration.
    TemporaryChange change(m_calling_handlers, true);
    for (auto& handler : m_handlers)
        handler.value(m_signal_number);
    if (!m_handlers_pending.is_empty()) {
        // Apply pending adds/removes
        for (auto& handler : m_handlers_pending) {
            if (handler.value) {
                auto result = m_handlers.set(handler.key, move(handler.value));
                VERIFY(result == AK::HashSetResult::InsertedNewEntry);
            } else {
                m_handlers.remove(handler.key);
            }
        }
        m_handlers_pending.clear();
    }
}
  471. int SignalHandlers::add(Function<void(int)>&& handler)
  472. {
  473. int id = ++signals_info()->next_signal_id; // TODO: worry about wrapping and duplicates?
  474. if (m_calling_handlers)
  475. m_handlers_pending.set(id, move(handler));
  476. else
  477. m_handlers.set(id, move(handler));
  478. return id;
  479. }
// Unregister a handler by id; returns true if the id was known (or its removal
// was successfully staged). During dispatch, removals are staged as empty
// values in m_handlers_pending and applied afterwards.
bool SignalHandlers::remove(int handler_id)
{
    VERIFY(handler_id != 0);
    if (m_calling_handlers) {
        auto it = m_handlers.find(handler_id);
        if (it != m_handlers.end()) {
            // Mark pending remove
            m_handlers_pending.set(handler_id, {});
            return true;
        }
        it = m_handlers_pending.find(handler_id);
        if (it != m_handlers_pending.end()) {
            if (!it->value)
                return false; // already was marked as deleted
            // A pending add that never took effect: cancel it by emptying the value.
            it->value = nullptr;
            return true;
        }
        return false;
    }
    return m_handlers.remove(handler_id);
}
  501. void EventLoopManagerUnix::handle_signal(int signal_number)
  502. {
  503. VERIFY(signal_number != 0);
  504. auto& thread_data = ThreadData::the();
  505. // We MUST check if the current pid still matches, because there
  506. // is a window between fork() and exec() where a signal delivered
  507. // to our fork could be inadvertently routed to the parent process!
  508. if (getpid() == thread_data.pid) {
  509. int nwritten = write(thread_data.wake_pipe_fds[1], &signal_number, sizeof(signal_number));
  510. if (nwritten < 0) {
  511. perror("EventLoopImplementationUnix::register_signal: write");
  512. VERIFY_NOT_REACHED();
  513. }
  514. } else {
  515. // We're a fork who received a signal, reset thread_data.pid.
  516. thread_data.pid = getpid();
  517. }
  518. }
  519. int EventLoopManagerUnix::register_signal(int signal_number, Function<void(int)> handler)
  520. {
  521. VERIFY(signal_number != 0);
  522. auto& info = *signals_info();
  523. auto handlers = info.signal_handlers.find(signal_number);
  524. if (handlers == info.signal_handlers.end()) {
  525. auto signal_handlers = adopt_ref(*new SignalHandlers(signal_number, EventLoopManagerUnix::handle_signal));
  526. auto handler_id = signal_handlers->add(move(handler));
  527. info.signal_handlers.set(signal_number, move(signal_handlers));
  528. return handler_id;
  529. } else {
  530. return handlers->value->add(move(handler));
  531. }
  532. }
  533. void EventLoopManagerUnix::unregister_signal(int handler_id)
  534. {
  535. VERIFY(handler_id != 0);
  536. int remove_signal_number = 0;
  537. auto& info = *signals_info();
  538. for (auto& h : info.signal_handlers) {
  539. auto& handlers = *h.value;
  540. if (handlers.remove(handler_id)) {
  541. if (handlers.is_empty())
  542. remove_signal_number = handlers.m_signal_number;
  543. break;
  544. }
  545. }
  546. if (remove_signal_number != 0)
  547. info.signal_handlers.remove(remove_signal_number);
  548. }
// Create and schedule a timer for `object`, firing after `milliseconds` (and
// repeatedly thereafter if should_reload). The returned opaque id is the timer
// pointer itself; the heap allocation is owned by the event loop until
// unregister_timer() is called with this id.
intptr_t EventLoopManagerUnix::register_timer(EventReceiver& object, int milliseconds, bool should_reload, TimerShouldFireWhenNotVisible fire_when_not_visible)
{
    VERIFY(milliseconds >= 0);
    auto& thread_data = ThreadData::the();
    auto timer = new EventLoopTimer;
    timer->owner_thread = s_thread_id;
    timer->owner = object;
    timer->interval = Duration::from_milliseconds(milliseconds);
    // Sets the absolute first deadline; must come after `interval` is assigned.
    timer->reload(MonotonicTime::now_coarse());
    timer->should_reload = should_reload;
    timer->fire_when_not_visible = fire_when_not_visible;
    thread_data.timeouts.schedule_absolute(timer);
    return bit_cast<intptr_t>(timer);
}
// Cancel and destroy a timer created by register_timer(). The atomic
// compare-exchange ensures that if unregistration races from two threads, only
// one of them unschedules and deletes the timer.
void EventLoopManagerUnix::unregister_timer(intptr_t timer_id)
{
    auto* timer = bit_cast<EventLoopTimer*>(timer_id);
    // Timers live on the thread that registered them, which may not be the caller.
    auto& thread_data = ThreadData::for_thread(timer->owner_thread);
    auto expected = false;
    if (timer->is_being_deleted.compare_exchange_strong(expected, true, AK::MemoryOrder::memory_order_acq_rel)) {
        if (timer->is_scheduled())
            thread_data.timeouts.unschedule(timer);
        delete timer;
    }
}
  574. void EventLoopManagerUnix::register_notifier(Notifier& notifier)
  575. {
  576. auto& thread_data = ThreadData::the();
  577. thread_data.notifier_by_ptr.set(&notifier, thread_data.poll_fds.size());
  578. thread_data.notifier_by_index.append(&notifier);
  579. thread_data.poll_fds.append({
  580. .fd = notifier.fd(),
  581. .events = notification_type_to_poll_events(notifier.type()),
  582. .revents = 0,
  583. });
  584. notifier.set_owner_thread(s_thread_id);
  585. }
// Stop watching a notifier's fd (on whichever thread owns it). Uses
// swap-with-last removal on the parallel poll_fds / notifier_by_index arrays,
// fixing up the displaced notifier's ptr->index mapping.
void EventLoopManagerUnix::unregister_notifier(Notifier& notifier)
{
    auto& thread_data = ThreadData::for_thread(notifier.owner_thread());
    auto it = thread_data.notifier_by_ptr.find(&notifier);
    VERIFY(it != thread_data.notifier_by_ptr.end());

    size_t notifier_index = it->value;
    thread_data.notifier_by_ptr.remove(it);

    if (notifier_index + 1 != thread_data.poll_fds.size()) {
        // Not the last entry: move the last entry into this slot and point its
        // ptr->index mapping at the new position.
        swap(thread_data.poll_fds[notifier_index], thread_data.poll_fds.last());
        swap(thread_data.notifier_by_index[notifier_index], thread_data.notifier_by_index.last());
        thread_data.notifier_by_ptr.set(thread_data.notifier_by_index[notifier_index], notifier_index);
    }

    thread_data.poll_fds.take_last();
    thread_data.notifier_by_index.take_last();
}
// Nothing to do here: wait_for_events() notices queued events on its next pass,
// and cross-thread posts already wake the target loop via post_event().
void EventLoopManagerUnix::did_post_event()
{
}

EventLoopManagerUnix::~EventLoopManagerUnix() = default;

// Factory: each call produces a fresh per-thread Unix event loop implementation.
NonnullOwnPtr<EventLoopImplementation> EventLoopManagerUnix::make_implementation()
{
    return adopt_own(*new EventLoopImplementationUnix);
}
  609. }