// EventLoopImplementationUnix.cpp
  1. /*
  2. * Copyright (c) 2023, Andreas Kling <kling@serenityos.org>
  3. *
  4. * SPDX-License-Identifier: BSD-2-Clause
  5. */
  6. #include <AK/BinaryHeap.h>
  7. #include <AK/Singleton.h>
  8. #include <AK/TemporaryChange.h>
  9. #include <AK/Time.h>
  10. #include <AK/WeakPtr.h>
  11. #include <LibCore/Event.h>
  12. #include <LibCore/EventLoopImplementationUnix.h>
  13. #include <LibCore/EventReceiver.h>
  14. #include <LibCore/Notifier.h>
  15. #include <LibCore/Socket.h>
  16. #include <LibCore/System.h>
  17. #include <LibCore/ThreadEventQueue.h>
  18. #include <pthread.h>
  19. #include <sys/select.h>
  20. #include <unistd.h>
  21. namespace Core {
  22. namespace {
struct ThreadData;
class TimeoutSet;

// Per-thread event loop state (timers, notifiers, wake pipe), keyed by
// pthread id. Guarded by s_thread_data_lock.
HashMap<pthread_t, ThreadData*> s_thread_data;
static pthread_rwlock_t s_thread_data_lock_impl;
// Points at s_thread_data_lock_impl once ThreadData::the() has lazily
// initialized it; nullptr until then.
static pthread_rwlock_t* s_thread_data_lock = nullptr;
// Cached pthread_self() for this thread; 0 until first ThreadData::the() call.
thread_local pthread_t s_thread_id;
  29. short notification_type_to_poll_events(NotificationType type)
  30. {
  31. short events = 0;
  32. if (has_flag(type, NotificationType::Read))
  33. events |= POLLIN;
  34. if (has_flag(type, NotificationType::Write))
  35. events |= POLLOUT;
  36. return events;
  37. }
  38. bool has_flag(int value, int flag)
  39. {
  40. return (value & flag) == flag;
  41. }
// Base class for anything schedulable on a TimeoutSet.
// A timeout is in exactly one of two states: "relative" (m_duration holds a
// delay from now) or "absolute" (m_fire_time holds a deadline) — hence the
// union below. TimeoutSet::absolutize_relative_timeouts() transitions
// relative timeouts to absolute ones.
class EventLoopTimeout {
public:
    // Sentinel index meaning "not currently tracked by any TimeoutSet".
    static constexpr ssize_t INVALID_INDEX = NumericLimits<ssize_t>::max();

    EventLoopTimeout() { }
    virtual ~EventLoopTimeout() = default;

    // Invoked by TimeoutSet::fire_expired() once the deadline has passed.
    virtual void fire(TimeoutSet& timeout_set, MonotonicTime time) = 0;

    // Only meaningful while in the "absolute" state.
    MonotonicTime fire_time() const { return m_fire_time; }

    // Convert a relative delay into an absolute deadline (Badge: TimeoutSet only).
    void absolutize(Badge<TimeoutSet>, MonotonicTime current_time)
    {
        m_fire_time = current_time + m_duration;
    }

    // Bookkeeping slot maintained by TimeoutSet: index >= 0 is a position in
    // its binary heap, a negative index encodes a position in its pending
    // relative-timeout list (see TimeoutSet::schedule_relative).
    ssize_t& index(Badge<TimeoutSet>) { return m_index; }
    void set_index(Badge<TimeoutSet>, ssize_t index) { m_index = index; }

    bool is_scheduled() const { return m_index != INVALID_INDEX; }

protected:
    union {
        Duration m_duration;
        MonotonicTime m_fire_time;
    };

private:
    ssize_t m_index = INVALID_INDEX;
};
// All timeouts belonging to one thread. Absolute timeouts live in a min-heap
// ordered by fire time; newly scheduled relative timeouts wait in
// m_scheduled_timeouts until absolutize_relative_timeouts() folds them into
// the heap at the start of an event loop iteration.
class TimeoutSet {
public:
    TimeoutSet() = default;

    // Earliest armed deadline, or empty if nothing is scheduled in the heap.
    Optional<MonotonicTime> next_timer_expiration()
    {
        if (!m_heap.is_empty()) {
            return m_heap.peek_min()->fire_time();
        } else {
            return {};
        }
    }

    // Turn every pending relative timeout into an absolute deadline and move
    // it into the heap.
    void absolutize_relative_timeouts(MonotonicTime current_time)
    {
        for (auto timeout : m_scheduled_timeouts) {
            timeout->absolutize({}, current_time);
            m_heap.insert(timeout);
        }
        m_scheduled_timeouts.clear();
    }

    // Fire every timeout whose deadline is <= current_time, in deadline
    // order; returns the number fired.
    size_t fire_expired(MonotonicTime current_time)
    {
        size_t fired_count = 0;
        while (!m_heap.is_empty()) {
            auto& timeout = *m_heap.peek_min();
            if (timeout.fire_time() <= current_time) {
                ++fired_count;
                m_heap.pop_min();
                // Detach before firing so fire() may legally reschedule itself.
                timeout.set_index({}, EventLoopTimeout::INVALID_INDEX);
                timeout.fire(*this, current_time);
            } else {
                break;
            }
        }
        return fired_count;
    }

    // Queue a relative timeout. Its index is encoded as (-1 - position) so
    // unschedule() can distinguish list entries (negative) from heap entries
    // (>= 0).
    void schedule_relative(EventLoopTimeout* timeout)
    {
        timeout->set_index({}, -1 - static_cast<ssize_t>(m_scheduled_timeouts.size()));
        m_scheduled_timeouts.append(timeout);
    }

    // Queue a timeout that already has an absolute fire time.
    void schedule_absolute(EventLoopTimeout* timeout)
    {
        m_heap.insert(timeout);
    }

    // Remove a timeout from whichever structure currently holds it.
    void unschedule(EventLoopTimeout* timeout)
    {
        if (timeout->index({}) < 0) {
            // Negative index: swap-remove from the pending list, fixing up the
            // encoded index of the entry that takes its place.
            size_t i = -1 - timeout->index({});
            size_t j = m_scheduled_timeouts.size() - 1;
            VERIFY(m_scheduled_timeouts[i] == timeout);
            swap(m_scheduled_timeouts[i], m_scheduled_timeouts[j]);
            swap(m_scheduled_timeouts[i]->index({}), m_scheduled_timeouts[j]->index({}));
            (void)m_scheduled_timeouts.take_last();
        } else {
            m_heap.pop(timeout->index({}));
        }
        timeout->set_index({}, EventLoopTimeout::INVALID_INDEX);
    }

    // Drop all timeouts without firing them (used after fork()).
    void clear()
    {
        for (auto* timeout : m_heap.nodes_in_arbitrary_order())
            timeout->set_index({}, EventLoopTimeout::INVALID_INDEX);
        m_heap.clear();
        for (auto* timeout : m_scheduled_timeouts)
            timeout->set_index({}, EventLoopTimeout::INVALID_INDEX);
        m_scheduled_timeouts.clear();
    }

private:
    // Min-heap on fire time; the index-update lambda keeps each timeout's
    // index field in sync as the heap shuffles entries around.
    IntrusiveBinaryHeap<
        EventLoopTimeout*,
        decltype([](EventLoopTimeout* a, EventLoopTimeout* b) {
            return a->fire_time() < b->fire_time();
        }),
        decltype([](EventLoopTimeout* timeout, size_t index) {
            timeout->set_index({}, static_cast<ssize_t>(index));
        }),
        8>
        m_heap;
    Vector<EventLoopTimeout*, 8> m_scheduled_timeouts;
};
// A (possibly repeating) timer owned by an EventReceiver.
class EventLoopTimer final : public EventLoopTimeout {
public:
    // If a reloading timer falls behind by more than this, we log and skip
    // the missed iterations instead of firing them back-to-back.
    static constexpr auto delay_tolerance = Duration::from_milliseconds(5);

    EventLoopTimer() = default;

    // Arm the timer one interval from `now`.
    void reload(MonotonicTime const& now) { m_fire_time = now + interval; }

    virtual void fire(TimeoutSet& timeout_set, MonotonicTime current_time) override
    {
        // The owner may have died; WeakPtr tells us. A dead owner also means
        // a reloading timer silently stops here.
        auto strong_owner = owner.strong_ref();

        if (!strong_owner)
            return;

        if (should_reload) {
            MonotonicTime next_fire_time = m_fire_time + interval;
            if (next_fire_time <= current_time) {
                // We're behind schedule: catch up by rebasing off current_time,
                // logging how many iterations we're skipping.
                auto delay = current_time - next_fire_time;
                if (delay >= delay_tolerance && !interval.is_zero()) {
                    auto iterations = delay.to_milliseconds() / max<i64>(1, interval.to_milliseconds()) + 1;
                    dbgln("Can't keep up! Skipping approximately {} iteration(s) of a reloading timer (delayed by {}ms).", iterations, delay.to_milliseconds());
                }
                next_fire_time = current_time + interval;
            }
            m_fire_time = next_fire_time;
            if (next_fire_time != current_time) {
                timeout_set.schedule_absolute(this);
            } else {
                // NOTE: Unfortunately we need to treat timeouts with the zero interval in a
                //       special way. TimeoutSet::schedule_absolute for them will result in an
                //       infinite loop. TimeoutSet::schedule_relative, on the other hand, will do a
                //       correct thing of scheduling them for the next iteration of the loop.
                m_duration = {};
                timeout_set.schedule_relative(this);
            }
        }

        // FIXME: While TimerShouldFireWhenNotVisible::Yes prevents the timer callback from being
        //        called, it doesn't allow event loop to sleep since it needs to constantly check if
        //        is_visible_for_timer_purposes changed. A better solution will be to unregister a
        //        timer and register it back again when needed. This also has an added benefit of
        //        making fire_when_not_visible and is_visible_for_timer_purposes obsolete.
        if (fire_when_not_visible == TimerShouldFireWhenNotVisible::Yes || strong_owner->is_visible_for_timer_purposes())
            ThreadEventQueue::current().post_event(*strong_owner, make<TimerEvent>());
    }

    Duration interval;
    bool should_reload { false };
    TimerShouldFireWhenNotVisible fire_when_not_visible { TimerShouldFireWhenNotVisible::No };
    WeakPtr<EventReceiver> owner;
    // Thread the timer was registered on; unregister_timer() uses this to
    // find the right ThreadData.
    pthread_t owner_thread { 0 };
    // Guards against double-delete when unregister_timer() races (see
    // compare_exchange_strong in unregister_timer).
    Atomic<bool> is_being_deleted { false };
};
// Per-thread event loop state: timers, notifiers, poll set, and wake pipe.
struct ThreadData {
    // Returns (creating on first use) the calling thread's ThreadData.
    // NOTE(review): the lazy init of s_thread_data_lock itself is unguarded;
    // this presumably relies on the first call happening before any other
    // thread spins up an event loop — confirm against callers.
    static ThreadData& the()
    {
        if (!s_thread_data_lock) {
            pthread_rwlock_init(&s_thread_data_lock_impl, nullptr);
            s_thread_data_lock = &s_thread_data_lock_impl;
        }

        if (s_thread_id == 0)
            s_thread_id = pthread_self();
        ThreadData* data = nullptr;
        pthread_rwlock_rdlock(&*s_thread_data_lock);
        if (!s_thread_data.contains(s_thread_id)) {
            // FIXME: Don't leak this.
            data = new ThreadData;
            // Upgrade to a write lock to insert. Dropping the read lock first
            // is safe for this key: only the owning thread ever inserts its
            // own thread id.
            pthread_rwlock_unlock(&*s_thread_data_lock);
            pthread_rwlock_wrlock(&*s_thread_data_lock);
            s_thread_data.set(s_thread_id, data);
        } else {
            data = s_thread_data.get(s_thread_id).value();
        }
        pthread_rwlock_unlock(&*s_thread_data_lock);
        return *data;
    }

    // Look up another thread's ThreadData. The entry must already exist
    // (value() asserts otherwise).
    static ThreadData& for_thread(pthread_t thread_id)
    {
        pthread_rwlock_rdlock(&*s_thread_data_lock);
        auto& result = *s_thread_data.get(thread_id).value();
        pthread_rwlock_unlock(&*s_thread_data_lock);
        return result;
    }

    ThreadData()
    {
        pid = getpid();
        initialize_wake_pipe();
    }

    // (Re)create the wake pipe and install it as poll_fds[0]. Requires
    // poll_fds to be empty (fresh construction or after a post-fork clear).
    void initialize_wake_pipe()
    {
        if (wake_pipe_fds[0] != -1)
            close(wake_pipe_fds[0]);
        if (wake_pipe_fds[1] != -1)
            close(wake_pipe_fds[1]);

        wake_pipe_fds = MUST(Core::System::pipe2(O_CLOEXEC));

        // The wake pipe informs us of POSIX signals as well as manual calls to wake()
        VERIFY(poll_fds.size() == 0);
        poll_fds.append({ .fd = wake_pipe_fds[0], .events = POLLIN, .revents = 0 });
        // Index 0 has no notifier; keep the two vectors index-aligned.
        notifier_by_index.append(nullptr);
    }

    // Each thread has its own timers, notifiers and a wake pipe.
    TimeoutSet timeouts;

    // poll_fds[i] corresponds to notifier_by_index[i] (index 0 = wake pipe).
    Vector<pollfd> poll_fds;
    HashMap<Notifier*, size_t> notifier_by_ptr;
    Vector<Notifier*> notifier_by_index;

    // The wake pipe is used to notify another event loop that someone has called wake(), or a signal has been received.
    // wake() writes 0i32 into the pipe, signals write the signal number (guaranteed non-zero).
    Array<int, 2> wake_pipe_fds { -1, -1 };

    // Used to detect that we forked (see handle_signal()).
    pid_t pid { 0 };
};
  248. }
// Grab this thread's wake pipe fds up front so wake() doesn't have to go
// through ThreadData::the() (and its lock) on every call.
EventLoopImplementationUnix::EventLoopImplementationUnix()
    : m_wake_pipe_fds(ThreadData::the().wake_pipe_fds)
{
}

EventLoopImplementationUnix::~EventLoopImplementationUnix() = default;
  254. int EventLoopImplementationUnix::exec()
  255. {
  256. for (;;) {
  257. if (m_exit_requested)
  258. return m_exit_code;
  259. pump(PumpMode::WaitForEvents);
  260. }
  261. VERIFY_NOT_REACHED();
  262. }
  263. size_t EventLoopImplementationUnix::pump(PumpMode mode)
  264. {
  265. static_cast<EventLoopManagerUnix&>(EventLoopManager::the()).wait_for_events(mode);
  266. return ThreadEventQueue::current().process();
  267. }
  268. void EventLoopImplementationUnix::quit(int code)
  269. {
  270. m_exit_requested = true;
  271. m_exit_code = code;
  272. }
  273. void EventLoopImplementationUnix::unquit()
  274. {
  275. m_exit_requested = false;
  276. m_exit_code = 0;
  277. }
  278. bool EventLoopImplementationUnix::was_exit_requested() const
  279. {
  280. return m_exit_requested;
  281. }
  282. void EventLoopImplementationUnix::post_event(EventReceiver& receiver, NonnullOwnPtr<Event>&& event)
  283. {
  284. m_thread_event_queue.post_event(receiver, move(event));
  285. if (&m_thread_event_queue != &ThreadEventQueue::current())
  286. wake();
  287. }
  288. void EventLoopImplementationUnix::wake()
  289. {
  290. int wake_event = 0;
  291. MUST(Core::System::write(m_wake_pipe_fds[1], { &wake_event, sizeof(wake_event) }));
  292. }
// One iteration of waiting: poll() on all registered fds (plus the wake
// pipe), dispatch any POSIX signals delivered via the wake pipe, convert
// ready notifiers into events, and fire expired timers.
void EventLoopManagerUnix::wait_for_events(EventLoopImplementation::PumpMode mode)
{
    auto& thread_data = ThreadData::the();

retry:
    bool has_pending_events = ThreadEventQueue::current().has_pending_events();

    auto time_at_iteration_start = MonotonicTime::now_coarse();
    thread_data.timeouts.absolutize_relative_timeouts(time_at_iteration_start);

    // Figure out how long to wait at maximum.
    // This mainly depends on the PumpMode and whether we have pending events, but also the next expiring timer.
    int timeout = 0;
    bool should_wait_forever = false;
    if (mode == EventLoopImplementation::PumpMode::WaitForEvents && !has_pending_events) {
        auto next_timer_expiration = thread_data.timeouts.next_timer_expiration();
        if (next_timer_expiration.has_value()) {
            // Clamp to [0, INT32_MAX] since poll() takes an int timeout in ms.
            auto computed_timeout = next_timer_expiration.value() - time_at_iteration_start;
            if (computed_timeout.is_negative())
                computed_timeout = Duration::zero();
            i64 true_timeout = computed_timeout.to_milliseconds();
            timeout = static_cast<i32>(min<i64>(AK::NumericLimits<i32>::max(), true_timeout));
        } else {
            should_wait_forever = true;
        }
    }

try_select_again:
    // poll() and wait for fd events, calls to wake(), POSIX signals, or timer expirations.
    ErrorOr<int> error_or_marked_fd_count = System::poll(thread_data.poll_fds, should_wait_forever ? -1 : timeout);
    auto time_after_poll = MonotonicTime::now_coarse();
    // Because POSIX, we might spuriously return from poll() with EINTR; just poll again.
    if (error_or_marked_fd_count.is_error()) {
        if (error_or_marked_fd_count.error().code() == EINTR)
            goto try_select_again;
        dbgln("EventLoopImplementationUnix::wait_for_events: {}", error_or_marked_fd_count.error());
        VERIFY_NOT_REACHED();
    }

    // We woke up due to a call to wake() or a POSIX signal.
    // Handle signals and see whether we need to handle events as well.
    if (has_flag(thread_data.poll_fds[0].revents, POLLIN)) {
        int wake_events[8];
        ssize_t nread;
        // We might receive another signal while read()ing here. The signal will go to the handle_signal properly,
        // but we get interrupted. Therefore, just retry while we were interrupted.
        do {
            errno = 0;
            nread = read(thread_data.wake_pipe_fds[0], wake_events, sizeof(wake_events));
            if (nread == 0)
                break;
        } while (nread < 0 && errno == EINTR);
        if (nread < 0) {
            perror("EventLoopImplementationUnix::wait_for_events: read from wake pipe");
            VERIFY_NOT_REACHED();
        }
        VERIFY(nread > 0);

        bool wake_requested = false;
        int event_count = nread / sizeof(wake_events[0]);
        // Non-zero entries are signal numbers; zero entries are manual wake()s.
        for (int i = 0; i < event_count; i++) {
            if (wake_events[i] != 0)
                dispatch_signal(wake_events[i]);
            else
                wake_requested = true;
        }

        // If we filled the whole buffer with signal numbers, the pipe may
        // hold more; restart the iteration to drain it — unless an explicit
        // wake() asked us to return to the caller.
        if (!wake_requested && nread == sizeof(wake_events))
            goto retry;
    }

    if (error_or_marked_fd_count.value() != 0) {
        // Handle fd notifiers by making them normal events.
        // Index 0 is the wake pipe; registered notifiers start at index 1.
        for (size_t i = 1; i < thread_data.poll_fds.size(); ++i) {
            auto& revents = thread_data.poll_fds[i].revents;
            auto& notifier = *thread_data.notifier_by_index[i];

            NotificationType type = NotificationType::None;
            if (has_flag(revents, POLLIN))
                type |= NotificationType::Read;
            if (has_flag(revents, POLLOUT))
                type |= NotificationType::Write;
            if (has_flag(revents, POLLHUP))
                type |= NotificationType::HangUp;
            if (has_flag(revents, POLLERR))
                type |= NotificationType::Error;

            // Only report condition kinds the notifier actually asked for.
            type &= notifier.type();

            if (type != NotificationType::None)
                ThreadEventQueue::current().post_event(notifier, make<NotifierActivationEvent>(notifier.fd(), type));
        }
    }

    // Handle expired timers.
    thread_data.timeouts.fire_expired(time_after_poll);
}
// The set of user callbacks registered for one POSIX signal number.
// Handlers may be added/removed from within a handler callback; while
// dispatch() is running (m_calling_handlers), mutations are parked in
// m_handlers_pending and applied after the walk finishes. In the pending
// map, a non-null value means "add", a null value means "remove".
class SignalHandlers : public RefCounted<SignalHandlers> {
    AK_MAKE_NONCOPYABLE(SignalHandlers);
    AK_MAKE_NONMOVABLE(SignalHandlers);

public:
    SignalHandlers(int signal_number, void (*handle_signal)(int));
    ~SignalHandlers();

    void dispatch();
    int add(Function<void(int)>&& handler);
    bool remove(int handler_id);

    // Empty means "no handler will survive the current dispatch", taking
    // pending adds into account.
    bool is_empty() const
    {
        if (m_calling_handlers) {
            for (auto& handler : m_handlers_pending) {
                if (handler.value)
                    return false; // an add is pending
            }
        }
        return m_handlers.is_empty();
    }

    // Does this id refer to a live handler (accounting for pending removals)?
    bool have(int handler_id) const
    {
        if (m_calling_handlers) {
            auto it = m_handlers_pending.find(handler_id);
            if (it != m_handlers_pending.end()) {
                if (!it->value)
                    return false; // a deletion is pending
            }
        }
        return m_handlers.contains(handler_id);
    }

    int m_signal_number;
    // Previous disposition, restored in the destructor.
    void (*m_original_handler)(int); // TODO: can't use sighandler_t?
    HashMap<int, Function<void(int)>> m_handlers;
    HashMap<int, Function<void(int)>> m_handlers_pending;
    bool m_calling_handlers { false };
};
// Process-wide signal registry: per-signal handler lists plus the id counter
// used by SignalHandlers::add().
struct SignalHandlersInfo {
    HashMap<int, NonnullRefPtr<SignalHandlers>> signal_handlers;
    int next_signal_id { 0 };
};

static Singleton<SignalHandlersInfo> s_signals;

// Accessor for the registry singleton.
// NOTE(review): the create_if_null template parameter is ignored here —
// Singleton::ptr() constructs on first use, so signals_info<false>() behaves
// identically to signals_info<true>(). Confirm this matches the intent at the
// call site in notify_forked_and_in_child().
template<bool create_if_null = true>
inline SignalHandlersInfo* signals_info()
{
    return s_signals.ptr();
}
  424. void EventLoopManagerUnix::dispatch_signal(int signal_number)
  425. {
  426. auto& info = *signals_info();
  427. auto handlers = info.signal_handlers.find(signal_number);
  428. if (handlers != info.signal_handlers.end()) {
  429. // Make sure we bump the ref count while dispatching the handlers!
  430. // This allows a handler to unregister/register while the handlers
  431. // are being called!
  432. auto handler = handlers->value;
  433. handler->dispatch();
  434. }
  435. }
// Called in the child process right after fork(): the child must not share
// the parent's fds, timers, notifiers, or signal registrations.
// NOTE: the clears must happen before initialize_wake_pipe(), which asserts
// that poll_fds is empty before re-adding the wake pipe entry.
void EventLoopImplementationUnix::notify_forked_and_in_child()
{
    auto& thread_data = ThreadData::the();
    thread_data.timeouts.clear();
    thread_data.poll_fds.clear();
    thread_data.notifier_by_ptr.clear();
    thread_data.notifier_by_index.clear();
    thread_data.initialize_wake_pipe();
    if (auto* info = signals_info<false>()) {
        info->signal_handlers.clear();
        info->next_signal_id = 0;
    }
    // Remember our new pid so handle_signal() no longer treats us as a fork.
    thread_data.pid = getpid();
}
// Install `handle_signal` as the process disposition for `signal_number`,
// remembering the previous handler so it can be restored on destruction.
SignalHandlers::SignalHandlers(int signal_number, void (*handle_signal)(int))
    : m_signal_number(signal_number)
    , m_original_handler(signal(signal_number, handle_signal))
{
}

SignalHandlers::~SignalHandlers()
{
    // Restore whatever disposition was in place before we registered.
    signal(m_signal_number, m_original_handler);
}
// Call every registered handler for this signal. While the walk is in
// progress, add()/remove() divert into m_handlers_pending (see
// m_calling_handlers); those deferred mutations are applied afterwards so we
// never modify m_handlers mid-iteration.
void SignalHandlers::dispatch()
{
    TemporaryChange change(m_calling_handlers, true);
    for (auto& handler : m_handlers)
        handler.value(m_signal_number);
    if (!m_handlers_pending.is_empty()) {
        // Apply pending adds/removes
        for (auto& handler : m_handlers_pending) {
            if (handler.value) {
                // Non-null value: a deferred add. The id is fresh, so it must
                // not already exist in m_handlers.
                auto result = m_handlers.set(handler.key, move(handler.value));
                VERIFY(result == AK::HashSetResult::InsertedNewEntry);
            } else {
                // Null value: a deferred remove.
                m_handlers.remove(handler.key);
            }
        }
        m_handlers_pending.clear();
    }
}
  477. int SignalHandlers::add(Function<void(int)>&& handler)
  478. {
  479. int id = ++signals_info()->next_signal_id; // TODO: worry about wrapping and duplicates?
  480. if (m_calling_handlers)
  481. m_handlers_pending.set(id, move(handler));
  482. else
  483. m_handlers.set(id, move(handler));
  484. return id;
  485. }
// Unregister a handler by id; returns true if something was (or will be)
// removed. During dispatch() we must not mutate m_handlers, so removals are
// recorded in m_handlers_pending (null value = pending deletion) instead.
bool SignalHandlers::remove(int handler_id)
{
    VERIFY(handler_id != 0);
    if (m_calling_handlers) {
        auto it = m_handlers.find(handler_id);
        if (it != m_handlers.end()) {
            // Mark pending remove
            m_handlers_pending.set(handler_id, {});
            return true;
        }
        // Not in the live map; maybe it was added during this same dispatch.
        it = m_handlers_pending.find(handler_id);
        if (it != m_handlers_pending.end()) {
            if (!it->value)
                return false; // already was marked as deleted
            // Cancel the pending add by turning it into a pending delete.
            it->value = nullptr;
            return true;
        }
        return false;
    }
    return m_handlers.remove(handler_id);
}
// Low-level signal handler installed via signal(). It forwards the signal
// number through the wake pipe; the event loop thread picks it up in
// wait_for_events() and calls dispatch_signal() outside signal context.
// NOTE(review): ThreadData::the() below takes locks and may allocate on
// first use, which is not async-signal-safe — presumably relied on being
// initialized before the first signal arrives; confirm.
void EventLoopManagerUnix::handle_signal(int signal_number)
{
    VERIFY(signal_number != 0);
    auto& thread_data = ThreadData::the();
    // We MUST check if the current pid still matches, because there
    // is a window between fork() and exec() where a signal delivered
    // to our fork could be inadvertently routed to the parent process!
    if (getpid() == thread_data.pid) {
        int nwritten = write(thread_data.wake_pipe_fds[1], &signal_number, sizeof(signal_number));
        if (nwritten < 0) {
            perror("EventLoopImplementationUnix::register_signal: write");
            VERIFY_NOT_REACHED();
        }
    } else {
        // We're a fork who received a signal, reset thread_data.pid.
        thread_data.pid = getpid();
    }
}
  525. int EventLoopManagerUnix::register_signal(int signal_number, Function<void(int)> handler)
  526. {
  527. VERIFY(signal_number != 0);
  528. auto& info = *signals_info();
  529. auto handlers = info.signal_handlers.find(signal_number);
  530. if (handlers == info.signal_handlers.end()) {
  531. auto signal_handlers = adopt_ref(*new SignalHandlers(signal_number, EventLoopManagerUnix::handle_signal));
  532. auto handler_id = signal_handlers->add(move(handler));
  533. info.signal_handlers.set(signal_number, move(signal_handlers));
  534. return handler_id;
  535. } else {
  536. return handlers->value->add(move(handler));
  537. }
  538. }
  539. void EventLoopManagerUnix::unregister_signal(int handler_id)
  540. {
  541. VERIFY(handler_id != 0);
  542. int remove_signal_number = 0;
  543. auto& info = *signals_info();
  544. for (auto& h : info.signal_handlers) {
  545. auto& handlers = *h.value;
  546. if (handlers.remove(handler_id)) {
  547. if (handlers.is_empty())
  548. remove_signal_number = handlers.m_signal_number;
  549. break;
  550. }
  551. }
  552. if (remove_signal_number != 0)
  553. info.signal_handlers.remove(remove_signal_number);
  554. }
  555. intptr_t EventLoopManagerUnix::register_timer(EventReceiver& object, int milliseconds, bool should_reload, TimerShouldFireWhenNotVisible fire_when_not_visible)
  556. {
  557. VERIFY(milliseconds >= 0);
  558. auto& thread_data = ThreadData::the();
  559. auto timer = new EventLoopTimer;
  560. timer->owner_thread = s_thread_id;
  561. timer->owner = object;
  562. timer->interval = Duration::from_milliseconds(milliseconds);
  563. timer->reload(MonotonicTime::now_coarse());
  564. timer->should_reload = should_reload;
  565. timer->fire_when_not_visible = fire_when_not_visible;
  566. thread_data.timeouts.schedule_absolute(timer);
  567. return bit_cast<intptr_t>(timer);
  568. }
// Destroy a timer created by register_timer(). The compare-exchange on
// is_being_deleted ensures exactly one caller unschedules and deletes the
// timer, even if unregistration is attempted more than once.
void EventLoopManagerUnix::unregister_timer(intptr_t timer_id)
{
    // The opaque id is the timer's pointer value (see register_timer).
    auto* timer = bit_cast<EventLoopTimer*>(timer_id);
    auto& thread_data = ThreadData::for_thread(timer->owner_thread);

    auto expected = false;
    if (timer->is_being_deleted.compare_exchange_strong(expected, true, AK::MemoryOrder::memory_order_acq_rel)) {
        if (timer->is_scheduled())
            thread_data.timeouts.unschedule(timer);
        delete timer;
    }
}
  580. void EventLoopManagerUnix::register_notifier(Notifier& notifier)
  581. {
  582. auto& thread_data = ThreadData::the();
  583. thread_data.notifier_by_ptr.set(&notifier, thread_data.poll_fds.size());
  584. thread_data.notifier_by_index.append(&notifier);
  585. thread_data.poll_fds.append({
  586. .fd = notifier.fd(),
  587. .events = notification_type_to_poll_events(notifier.type()),
  588. .revents = 0,
  589. });
  590. notifier.set_owner_thread(s_thread_id);
  591. }
// Stop watching a Notifier's fd. Uses swap-remove on the parallel
// poll_fds / notifier_by_index vectors, fixing up the moved entry's index in
// notifier_by_ptr so all three structures stay consistent.
void EventLoopManagerUnix::unregister_notifier(Notifier& notifier)
{
    auto& thread_data = ThreadData::for_thread(notifier.owner_thread());
    auto it = thread_data.notifier_by_ptr.find(&notifier);
    VERIFY(it != thread_data.notifier_by_ptr.end());

    size_t notifier_index = it->value;
    thread_data.notifier_by_ptr.remove(it);

    if (notifier_index + 1 != thread_data.poll_fds.size()) {
        // Not the last entry: swap the last entry into this slot and update
        // its recorded index.
        swap(thread_data.poll_fds[notifier_index], thread_data.poll_fds.last());
        swap(thread_data.notifier_by_index[notifier_index], thread_data.notifier_by_index.last());
        thread_data.notifier_by_ptr.set(thread_data.notifier_by_index[notifier_index], notifier_index);
    }

    thread_data.poll_fds.take_last();
    thread_data.notifier_by_index.take_last();
}
// No per-post bookkeeping is needed on this platform; cross-thread wake-ups
// are handled in EventLoopImplementationUnix::post_event() via wake().
void EventLoopManagerUnix::did_post_event()
{
}

EventLoopManagerUnix::~EventLoopManagerUnix() = default;
  611. NonnullOwnPtr<EventLoopImplementation> EventLoopManagerUnix::make_implementation()
  612. {
  613. return adopt_own(*new EventLoopImplementationUnix);
  614. }
  615. }