// EventLoopImplementationUnix.cpp
  1. /*
  2. * Copyright (c) 2023, Andreas Kling <kling@serenityos.org>
  3. *
  4. * SPDX-License-Identifier: BSD-2-Clause
  5. */
  6. #include <AK/BinaryHeap.h>
  7. #include <AK/Singleton.h>
  8. #include <AK/TemporaryChange.h>
  9. #include <AK/Time.h>
  10. #include <AK/WeakPtr.h>
  11. #include <LibCore/Event.h>
  12. #include <LibCore/EventLoopImplementationUnix.h>
  13. #include <LibCore/EventReceiver.h>
  14. #include <LibCore/Notifier.h>
  15. #include <LibCore/Socket.h>
  16. #include <LibCore/System.h>
  17. #include <LibCore/ThreadEventQueue.h>
  18. #include <sys/select.h>
  19. #include <unistd.h>
  20. namespace Core {
  21. namespace {
  22. struct ThreadData;
  23. class TimeoutSet;
  24. thread_local ThreadData* s_thread_data;
  25. short notification_type_to_poll_events(NotificationType type)
  26. {
  27. short events = 0;
  28. if (has_flag(type, NotificationType::Read))
  29. events |= POLLIN;
  30. if (has_flag(type, NotificationType::Write))
  31. events |= POLLOUT;
  32. return events;
  33. }
  34. bool has_flag(int value, int flag)
  35. {
  36. return (value & flag) == flag;
  37. }
// Base class for anything schedulable on a TimeoutSet. It stores either a
// relative duration (before scheduling) or an absolute fire time (after
// absolutize()), plus its current position inside the set's containers.
class EventLoopTimeout {
public:
    // Sentinel meaning "not currently tracked by any TimeoutSet".
    static constexpr ssize_t INVALID_INDEX = NumericLimits<ssize_t>::max();

    EventLoopTimeout() { }
    virtual ~EventLoopTimeout() = default;

    // Called by TimeoutSet once the deadline has passed.
    virtual void fire(TimeoutSet& timeout_set, MonotonicTime time) = 0;

    // Only meaningful once m_fire_time is active (after absolutize(), or a
    // direct write by a subclass).
    MonotonicTime fire_time() const { return m_fire_time; }

    // Convert the stored relative duration into an absolute deadline.
    // NOTE: reads m_duration and writes m_fire_time, which share storage.
    void absolutize(Badge<TimeoutSet>, MonotonicTime current_time)
    {
        m_fire_time = current_time + m_duration;
    }

    // Bookkeeping hooks for TimeoutSet: index >= 0 means "in the heap",
    // a negative index means "in the relative-timeout list", and
    // INVALID_INDEX means "not scheduled at all".
    ssize_t& index(Badge<TimeoutSet>) { return m_index; }
    void set_index(Badge<TimeoutSet>, ssize_t index) { m_index = index; }

    bool is_scheduled() const { return m_index != INVALID_INDEX; }

protected:
    // Space-saving union: a timeout either waits with a relative duration
    // or is queued with an absolute fire time, never both at once.
    union {
        Duration m_duration;
        MonotonicTime m_fire_time;
    };

private:
    ssize_t m_index = INVALID_INDEX;
};
// Tracks all timeouts for one thread. Absolute timeouts live in a binary
// min-heap keyed on fire time; relative timeouts sit in a side list until
// absolutize_relative_timeouts() stamps them with an absolute deadline.
// The set does not own the timeouts it tracks.
class TimeoutSet {
public:
    TimeoutSet() = default;

    // Deadline of the soonest queued timeout, if any — used by the event
    // loop to bound how long poll() may sleep.
    Optional<MonotonicTime> next_timer_expiration()
    {
        if (!m_heap.is_empty()) {
            return m_heap.peek_min()->fire_time();
        } else {
            return {};
        }
    }

    // Move every relative timeout into the heap, giving each an absolute
    // deadline of current_time + its stored duration.
    void absolutize_relative_timeouts(MonotonicTime current_time)
    {
        for (auto timeout : m_scheduled_timeouts) {
            timeout->absolutize({}, current_time);
            m_heap.insert(timeout);
        }
        m_scheduled_timeouts.clear();
    }

    // Pop and fire every timeout whose deadline is <= current_time. Each
    // timeout is detached (index invalidated) BEFORE fire() runs, so a
    // handler may immediately reschedule itself. Returns how many fired.
    size_t fire_expired(MonotonicTime current_time)
    {
        size_t fired_count = 0;
        while (!m_heap.is_empty()) {
            auto& timeout = *m_heap.peek_min();
            if (timeout.fire_time() <= current_time) {
                ++fired_count;
                m_heap.pop_min();
                timeout.set_index({}, EventLoopTimeout::INVALID_INDEX);
                timeout.fire(*this, current_time);
            } else {
                break;
            }
        }
        return fired_count;
    }

    // Queue a timeout whose m_duration is relative to the next loop
    // iteration. Its list position is encoded as a negative index
    // (-1 - i) so unschedule() can tell the two containers apart.
    void schedule_relative(EventLoopTimeout* timeout)
    {
        timeout->set_index({}, -1 - static_cast<ssize_t>(m_scheduled_timeouts.size()));
        m_scheduled_timeouts.append(timeout);
    }

    // Queue a timeout whose m_fire_time is already absolute.
    void schedule_absolute(EventLoopTimeout* timeout)
    {
        m_heap.insert(timeout);
    }

    // Remove a timeout from whichever container currently holds it.
    void unschedule(EventLoopTimeout* timeout)
    {
        if (timeout->index({}) < 0) {
            // Negative index: it lives in m_scheduled_timeouts. Swap-remove
            // with the last element, fixing up both stored indices.
            size_t i = -1 - timeout->index({});
            size_t j = m_scheduled_timeouts.size() - 1;
            VERIFY(m_scheduled_timeouts[i] == timeout);
            swap(m_scheduled_timeouts[i], m_scheduled_timeouts[j]);
            swap(m_scheduled_timeouts[i]->index({}), m_scheduled_timeouts[j]->index({}));
            (void)m_scheduled_timeouts.take_last();
        } else {
            m_heap.pop(timeout->index({}));
        }
        timeout->set_index({}, EventLoopTimeout::INVALID_INDEX);
    }

    // Detach everything (used after fork()). The timeouts themselves are
    // not owned by the set and are not freed here.
    void clear()
    {
        for (auto* timeout : m_heap.nodes_in_arbitrary_order())
            timeout->set_index({}, EventLoopTimeout::INVALID_INDEX);
        m_heap.clear();
        for (auto* timeout : m_scheduled_timeouts)
            timeout->set_index({}, EventLoopTimeout::INVALID_INDEX);
        m_scheduled_timeouts.clear();
    }

private:
    // Min-heap ordered by fire time; the second lambda keeps each node's
    // stored index in sync as the heap shuffles elements around.
    IntrusiveBinaryHeap<
        EventLoopTimeout*,
        decltype([](EventLoopTimeout* a, EventLoopTimeout* b) {
            return a->fire_time() < b->fire_time();
        }),
        decltype([](EventLoopTimeout* timeout, size_t index) {
            timeout->set_index({}, static_cast<ssize_t>(index));
        }),
        8>
        m_heap;
    Vector<EventLoopTimeout*, 8> m_scheduled_timeouts;
};
// A concrete timeout representing a LibCore timer: posts TimerEvents to its
// owner, optionally reloading itself so it fires repeatedly.
class EventLoopTimer final : public EventLoopTimeout {
public:
    // How far behind schedule a reloading timer may fall before we log
    // that iterations are being skipped.
    static constexpr auto delay_tolerance = Duration::from_milliseconds(5);

    EventLoopTimer() = default;

    // Reset the deadline to one full interval from `now`.
    void reload(MonotonicTime const& now) { m_fire_time = now + interval; }

    virtual void fire(TimeoutSet& timeout_set, MonotonicTime current_time) override
    {
        // If the owner has died, the timer silently stops firing (it stays
        // unscheduled until unregister_timer() deletes it).
        auto strong_owner = owner.strong_ref();

        if (!strong_owner)
            return;

        if (should_reload) {
            MonotonicTime next_fire_time = m_fire_time + interval;
            if (next_fire_time <= current_time) {
                // We're already past the next deadline: warn if we're far
                // behind, then skip ahead to one interval from now.
                auto delay = current_time - next_fire_time;
                if (delay >= delay_tolerance && !interval.is_zero()) {
                    auto iterations = delay.to_milliseconds() / max<i64>(1, interval.to_milliseconds()) + 1;
                    dbgln("Can't keep up! Skipping approximately {} iteration(s) of a reloading timer (delayed by {}ms).", iterations, delay.to_milliseconds());
                }
                next_fire_time = current_time + interval;
            }
            m_fire_time = next_fire_time;
            if (next_fire_time != current_time) {
                timeout_set.schedule_absolute(this);
            } else {
                // NOTE: Unfortunately we need to treat timeouts with the zero interval in a
                // special way. TimeoutSet::schedule_absolute for them will result in an
                // infinite loop. TimeoutSet::schedule_relative, on the other hand, will do a
                // correct thing of scheduling them for the next iteration of the loop.
                m_duration = {};
                timeout_set.schedule_relative(this);
            }
        }

        // FIXME: While TimerShouldFireWhenNotVisible::Yes prevents the timer callback from being
        //        called, it doesn't allow event loop to sleep since it needs to constantly check if
        //        is_visible_for_timer_purposes changed. A better solution will be to unregister a
        //        timer and register it back again when needed. This also has an added benefit of
        //        making fire_when_not_visible and is_visible_for_timer_purposes obsolete.
        if (fire_when_not_visible == TimerShouldFireWhenNotVisible::Yes || strong_owner->is_visible_for_timer_purposes())
            ThreadEventQueue::current().post_event(*strong_owner, make<TimerEvent>());
    }

    Duration interval;
    bool should_reload { false };
    TimerShouldFireWhenNotVisible fire_when_not_visible { TimerShouldFireWhenNotVisible::No };
    WeakPtr<EventReceiver> owner;
};
// Per-thread event loop state, lazily created on first access.
struct ThreadData {
    static ThreadData& the()
    {
        if (!s_thread_data) {
            // FIXME: Don't leak this.
            s_thread_data = new ThreadData;
        }
        return *s_thread_data;
    }

    ThreadData()
    {
        pid = getpid();
        initialize_wake_pipe();
    }

    // (Re)create the wake pipe; also used after fork() to stop sharing a
    // pipe with the parent. Must run before any notifiers are registered,
    // since poll_fds slot 0 is reserved for the pipe's read end.
    void initialize_wake_pipe()
    {
        if (wake_pipe_fds[0] != -1)
            close(wake_pipe_fds[0]);
        if (wake_pipe_fds[1] != -1)
            close(wake_pipe_fds[1]);

        // NOTE(review): the guard tests SOCK_NONBLOCK but the call passes
        // O_CLOEXEC — presumably SOCK_NONBLOCK acts as a proxy for
        // "pipe2() exists on this platform"; confirm on supported targets.
#if defined(SOCK_NONBLOCK)
        int rc = pipe2(wake_pipe_fds, O_CLOEXEC);
#else
        int rc = pipe(wake_pipe_fds);
        fcntl(wake_pipe_fds[0], F_SETFD, FD_CLOEXEC);
        fcntl(wake_pipe_fds[1], F_SETFD, FD_CLOEXEC);
#endif
        VERIFY(rc == 0);

        // The wake pipe informs us of POSIX signals as well as manual calls to wake()
        VERIFY(poll_fds.size() == 0);
        poll_fds.append({ .fd = wake_pipe_fds[0], .events = POLLIN, .revents = 0 });
        notifier_by_index.append(nullptr); // slot 0 has no notifier
    }

    // Each thread has its own timers, notifiers and a wake pipe.
    TimeoutSet timeouts;

    // poll_fds and notifier_by_index are parallel arrays; notifier_by_ptr
    // maps a notifier back to its index for O(1) unregistration.
    Vector<pollfd> poll_fds;
    HashMap<Notifier*, size_t> notifier_by_ptr;
    Vector<Notifier*> notifier_by_index;

    // The wake pipe is used to notify another event loop that someone has called wake(), or a signal has been received.
    // wake() writes 0i32 into the pipe, signals write the signal number (guaranteed non-zero).
    int wake_pipe_fds[2] { -1, -1 };

    // Used to detect that a signal arrived in a forked child (see
    // EventLoopManagerUnix::handle_signal()).
    pid_t pid { 0 };
};
  228. }
// Cache a pointer to this thread's wake pipe fds so wake() can write to it
// without re-resolving ThreadData every time.
EventLoopImplementationUnix::EventLoopImplementationUnix()
    : m_wake_pipe_fds(&ThreadData::the().wake_pipe_fds)
{
}

EventLoopImplementationUnix::~EventLoopImplementationUnix() = default;
  234. int EventLoopImplementationUnix::exec()
  235. {
  236. for (;;) {
  237. if (m_exit_requested)
  238. return m_exit_code;
  239. pump(PumpMode::WaitForEvents);
  240. }
  241. VERIFY_NOT_REACHED();
  242. }
  243. size_t EventLoopImplementationUnix::pump(PumpMode mode)
  244. {
  245. static_cast<EventLoopManagerUnix&>(EventLoopManager::the()).wait_for_events(mode);
  246. return ThreadEventQueue::current().process();
  247. }
  248. void EventLoopImplementationUnix::quit(int code)
  249. {
  250. m_exit_requested = true;
  251. m_exit_code = code;
  252. }
  253. void EventLoopImplementationUnix::unquit()
  254. {
  255. m_exit_requested = false;
  256. m_exit_code = 0;
  257. }
// True after quit() has been called, until unquit() resets it.
bool EventLoopImplementationUnix::was_exit_requested() const
{
    return m_exit_requested;
}
  262. void EventLoopImplementationUnix::post_event(EventReceiver& receiver, NonnullOwnPtr<Event>&& event)
  263. {
  264. m_thread_event_queue.post_event(receiver, move(event));
  265. if (&m_thread_event_queue != &ThreadEventQueue::current())
  266. wake();
  267. }
  268. void EventLoopImplementationUnix::wake()
  269. {
  270. int wake_event = 0;
  271. MUST(Core::System::write((*m_wake_pipe_fds)[1], { &wake_event, sizeof(wake_event) }));
  272. }
// Core of the Unix event loop: sleep in poll() until a notifier fd is ready,
// the wake pipe is written (wake() or a POSIX signal), or the next timer is
// due — then translate what happened into queued events and fire timers.
void EventLoopManagerUnix::wait_for_events(EventLoopImplementation::PumpMode mode)
{
    auto& thread_data = ThreadData::the();

retry:
    bool has_pending_events = ThreadEventQueue::current().has_pending_events();

    auto time_at_iteration_start = MonotonicTime::now_coarse();
    thread_data.timeouts.absolutize_relative_timeouts(time_at_iteration_start);

    // Figure out how long to wait at maximum.
    // This mainly depends on the PumpMode and whether we have pending events, but also the next expiring timer.
    int timeout = 0;
    bool should_wait_forever = false;
    if (mode == EventLoopImplementation::PumpMode::WaitForEvents && !has_pending_events) {
        auto next_timer_expiration = thread_data.timeouts.next_timer_expiration();
        if (next_timer_expiration.has_value()) {
            // Sleep until the next timer is due, clamped to >= 0 and to the
            // range of poll()'s int timeout argument.
            auto computed_timeout = next_timer_expiration.value() - time_at_iteration_start;
            if (computed_timeout.is_negative())
                computed_timeout = Duration::zero();
            i64 true_timeout = computed_timeout.to_milliseconds();
            timeout = static_cast<i32>(min<i64>(AK::NumericLimits<i32>::max(), true_timeout));
        } else {
            // No timers at all: block indefinitely (poll timeout -1).
            should_wait_forever = true;
        }
    }

try_select_again:
    // select() and wait for file system events, calls to wake(), POSIX signals, or timer expirations.
    ErrorOr<int> error_or_marked_fd_count = System::poll(thread_data.poll_fds, should_wait_forever ? -1 : timeout);
    auto time_after_poll = MonotonicTime::now_coarse();
    // Because POSIX, we might spuriously return from select() with EINTR; just select again.
    if (error_or_marked_fd_count.is_error()) {
        if (error_or_marked_fd_count.error().code() == EINTR)
            goto try_select_again;
        dbgln("EventLoopImplementationUnix::wait_for_events: {}", error_or_marked_fd_count.error());
        VERIFY_NOT_REACHED();
    }

    // We woke up due to a call to wake() or a POSIX signal.
    // Handle signals and see whether we need to handle events as well.
    // (poll_fds[0] is always the read end of the wake pipe.)
    if (has_flag(thread_data.poll_fds[0].revents, POLLIN)) {
        int wake_events[8];
        ssize_t nread;
        // We might receive another signal while read()ing here. The signal will go to the handle_signal properly,
        // but we get interrupted. Therefore, just retry while we were interrupted.
        do {
            errno = 0;
            nread = read(thread_data.wake_pipe_fds[0], wake_events, sizeof(wake_events));
            if (nread == 0)
                break;
        } while (nread < 0 && errno == EINTR);
        if (nread < 0) {
            perror("EventLoopImplementationUnix::wait_for_events: read from wake pipe");
            VERIFY_NOT_REACHED();
        }
        VERIFY(nread > 0);
        bool wake_requested = false;
        int event_count = nread / sizeof(wake_events[0]);
        // Each int read from the pipe is either 0 (an explicit wake()) or a
        // non-zero POSIX signal number written by handle_signal().
        for (int i = 0; i < event_count; i++) {
            if (wake_events[i] != 0)
                dispatch_signal(wake_events[i]);
            else
                wake_requested = true;
        }

        // If we filled the whole buffer and nobody explicitly asked for a
        // wake, more signal numbers may still be in the pipe: start over.
        if (!wake_requested && nread == sizeof(wake_events))
            goto retry;
    }

    if (error_or_marked_fd_count.value() != 0) {
        // Handle file system notifiers by making them normal events.
        // (Index 0 is the wake pipe, so notifier slots start at 1.)
        for (size_t i = 1; i < thread_data.poll_fds.size(); ++i) {
            auto& revents = thread_data.poll_fds[i].revents;
            auto& notifier = *thread_data.notifier_by_index[i];
            NotificationType type = NotificationType::None;
            if (has_flag(revents, POLLIN))
                type |= NotificationType::Read;
            if (has_flag(revents, POLLOUT))
                type |= NotificationType::Write;
            if (has_flag(revents, POLLHUP))
                type |= NotificationType::HangUp;
            if (has_flag(revents, POLLERR))
                type |= NotificationType::Error;
            // Only deliver the notification kinds this notifier asked for.
            type &= notifier.type();

            if (type != NotificationType::None)
                ThreadEventQueue::current().post_event(notifier, make<NotifierActivationEvent>(notifier.fd(), type));
        }
    }

    // Handle expired timers.
    thread_data.timeouts.fire_expired(time_after_poll);
}
// Ref-counted list of user callbacks for one POSIX signal number.
// Mutations made while dispatch() is iterating are parked in
// m_handlers_pending (non-null value: pending add; null value: pending
// remove) and applied once the iteration finishes.
class SignalHandlers : public RefCounted<SignalHandlers> {
    AK_MAKE_NONCOPYABLE(SignalHandlers);
    AK_MAKE_NONMOVABLE(SignalHandlers);

public:
    SignalHandlers(int signal_number, void (*handle_signal)(int));
    ~SignalHandlers();

    void dispatch();
    int add(Function<void(int)>&& handler);
    bool remove(int handler_id);

    // Empty means "no active handlers and no pending adds" — used by
    // unregister_signal() to decide when to drop the whole entry.
    bool is_empty() const
    {
        if (m_calling_handlers) {
            for (auto& handler : m_handlers_pending) {
                if (handler.value)
                    return false; // an add is pending
            }
        }
        return m_handlers.is_empty();
    }

    bool have(int handler_id) const
    {
        if (m_calling_handlers) {
            auto it = m_handlers_pending.find(handler_id);
            if (it != m_handlers_pending.end()) {
                if (!it->value)
                    return false; // a deletion is pending
            }
        }
        return m_handlers.contains(handler_id);
    }

    int m_signal_number;
    void (*m_original_handler)(int); // TODO: can't use sighandler_t?
    HashMap<int, Function<void(int)>> m_handlers;
    HashMap<int, Function<void(int)>> m_handlers_pending;
    // True while dispatch() is iterating m_handlers; diverts mutations
    // into m_handlers_pending.
    bool m_calling_handlers { false };
};
// Process-global registry mapping signal numbers to their handler lists.
struct SignalHandlersInfo {
    HashMap<int, NonnullRefPtr<SignalHandlers>> signal_handlers;
    int next_signal_id { 0 }; // monotonically increasing handler-id source
};

static Singleton<SignalHandlersInfo> s_signals;
// Accessor for the process-global signal registry.
// NOTE(review): the create_if_null template parameter is currently unused —
// the Singleton instantiates on first access either way; callers like
// signals_info<false>() get no special behavior from it.
template<bool create_if_null = true>
inline SignalHandlersInfo* signals_info()
{
    return s_signals.ptr();
}
  404. void EventLoopManagerUnix::dispatch_signal(int signal_number)
  405. {
  406. auto& info = *signals_info();
  407. auto handlers = info.signal_handlers.find(signal_number);
  408. if (handlers != info.signal_handlers.end()) {
  409. // Make sure we bump the ref count while dispatching the handlers!
  410. // This allows a handler to unregister/register while the handlers
  411. // are being called!
  412. auto handler = handlers->value;
  413. handler->dispatch();
  414. }
  415. }
// Called in the child after fork(): throw away all inherited event loop
// state (timers, notifiers, signal handlers) and re-create the wake pipe
// so the child does not share one with its parent.
void EventLoopImplementationUnix::notify_forked_and_in_child()
{
    auto& thread_data = ThreadData::the();
    thread_data.timeouts.clear();
    thread_data.poll_fds.clear();
    thread_data.notifier_by_ptr.clear();
    thread_data.notifier_by_index.clear();
    thread_data.initialize_wake_pipe();
    if (auto* info = signals_info<false>()) {
        info->signal_handlers.clear();
        info->next_signal_id = 0;
    }
    // Record the child's pid so handle_signal() routes signals correctly.
    thread_data.pid = getpid();
}
// Install `handle_signal` as the process disposition for this signal,
// remembering the previous handler so it can be restored on teardown.
SignalHandlers::SignalHandlers(int signal_number, void (*handle_signal)(int))
    : m_signal_number(signal_number)
    , m_original_handler(signal(signal_number, handle_signal))
{
}

SignalHandlers::~SignalHandlers()
{
    // Restore whatever disposition was in place before we registered.
    signal(m_signal_number, m_original_handler);
}
// Invoke every registered handler for this signal. While iterating,
// m_calling_handlers makes add()/remove() divert into m_handlers_pending;
// those deferred mutations are applied once the iteration is done.
void SignalHandlers::dispatch()
{
    TemporaryChange change(m_calling_handlers, true);
    for (auto& handler : m_handlers)
        handler.value(m_signal_number);
    if (!m_handlers_pending.is_empty()) {
        // Apply pending adds/removes
        for (auto& handler : m_handlers_pending) {
            if (handler.value) {
                // Pending add: the id is fresh, so it must not collide.
                auto result = m_handlers.set(handler.key, move(handler.value));
                VERIFY(result == AK::HashSetResult::InsertedNewEntry);
            } else {
                // Pending remove (marked with a null Function).
                m_handlers.remove(handler.key);
            }
        }
        m_handlers_pending.clear();
    }
}
  457. int SignalHandlers::add(Function<void(int)>&& handler)
  458. {
  459. int id = ++signals_info()->next_signal_id; // TODO: worry about wrapping and duplicates?
  460. if (m_calling_handlers)
  461. m_handlers_pending.set(id, move(handler));
  462. else
  463. m_handlers.set(id, move(handler));
  464. return id;
  465. }
// Unregister a handler by id; returns true if it was found (or a pending
// removal was recorded). Safe to call from inside a running handler: during
// dispatch() the removal is only marked in m_handlers_pending.
bool SignalHandlers::remove(int handler_id)
{
    VERIFY(handler_id != 0);
    if (m_calling_handlers) {
        auto it = m_handlers.find(handler_id);
        if (it != m_handlers.end()) {
            // Mark pending remove
            m_handlers_pending.set(handler_id, {});
            return true;
        }
        // Not in the active map: it may be a handler added during this same
        // dispatch (a pending add), which we can still cancel here.
        it = m_handlers_pending.find(handler_id);
        if (it != m_handlers_pending.end()) {
            if (!it->value)
                return false; // already was marked as deleted
            it->value = nullptr;
            return true;
        }
        return false;
    }
    return m_handlers.remove(handler_id);
}
// The raw handler installed via signal(). It only write()s the signal
// number into the wake pipe (write is async-signal-safe); the event loop
// reads it back and dispatches user callbacks from wait_for_events().
void EventLoopManagerUnix::handle_signal(int signal_number)
{
    VERIFY(signal_number != 0);
    auto& thread_data = ThreadData::the();
    // We MUST check if the current pid still matches, because there
    // is a window between fork() and exec() where a signal delivered
    // to our fork could be inadvertently routed to the parent process!
    if (getpid() == thread_data.pid) {
        int nwritten = write(thread_data.wake_pipe_fds[1], &signal_number, sizeof(signal_number));
        if (nwritten < 0) {
            perror("EventLoopImplementationUnix::register_signal: write");
            VERIFY_NOT_REACHED();
        }
    } else {
        // We're a fork who received a signal, reset thread_data.pid.
        thread_data.pid = getpid();
    }
}
  505. int EventLoopManagerUnix::register_signal(int signal_number, Function<void(int)> handler)
  506. {
  507. VERIFY(signal_number != 0);
  508. auto& info = *signals_info();
  509. auto handlers = info.signal_handlers.find(signal_number);
  510. if (handlers == info.signal_handlers.end()) {
  511. auto signal_handlers = adopt_ref(*new SignalHandlers(signal_number, EventLoopManagerUnix::handle_signal));
  512. auto handler_id = signal_handlers->add(move(handler));
  513. info.signal_handlers.set(signal_number, move(signal_handlers));
  514. return handler_id;
  515. } else {
  516. return handlers->value->add(move(handler));
  517. }
  518. }
  519. void EventLoopManagerUnix::unregister_signal(int handler_id)
  520. {
  521. VERIFY(handler_id != 0);
  522. int remove_signal_number = 0;
  523. auto& info = *signals_info();
  524. for (auto& h : info.signal_handlers) {
  525. auto& handlers = *h.value;
  526. if (handlers.remove(handler_id)) {
  527. if (handlers.is_empty())
  528. remove_signal_number = handlers.m_signal_number;
  529. break;
  530. }
  531. }
  532. if (remove_signal_number != 0)
  533. info.signal_handlers.remove(remove_signal_number);
  534. }
  535. intptr_t EventLoopManagerUnix::register_timer(EventReceiver& object, int milliseconds, bool should_reload, TimerShouldFireWhenNotVisible fire_when_not_visible)
  536. {
  537. VERIFY(milliseconds >= 0);
  538. auto& thread_data = ThreadData::the();
  539. auto timer = new EventLoopTimer;
  540. timer->owner = object;
  541. timer->interval = Duration::from_milliseconds(milliseconds);
  542. timer->reload(MonotonicTime::now_coarse());
  543. timer->should_reload = should_reload;
  544. timer->fire_when_not_visible = fire_when_not_visible;
  545. thread_data.timeouts.schedule_absolute(timer);
  546. return bit_cast<intptr_t>(timer);
  547. }
  548. void EventLoopManagerUnix::unregister_timer(intptr_t timer_id)
  549. {
  550. auto& thread_data = ThreadData::the();
  551. auto* timer = bit_cast<EventLoopTimer*>(timer_id);
  552. if (timer->is_scheduled())
  553. thread_data.timeouts.unschedule(timer);
  554. delete timer;
  555. }
  556. void EventLoopManagerUnix::register_notifier(Notifier& notifier)
  557. {
  558. auto& thread_data = ThreadData::the();
  559. thread_data.notifier_by_ptr.set(&notifier, thread_data.poll_fds.size());
  560. thread_data.notifier_by_index.append(&notifier);
  561. thread_data.poll_fds.append({
  562. .fd = notifier.fd(),
  563. .events = notification_type_to_poll_events(notifier.type()),
  564. .revents = 0,
  565. });
  566. }
// Stop watching a notifier. Uses swap-with-last removal so the parallel
// poll_fds / notifier_by_index arrays stay dense.
void EventLoopManagerUnix::unregister_notifier(Notifier& notifier)
{
    auto& thread_data = ThreadData::the();

    auto it = thread_data.notifier_by_ptr.find(&notifier);
    VERIFY(it != thread_data.notifier_by_ptr.end());

    size_t notifier_index = it->value;
    thread_data.notifier_by_ptr.remove(it);

    if (notifier_index + 1 != thread_data.poll_fds.size()) {
        // Not the last slot: move the last entry into the vacated slot in
        // both arrays, then fix up that entry's index mapping.
        swap(thread_data.poll_fds[notifier_index], thread_data.poll_fds.last());
        swap(thread_data.notifier_by_index[notifier_index], thread_data.notifier_by_index.last());
        thread_data.notifier_by_ptr.set(thread_data.notifier_by_index[notifier_index], notifier_index);
    }

    thread_data.poll_fds.take_last();
    thread_data.notifier_by_index.take_last();
}
// No-op in the Unix implementation: cross-thread wakeups are issued
// explicitly via wake() in post_event() instead.
void EventLoopManagerUnix::did_post_event()
{
}

EventLoopManagerUnix::~EventLoopManagerUnix() = default;
  586. NonnullOwnPtr<EventLoopImplementation> EventLoopManagerUnix::make_implementation()
  587. {
  588. return adopt_own(*new EventLoopImplementationUnix);
  589. }
  590. }