// Kernel/TimerQueue.cpp
  1. /*
  2. * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
  3. *
  4. * SPDX-License-Identifier: BSD-2-Clause
  5. */
  6. #include <AK/Function.h>
  7. #include <AK/NonnullOwnPtr.h>
  8. #include <AK/OwnPtr.h>
  9. #include <AK/Singleton.h>
  10. #include <AK/Time.h>
  11. #include <Kernel/Scheduler.h>
  12. #include <Kernel/Time/TimeManagement.h>
  13. #include <Kernel/TimerQueue.h>
  14. namespace Kernel {
// Lazily-constructed global TimerQueue instance, returned by TimerQueue::the().
static AK::Singleton<TimerQueue> s_the;
// Spinlock guarding all timer queues and the executing-timers list;
// also taken inside fire()'s deferred callbacks.
static SpinLock<u8> g_timerqueue_lock;
  17. Time Timer::remaining() const
  18. {
  19. return m_remaining;
  20. }
  21. Time Timer::now(bool is_firing) const
  22. {
  23. // NOTE: If is_firing is true then TimePrecision::Precise isn't really useful here.
  24. // We already have a quite precise time stamp because we just updated the time in the
  25. // interrupt handler. In those cases, just use coarse timestamps.
  26. auto clock_id = m_clock_id;
  27. if (is_firing) {
  28. switch (clock_id) {
  29. case CLOCK_MONOTONIC:
  30. clock_id = CLOCK_MONOTONIC_COARSE;
  31. break;
  32. case CLOCK_MONOTONIC_RAW:
  33. // TODO: use a special CLOCK_MONOTONIC_RAW_COARSE like mechanism here
  34. break;
  35. case CLOCK_REALTIME:
  36. clock_id = CLOCK_REALTIME_COARSE;
  37. break;
  38. default:
  39. break;
  40. }
  41. }
  42. return TimeManagement::the().current_time(clock_id).value();
  43. }
  44. TimerQueue& TimerQueue::the()
  45. {
  46. return *s_the;
  47. }
  48. UNMAP_AFTER_INIT TimerQueue::TimerQueue()
  49. {
  50. m_ticks_per_second = TimeManagement::the().ticks_per_second();
  51. }
  52. RefPtr<Timer> TimerQueue::add_timer_without_id(clockid_t clock_id, const Time& deadline, Function<void()>&& callback)
  53. {
  54. if (deadline <= TimeManagement::the().current_time(clock_id).value())
  55. return {};
  56. // Because timer handlers can execute on any processor and there is
  57. // a race between executing a timer handler and cancel_timer() this
  58. // *must* be a RefPtr<Timer>. Otherwise calling cancel_timer() could
  59. // inadvertently cancel another timer that has been created between
  60. // returning from the timer handler and a call to cancel_timer().
  61. auto timer = adopt_ref(*new Timer(clock_id, deadline, move(callback)));
  62. ScopedSpinLock lock(g_timerqueue_lock);
  63. timer->m_id = 0; // Don't generate a timer id
  64. add_timer_locked(timer);
  65. return timer;
  66. }
  67. TimerId TimerQueue::add_timer(NonnullRefPtr<Timer>&& timer)
  68. {
  69. ScopedSpinLock lock(g_timerqueue_lock);
  70. timer->m_id = ++m_timer_id_count;
  71. VERIFY(timer->m_id != 0); // wrapped
  72. add_timer_locked(move(timer));
  73. return timer->m_id;
  74. }
  75. void TimerQueue::add_timer_locked(NonnullRefPtr<Timer> timer)
  76. {
  77. Time timer_expiration = timer->m_expires;
  78. VERIFY(!timer->is_queued());
  79. auto& queue = queue_for_timer(*timer);
  80. if (queue.list.is_empty()) {
  81. queue.list.append(&timer.leak_ref());
  82. queue.next_timer_due = timer_expiration;
  83. } else {
  84. Timer* following_timer = nullptr;
  85. queue.list.for_each([&](Timer& t) {
  86. if (t.m_expires > timer_expiration) {
  87. following_timer = &t;
  88. return IterationDecision::Break;
  89. }
  90. return IterationDecision::Continue;
  91. });
  92. if (following_timer) {
  93. bool next_timer_needs_update = queue.list.head() == following_timer;
  94. queue.list.insert_before(following_timer, &timer.leak_ref());
  95. if (next_timer_needs_update)
  96. queue.next_timer_due = timer_expiration;
  97. } else {
  98. queue.list.append(&timer.leak_ref());
  99. }
  100. }
  101. }
  102. TimerId TimerQueue::add_timer(clockid_t clock_id, const Time& deadline, Function<void()>&& callback)
  103. {
  104. auto expires = TimeManagement::the().current_time(clock_id).value();
  105. expires = expires + deadline;
  106. return add_timer(adopt_ref(*new Timer(clock_id, expires, move(callback))));
  107. }
  108. bool TimerQueue::cancel_timer(TimerId id)
  109. {
  110. Timer* found_timer = nullptr;
  111. Queue* timer_queue = nullptr;
  112. ScopedSpinLock lock(g_timerqueue_lock);
  113. if (m_timer_queue_monotonic.list.for_each([&](Timer& timer) {
  114. if (timer.m_id == id) {
  115. found_timer = &timer;
  116. timer_queue = &m_timer_queue_monotonic;
  117. return IterationDecision::Break;
  118. }
  119. return IterationDecision::Continue;
  120. })
  121. != IterationDecision::Break) {
  122. m_timer_queue_realtime.list.for_each([&](Timer& timer) {
  123. if (timer.m_id == id) {
  124. found_timer = &timer;
  125. timer_queue = &m_timer_queue_realtime;
  126. return IterationDecision::Break;
  127. }
  128. return IterationDecision::Continue;
  129. });
  130. }
  131. if (!found_timer) {
  132. // The timer may be executing right now, if it is then it should
  133. // be in m_timers_executing. If it is then release the lock
  134. // briefly to allow it to finish by removing itself
  135. // NOTE: This can only happen with multiple processors!
  136. while (m_timers_executing.for_each([&](Timer& timer) {
  137. if (timer.m_id == id)
  138. return IterationDecision::Break;
  139. return IterationDecision::Continue;
  140. }) == IterationDecision::Break) {
  141. // NOTE: This isn't the most efficient way to wait, but
  142. // it should only happen when multiple processors are used.
  143. // Also, the timers should execute pretty quickly, so it
  144. // should not loop here for very long. But we can't yield.
  145. lock.unlock();
  146. Processor::wait_check();
  147. lock.lock();
  148. }
  149. // We were not able to cancel the timer, but at this point
  150. // the handler should have completed if it was running!
  151. return false;
  152. }
  153. VERIFY(found_timer);
  154. VERIFY(timer_queue);
  155. remove_timer_locked(*timer_queue, *found_timer);
  156. return true;
  157. }
  158. bool TimerQueue::cancel_timer(Timer& timer)
  159. {
  160. auto& timer_queue = queue_for_timer(timer);
  161. ScopedSpinLock lock(g_timerqueue_lock);
  162. if (!timer_queue.list.contains_slow(&timer)) {
  163. // The timer may be executing right now, if it is then it should
  164. // be in m_timers_executing. If it is then release the lock
  165. // briefly to allow it to finish by removing itself
  166. // NOTE: This can only happen with multiple processors!
  167. while (m_timers_executing.contains_slow(&timer)) {
  168. // NOTE: This isn't the most efficient way to wait, but
  169. // it should only happen when multiple processors are used.
  170. // Also, the timers should execute pretty quickly, so it
  171. // should not loop here for very long. But we can't yield.
  172. lock.unlock();
  173. Processor::wait_check();
  174. lock.lock();
  175. }
  176. // We were not able to cancel the timer, but at this point
  177. // the handler should have completed if it was running!
  178. return false;
  179. }
  180. VERIFY(timer.ref_count() > 1);
  181. remove_timer_locked(timer_queue, timer);
  182. return true;
  183. }
  184. void TimerQueue::remove_timer_locked(Queue& queue, Timer& timer)
  185. {
  186. bool was_next_timer = (queue.list.head() == &timer);
  187. queue.list.remove(&timer);
  188. timer.set_queued(false);
  189. auto now = timer.now(false);
  190. if (timer.m_expires > now)
  191. timer.m_remaining = timer.m_expires - now;
  192. if (was_next_timer)
  193. update_next_timer_due(queue);
  194. // Whenever we remove a timer that was still queued (but hasn't been
  195. // fired) we added a reference to it. So, when removing it from the
  196. // queue we need to drop that reference.
  197. timer.unref();
  198. }
// Called from the timer interrupt: pops every expired timer off the head of
// each (expiration-sorted) queue and defers its callback out of IRQ context.
// NOTE(review): the lock is dropped and retaken inside the loop, so the code
// deliberately re-reads the queue head each iteration — keep the statement
// order exactly as-is.
void TimerQueue::fire()
{
    ScopedSpinLock lock(g_timerqueue_lock);
    auto fire_timers = [&](Queue& queue) {
        auto* timer = queue.list.head();
        VERIFY(timer);
        VERIFY(queue.next_timer_due == timer->m_expires);
        // A timer fires only strictly *after* its expiration time
        // (comparison is >, not >=).
        while (timer && timer->now(true) > timer->m_expires) {
            queue.list.remove(timer);
            timer->set_queued(false);
            // Park the timer in m_timers_executing so cancel_timer() can
            // detect (and wait for) an in-flight callback.
            m_timers_executing.append(timer);
            update_next_timer_due(queue);
            // Release the lock while queueing the deferred call; the
            // callback below retakes it to remove itself.
            lock.unlock();
            // Defer executing the timer outside of the irq handler
            Processor::current().deferred_call_queue([this, timer]() {
                timer->m_callback();
                ScopedSpinLock lock(g_timerqueue_lock);
                m_timers_executing.remove(timer);
                // Drop the reference we added when queueing the timer
                timer->unref();
            });
            lock.lock();
            // Another processor may have changed the queue while we were
            // unlocked, so re-read the head instead of walking a stale link.
            timer = queue.list.head();
        }
    };
    if (!m_timer_queue_monotonic.list.is_empty())
        fire_timers(m_timer_queue_monotonic);
    if (!m_timer_queue_realtime.list.is_empty())
        fire_timers(m_timer_queue_realtime);
}
  229. void TimerQueue::update_next_timer_due(Queue& queue)
  230. {
  231. VERIFY(g_timerqueue_lock.is_locked());
  232. if (auto* next_timer = queue.list.head())
  233. queue.next_timer_due = next_timer->m_expires;
  234. else
  235. queue.next_timer_due = {};
  236. }
  237. }