// Kernel/TimerQueue.cpp
  1. /*
  2. * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
  3. *
  4. * SPDX-License-Identifier: BSD-2-Clause
  5. */
  6. #include <AK/Function.h>
  7. #include <AK/NonnullOwnPtr.h>
  8. #include <AK/OwnPtr.h>
  9. #include <AK/Singleton.h>
  10. #include <AK/Time.h>
  11. #include <Kernel/Scheduler.h>
  12. #include <Kernel/Time/TimeManagement.h>
  13. #include <Kernel/TimerQueue.h>
  14. namespace Kernel {
// The lazily-constructed global TimerQueue instance (see TimerQueue::the()).
static AK::Singleton<TimerQueue> s_the;
// Protects both timer queues and m_timers_executing across all processors.
static SpinLock<u8> g_timerqueue_lock;
// Returns the time the timer had left when it was last removed from its
// queue (m_remaining is updated by TimerQueue::remove_timer_locked()).
Time Timer::remaining() const
{
    return m_remaining;
}
  21. Time Timer::now(bool is_firing) const
  22. {
  23. // NOTE: If is_firing is true then TimePrecision::Precise isn't really useful here.
  24. // We already have a quite precise time stamp because we just updated the time in the
  25. // interrupt handler. In those cases, just use coarse timestamps.
  26. auto clock_id = m_clock_id;
  27. if (is_firing) {
  28. switch (clock_id) {
  29. case CLOCK_MONOTONIC:
  30. clock_id = CLOCK_MONOTONIC_COARSE;
  31. break;
  32. case CLOCK_MONOTONIC_RAW:
  33. // TODO: use a special CLOCK_MONOTONIC_RAW_COARSE like mechanism here
  34. break;
  35. case CLOCK_REALTIME:
  36. clock_id = CLOCK_REALTIME_COARSE;
  37. break;
  38. default:
  39. break;
  40. }
  41. }
  42. return TimeManagement::the().current_time(clock_id);
  43. }
// Returns the global TimerQueue singleton.
TimerQueue& TimerQueue::the()
{
    return *s_the;
}
// Caches the system tick rate once at boot; this constructor runs during
// init and its code is unmapped afterwards (UNMAP_AFTER_INIT).
UNMAP_AFTER_INIT TimerQueue::TimerQueue()
{
    m_ticks_per_second = TimeManagement::the().ticks_per_second();
}
  52. bool TimerQueue::add_timer_without_id(NonnullRefPtr<Timer> timer, clockid_t clock_id, const Time& deadline, Function<void()>&& callback)
  53. {
  54. if (deadline <= TimeManagement::the().current_time(clock_id))
  55. return false;
  56. // Because timer handlers can execute on any processor and there is
  57. // a race between executing a timer handler and cancel_timer() this
  58. // *must* be a RefPtr<Timer>. Otherwise calling cancel_timer() could
  59. // inadvertently cancel another timer that has been created between
  60. // returning from the timer handler and a call to cancel_timer().
  61. timer->setup(clock_id, deadline, move(callback));
  62. ScopedSpinLock lock(g_timerqueue_lock);
  63. timer->m_id = 0; // Don't generate a timer id
  64. add_timer_locked(move(timer));
  65. return true;
  66. }
  67. TimerId TimerQueue::add_timer(NonnullRefPtr<Timer>&& timer)
  68. {
  69. ScopedSpinLock lock(g_timerqueue_lock);
  70. timer->m_id = ++m_timer_id_count;
  71. VERIFY(timer->m_id != 0); // wrapped
  72. add_timer_locked(move(timer));
  73. return timer->m_id;
  74. }
  75. void TimerQueue::add_timer_locked(NonnullRefPtr<Timer> timer)
  76. {
  77. Time timer_expiration = timer->m_expires;
  78. VERIFY(!timer->is_queued());
  79. auto& queue = queue_for_timer(*timer);
  80. if (queue.list.is_empty()) {
  81. queue.list.append(&timer.leak_ref());
  82. queue.next_timer_due = timer_expiration;
  83. } else {
  84. Timer* following_timer = nullptr;
  85. queue.list.for_each([&](Timer& t) {
  86. if (t.m_expires > timer_expiration) {
  87. following_timer = &t;
  88. return IterationDecision::Break;
  89. }
  90. return IterationDecision::Continue;
  91. });
  92. if (following_timer) {
  93. bool next_timer_needs_update = queue.list.head() == following_timer;
  94. queue.list.insert_before(following_timer, &timer.leak_ref());
  95. if (next_timer_needs_update)
  96. queue.next_timer_due = timer_expiration;
  97. } else {
  98. queue.list.append(&timer.leak_ref());
  99. }
  100. }
  101. }
  102. TimerId TimerQueue::add_timer(clockid_t clock_id, const Time& deadline, Function<void()>&& callback)
  103. {
  104. auto expires = TimeManagement::the().current_time(clock_id);
  105. expires = expires + deadline;
  106. auto timer = new Timer();
  107. VERIFY(timer);
  108. timer->setup(clock_id, expires, move(callback));
  109. return add_timer(adopt_ref(*timer));
  110. }
// Cancels the timer with the given id. Searches both clock queues;
// returns true if the timer was found queued and removed before firing.
// Returns false if it was not queued — in that case we first wait for any
// in-flight execution of its handler to finish, so on return the caller
// may assume the callback is no longer running on any processor.
bool TimerQueue::cancel_timer(TimerId id)
{
    Timer* found_timer = nullptr;
    Queue* timer_queue = nullptr;

    ScopedSpinLock lock(g_timerqueue_lock);
    // Search the monotonic queue first; only if that for_each completed
    // without a Break (i.e. id not found) do we search the realtime queue.
    if (m_timer_queue_monotonic.list.for_each([&](Timer& timer) {
            if (timer.m_id == id) {
                found_timer = &timer;
                timer_queue = &m_timer_queue_monotonic;
                return IterationDecision::Break;
            }
            return IterationDecision::Continue;
        })
        != IterationDecision::Break) {
        m_timer_queue_realtime.list.for_each([&](Timer& timer) {
            if (timer.m_id == id) {
                found_timer = &timer;
                timer_queue = &m_timer_queue_realtime;
                return IterationDecision::Break;
            }
            return IterationDecision::Continue;
        });
    }

    if (!found_timer) {
        // The timer may be executing right now, if it is then it should
        // be in m_timers_executing. If it is then release the lock
        // briefly to allow it to finish by removing itself
        // NOTE: This can only happen with multiple processors!
        while (m_timers_executing.for_each([&](Timer& timer) {
            if (timer.m_id == id)
                return IterationDecision::Break;
            return IterationDecision::Continue;
        }) == IterationDecision::Break) {
            // NOTE: This isn't the most efficient way to wait, but
            // it should only happen when multiple processors are used.
            // Also, the timers should execute pretty quickly, so it
            // should not loop here for very long. But we can't yield.
            lock.unlock();
            Processor::wait_check();
            lock.lock();
        }
        // We were not able to cancel the timer, but at this point
        // the handler should have completed if it was running!
        return false;
    }

    VERIFY(found_timer);
    VERIFY(timer_queue);
    remove_timer_locked(*timer_queue, *found_timer);
    return true;
}
// Cancels `timer` by reference. Returns true if it was still queued and
// was removed; returns false if it was not queued — after first spinning
// until any in-flight execution of its handler on another processor has
// completed, so the caller may assume the callback is no longer running.
bool TimerQueue::cancel_timer(Timer& timer)
{
    auto& timer_queue = queue_for_timer(timer);
    ScopedSpinLock lock(g_timerqueue_lock);
    if (!timer_queue.list.contains_slow(&timer)) {
        // The timer may be executing right now, if it is then it should
        // be in m_timers_executing. If it is then release the lock
        // briefly to allow it to finish by removing itself
        // NOTE: This can only happen with multiple processors!
        while (m_timers_executing.contains_slow(&timer)) {
            // NOTE: This isn't the most efficient way to wait, but
            // it should only happen when multiple processors are used.
            // Also, the timers should execute pretty quickly, so it
            // should not loop here for very long. But we can't yield.
            lock.unlock();
            Processor::wait_check();
            lock.lock();
        }
        // We were not able to cancel the timer, but at this point
        // the handler should have completed if it was running!
        return false;
    }

    // The caller plus the queue must each still hold a reference; the
    // queue's reference is dropped inside remove_timer_locked().
    VERIFY(timer.ref_count() > 1);
    remove_timer_locked(timer_queue, timer);
    return true;
}
  187. void TimerQueue::remove_timer_locked(Queue& queue, Timer& timer)
  188. {
  189. bool was_next_timer = (queue.list.head() == &timer);
  190. queue.list.remove(&timer);
  191. timer.set_queued(false);
  192. auto now = timer.now(false);
  193. if (timer.m_expires > now)
  194. timer.m_remaining = timer.m_expires - now;
  195. if (was_next_timer)
  196. update_next_timer_due(queue);
  197. // Whenever we remove a timer that was still queued (but hasn't been
  198. // fired) we added a reference to it. So, when removing it from the
  199. // queue we need to drop that reference.
  200. timer.unref();
  201. }
// Called from the timer interrupt path: pops every expired timer off the
// head of each queue and defers its callback out of the IRQ handler.
// While a handler is pending/running, the timer lives in
// m_timers_executing so cancel_timer() can detect it.
void TimerQueue::fire()
{
    ScopedSpinLock lock(g_timerqueue_lock);

    auto fire_timers = [&](Queue& queue) {
        auto* timer = queue.list.head();
        VERIFY(timer);
        // The cached next-due time must agree with the actual head.
        VERIFY(queue.next_timer_due == timer->m_expires);

        // Fire every head timer whose expiration is strictly in the past.
        // now(true) uses coarse clocks — good enough right after the
        // interrupt handler updated the time.
        while (timer && timer->now(true) > timer->m_expires) {
            queue.list.remove(timer);
            timer->set_queued(false);

            m_timers_executing.append(timer);
            update_next_timer_due(queue);

            // Drop the lock while queueing the deferred call: the callback
            // must not run (and other CPUs must not spin) under our lock.
            lock.unlock();

            // Defer executing the timer outside of the irq handler
            Processor::current().deferred_call_queue([this, timer]() {
                timer->m_callback();
                ScopedSpinLock lock(g_timerqueue_lock);
                m_timers_executing.remove(timer);

                // Drop the reference we added when queueing the timer
                timer->unref();
            });

            lock.lock();
            // Re-read the head: it may have changed while unlocked.
            timer = queue.list.head();
        }
    };

    if (!m_timer_queue_monotonic.list.is_empty())
        fire_timers(m_timer_queue_monotonic);
    if (!m_timer_queue_realtime.list.is_empty())
        fire_timers(m_timer_queue_realtime);
}
  232. void TimerQueue::update_next_timer_due(Queue& queue)
  233. {
  234. VERIFY(g_timerqueue_lock.is_locked());
  235. if (auto* next_timer = queue.list.head())
  236. queue.next_timer_due = next_timer->m_expires;
  237. else
  238. queue.next_timer_due = {};
  239. }
  240. }