TimerQueue.cpp

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/NonnullOwnPtr.h>
#include <AK/Singleton.h>
#include <AK/Time.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Sections.h>
#include <Kernel/Time/TimeManagement.h>
#include <Kernel/TimerQueue.h>

namespace Kernel {

static AK::Singleton<TimerQueue> s_the;
static SpinLock<u8> g_timerqueue_lock;

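// Returns how much time the timer had left when it was removed from its
// queue; within this file, m_remaining is only updated by
// remove_timer_locked() for timers cancelled before they expired.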
Time Timer::remaining() const
{
    return m_remaining;
}

Time Timer::now(bool is_firing) const
{
    // NOTE: If is_firing is true then TimePrecision::Precise isn't really useful here.
    // We already have a quite precise time stamp because we just updated the time in the
    // interrupt handler. In those cases, just use coarse timestamps.
    auto clock_id = m_clock_id;
    if (is_firing) {
        switch (clock_id) {
        case CLOCK_MONOTONIC:
            clock_id = CLOCK_MONOTONIC_COARSE;
            break;
        case CLOCK_MONOTONIC_RAW:
            // TODO: use a special CLOCK_MONOTONIC_RAW_COARSE like mechanism here
            break;
        case CLOCK_REALTIME:
            clock_id = CLOCK_REALTIME_COARSE;
            break;
        default:
            break;
        }
    }
    return TimeManagement::the().current_time(clock_id);
}

TimerQueue& TimerQueue::the()
{
    return *s_the;
}

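// UNMAP_AFTER_INIT: this constructor only runs during early boot; the kernel
// unmaps such code once initialization has completed.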
UNMAP_AFTER_INIT TimerQueue::TimerQueue()
{
    m_ticks_per_second = TimeManagement::the().ticks_per_second();
}

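// Arms `timer` against `clock_id` with an absolute `deadline`. Returns false
// if the deadline has already passed. No timer id is generated, so the caller
// must keep its own RefPtr<Timer> if it wants to cancel the timer later.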
bool TimerQueue::add_timer_without_id(NonnullRefPtr<Timer> timer, clockid_t clock_id, const Time& deadline, Function<void()>&& callback)
{
    if (deadline <= TimeManagement::the().current_time(clock_id))
        return false;

    // Because timer handlers can execute on any processor and there is
    // a race between executing a timer handler and cancel_timer() this
    // *must* be a RefPtr<Timer>. Otherwise calling cancel_timer() could
    // inadvertently cancel another timer that has been created between
    // returning from the timer handler and a call to cancel_timer().
    timer->setup(clock_id, deadline, move(callback));

    ScopedSpinLock lock(g_timerqueue_lock);
    timer->m_id = 0; // Don't generate a timer id
    add_timer_locked(move(timer));
    return true;
}

TimerId TimerQueue::add_timer(NonnullRefPtr<Timer>&& timer)
{
    ScopedSpinLock lock(g_timerqueue_lock);

    timer->m_id = ++m_timer_id_count;
    VERIFY(timer->m_id != 0); // wrapped
    auto id = timer->m_id;
    add_timer_locked(move(timer));
    return id;
}

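// Inserts `timer` into its queue, keeping the list sorted by expiration time
// and queue.next_timer_due pointing at the earliest deadline. The list takes
// over a reference via leak_ref(); it is dropped again in remove_timer_locked()
// or in the deferred call after the timer fires.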
void TimerQueue::add_timer_locked(NonnullRefPtr<Timer> timer)
{
    Time timer_expiration = timer->m_expires;

    VERIFY(!timer->is_queued());

    auto& queue = queue_for_timer(*timer);
    if (queue.list.is_empty()) {
        queue.list.append(timer.leak_ref());
        queue.next_timer_due = timer_expiration;
    } else {
        Timer* following_timer = nullptr;
        for (auto& t : queue.list) {
            if (t.m_expires > timer_expiration) {
                following_timer = &t;
                break;
            }
        }
        if (following_timer) {
            bool next_timer_needs_update = queue.list.first() == following_timer;
            queue.list.insert_before(*following_timer, timer.leak_ref());
            if (next_timer_needs_update)
                queue.next_timer_due = timer_expiration;
        } else {
            queue.list.append(timer.leak_ref());
        }
    }
}

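// Note that `deadline` is relative here: it is added to the current time of
// `clock_id` to compute the absolute expiration time.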
TimerId TimerQueue::add_timer(clockid_t clock_id, const Time& deadline, Function<void()>&& callback)
{
    auto expires = TimeManagement::the().current_time(clock_id);
    expires = expires + deadline;

    auto timer = new Timer();
    VERIFY(timer);
    timer->setup(clock_id, expires, move(callback));
    return add_timer(adopt_ref(*timer));
}

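// Example usage (a sketch, not part of this file): a one-shot timer one second
// from now could be armed and cancelled like this, assuming AK::Time::from_seconds():
//
//     auto id = TimerQueue::the().add_timer(CLOCK_MONOTONIC, Time::from_seconds(1), [] {
//         // Runs in a deferred-call context, not directly in the IRQ handler.
//     });
//     ...
//     TimerQueue::the().cancel_timer(id);

// Cancels the timer with the given id. Scans the monotonic queue, then the
// realtime queue; if the timer is in neither, it may currently be executing
// on another processor, so spin until its handler has removed it from
// m_timers_executing.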
bool TimerQueue::cancel_timer(TimerId id)
{
    Timer* found_timer = nullptr;
    Queue* timer_queue = nullptr;

    ScopedSpinLock lock(g_timerqueue_lock);
    for (auto& timer : m_timer_queue_monotonic.list) {
        if (timer.m_id == id) {
            found_timer = &timer;
            timer_queue = &m_timer_queue_monotonic;
            break;
        }
    }

    if (found_timer == nullptr) {
        for (auto& timer : m_timer_queue_realtime.list) {
            if (timer.m_id == id) {
                found_timer = &timer;
                timer_queue = &m_timer_queue_realtime;
                break;
            }
        }
    }

    if (!found_timer) {
        // The timer may be executing right now. If it is, it should be in
        // m_timers_executing. If so, release the lock briefly to allow it
        // to finish by removing itself.
        // NOTE: This can only happen with multiple processors!
        while (true) {
            for (auto& timer : m_timers_executing) {
                if (timer.m_id == id) {
                    found_timer = &timer;
                    break;
                }
            }

            if (found_timer) {
                // NOTE: This isn't the most efficient way to wait, but
                // it should only happen when multiple processors are used.
                // Also, the timers should execute pretty quickly, so it
                // should not loop here for very long. But we can't yield.
                lock.unlock();
                Processor::wait_check();
                lock.lock();
                found_timer = nullptr;
            } else {
                // We were not able to cancel the timer, but at this point
                // the handler should have completed if it was running!
                return false;
            }
        }
    }

    VERIFY(found_timer);
    VERIFY(timer_queue);
    remove_timer_locked(*timer_queue, *found_timer);
    return true;
}

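// Same as above, but for timers without an id: the caller's own reference
// identifies the timer. Returns false if the timer was no longer queued,
// e.g. because it is firing or has already fired.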
bool TimerQueue::cancel_timer(Timer& timer)
{
    auto& timer_queue = queue_for_timer(timer);
    ScopedSpinLock lock(g_timerqueue_lock);
    if (!timer_queue.list.contains(timer)) {
        // The timer may be executing right now. If it is, it should be in
        // m_timers_executing. If so, release the lock briefly to allow it
        // to finish by removing itself.
        // NOTE: This can only happen with multiple processors!
        while (m_timers_executing.contains(timer)) {
            // NOTE: This isn't the most efficient way to wait, but
            // it should only happen when multiple processors are used.
            // Also, the timers should execute pretty quickly, so it
            // should not loop here for very long. But we can't yield.
            lock.unlock();
            Processor::wait_check();
            lock.lock();
        }

        // We were not able to cancel the timer, but at this point
        // the handler should have completed if it was running!
        return false;
    }

    VERIFY(timer.ref_count() > 1);
    remove_timer_locked(timer_queue, timer);
    return true;
}

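// Removes a still-queued timer from `queue`, records how much time it had
// left in m_remaining, and drops the reference the queue was holding.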
void TimerQueue::remove_timer_locked(Queue& queue, Timer& timer)
{
    bool was_next_timer = (queue.list.first() == &timer);
    queue.list.remove(timer);
    timer.set_queued(false);
    auto now = timer.now(false);
    if (timer.m_expires > now)
        timer.m_remaining = timer.m_expires - now;

    if (was_next_timer)
        update_next_timer_due(queue);
    // Whenever we remove a timer that was still queued (but hasn't been
    // fired) we added a reference to it. So, when removing it from the
    // queue we need to drop that reference.
    timer.unref();
}

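// Called from the timer interrupt handler. Expired timers are moved to
// m_timers_executing and their callbacks are run via deferred calls, so the
// callbacks never execute inside the IRQ handler itself.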
void TimerQueue::fire()
{
    ScopedSpinLock lock(g_timerqueue_lock);

    auto fire_timers = [&](Queue& queue) {
        auto* timer = queue.list.first();
        VERIFY(timer);
        VERIFY(queue.next_timer_due == timer->m_expires);

        while (timer && timer->now(true) > timer->m_expires) {
            queue.list.remove(*timer);
            timer->set_queued(false);

            m_timers_executing.append(*timer);

            update_next_timer_due(queue);

            lock.unlock();

            // Defer executing the timer outside of the IRQ handler
            Processor::current().deferred_call_queue([this, timer]() {
                timer->m_callback();
                ScopedSpinLock lock(g_timerqueue_lock);
                m_timers_executing.remove(*timer);

                // Drop the reference we added when queueing the timer
                timer->unref();
            });

            lock.lock();
            timer = queue.list.first();
        }
    };

    if (!m_timer_queue_monotonic.list.is_empty())
        fire_timers(m_timer_queue_monotonic);
    if (!m_timer_queue_realtime.list.is_empty())
        fire_timers(m_timer_queue_realtime);
}

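// Re-derives queue.next_timer_due from the (sorted) head of the list, or
// clears it when the queue is empty. Must be called with g_timerqueue_lock held.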
void TimerQueue::update_next_timer_due(Queue& queue)
{
    VERIFY(g_timerqueue_lock.is_locked());

    if (auto* next_timer = queue.list.first())
        queue.next_timer_due = next_timer->m_expires;
    else
        queue.next_timer_due = {};
}

}