TimerQueue.cpp 10 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296
  1. /*
  2. * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
  3. * All rights reserved.
  4. *
  5. * Redistribution and use in source and binary forms, with or without
  6. * modification, are permitted provided that the following conditions are met:
  7. *
  8. * 1. Redistributions of source code must retain the above copyright notice, this
  9. * list of conditions and the following disclaimer.
  10. *
  11. * 2. Redistributions in binary form must reproduce the above copyright notice,
  12. * this list of conditions and the following disclaimer in the documentation
  13. * and/or other materials provided with the distribution.
  14. *
  15. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  16. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  17. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  18. * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
  19. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  20. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  21. * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  22. * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  23. * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  24. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  25. */
  26. #include <AK/Function.h>
  27. #include <AK/NonnullOwnPtr.h>
  28. #include <AK/OwnPtr.h>
  29. #include <AK/Singleton.h>
  30. #include <AK/Time.h>
  31. #include <Kernel/Scheduler.h>
  32. #include <Kernel/Time/TimeManagement.h>
  33. #include <Kernel/TimerQueue.h>
  34. namespace Kernel {
// Lazily-constructed global TimerQueue singleton (see TimerQueue::the()).
static AK::Singleton<TimerQueue> s_the;
// Guards all per-clock timer queues and m_timers_executing. Taken from both
// regular kernel context and the timer interrupt path (TimerQueue::fire()).
static SpinLock<u8> g_timerqueue_lock;
// Returns the time that was left until expiration, as captured when the timer
// was last removed from its queue (set by TimerQueue::remove_timer_locked()).
Time Timer::remaining() const
{
    return m_remaining;
}
  41. Time Timer::now(bool is_firing) const
  42. {
  43. // NOTE: If is_firing is true then TimePrecision::Precise isn't really useful here.
  44. // We already have a quite precise time stamp because we just updated the time in the
  45. // interrupt handler. In those cases, just use coarse timestamps.
  46. auto clock_id = m_clock_id;
  47. if (is_firing) {
  48. switch (clock_id) {
  49. case CLOCK_MONOTONIC:
  50. clock_id = CLOCK_MONOTONIC_COARSE;
  51. break;
  52. case CLOCK_MONOTONIC_RAW:
  53. // TODO: use a special CLOCK_MONOTONIC_RAW_COARSE like mechanism here
  54. break;
  55. case CLOCK_REALTIME:
  56. clock_id = CLOCK_REALTIME_COARSE;
  57. break;
  58. default:
  59. break;
  60. }
  61. }
  62. // FIXME: Should use AK::Time internally
  63. return Time::from_timespec(TimeManagement::the().current_time(clock_id).value());
  64. }
// Accessor for the global TimerQueue singleton.
TimerQueue& TimerQueue::the()
{
    return *s_the;
}
// Caches the system tick rate at construction time; the constructor is
// unmapped after kernel initialization (UNMAP_AFTER_INIT).
UNMAP_AFTER_INIT TimerQueue::TimerQueue()
{
    m_ticks_per_second = TimeManagement::the().ticks_per_second();
}
  73. RefPtr<Timer> TimerQueue::add_timer_without_id(clockid_t clock_id, const Time& deadline, Function<void()>&& callback)
  74. {
  75. // FIXME: Should use AK::Time internally
  76. if (deadline <= Time::from_timespec(TimeManagement::the().current_time(clock_id).value()))
  77. return {};
  78. // Because timer handlers can execute on any processor and there is
  79. // a race between executing a timer handler and cancel_timer() this
  80. // *must* be a RefPtr<Timer>. Otherwise calling cancel_timer() could
  81. // inadvertently cancel another timer that has been created between
  82. // returning from the timer handler and a call to cancel_timer().
  83. auto timer = adopt(*new Timer(clock_id, deadline, move(callback)));
  84. ScopedSpinLock lock(g_timerqueue_lock);
  85. timer->m_id = 0; // Don't generate a timer id
  86. add_timer_locked(timer);
  87. return timer;
  88. }
  89. TimerId TimerQueue::add_timer(NonnullRefPtr<Timer>&& timer)
  90. {
  91. ScopedSpinLock lock(g_timerqueue_lock);
  92. timer->m_id = ++m_timer_id_count;
  93. VERIFY(timer->m_id != 0); // wrapped
  94. add_timer_locked(move(timer));
  95. return timer->m_id;
  96. }
// Inserts `timer` into the queue for its clock, keeping the list sorted by
// expiration time. Must be called with g_timerqueue_lock held.
//
// Ownership note: the list stores raw Timer* and keeps the timer alive via
// the reference leaked here (timer.leak_ref()); remove_timer_locked() and
// the fire path drop that reference with unref().
//
// NOTE(review): this function VERIFYs !is_queued() but no set_queued(true)
// call is visible in this file — presumably the queued flag is set elsewhere
// (e.g. by the Timer/list machinery); confirm against Timer's declaration.
void TimerQueue::add_timer_locked(NonnullRefPtr<Timer> timer)
{
    Time timer_expiration = timer->m_expires;

    VERIFY(!timer->is_queued());

    auto& queue = queue_for_timer(*timer);
    if (queue.list.is_empty()) {
        // First timer on this queue: it is trivially the next one due.
        queue.list.append(&timer.leak_ref());
        queue.next_timer_due = timer_expiration;
    } else {
        // Find the first timer expiring strictly after us; we insert before it
        // so equal deadlines keep insertion order.
        Timer* following_timer = nullptr;
        queue.list.for_each([&](Timer& t) {
            if (t.m_expires > timer_expiration) {
                following_timer = &t;
                return IterationDecision::Break;
            }
            return IterationDecision::Continue;
        });
        if (following_timer) {
            // If we displace the head, the queue's cached next-due time
            // must be refreshed to our (earlier) expiration.
            bool next_timer_needs_update = queue.list.head() == following_timer;
            queue.list.insert_before(following_timer, &timer.leak_ref());
            if (next_timer_needs_update)
                queue.next_timer_due = timer_expiration;
        } else {
            // We expire last; append at the tail (next_timer_due unchanged).
            queue.list.append(&timer.leak_ref());
        }
    }
}
  124. TimerId TimerQueue::add_timer(clockid_t clock_id, timeval& deadline, Function<void()>&& callback)
  125. {
  126. // FIXME: Should use AK::Time internally
  127. auto expires = TimeManagement::the().current_time(clock_id).value();
  128. timespec_add_timeval(expires, deadline, expires);
  129. return add_timer(adopt(*new Timer(clock_id, Time::from_timespec(expires), move(callback))));
  130. }
// Cancels the timer with the given id.
//
// Returns true if the timer was found queued and removed before firing.
// Returns false if it was not queued — in that case, if the timer was
// mid-execution on another processor, this function spins (briefly
// releasing the lock) until the handler has finished, so on return the
// callback is guaranteed not to be running.
bool TimerQueue::cancel_timer(TimerId id)
{
    Timer* found_timer = nullptr;
    Queue* timer_queue = nullptr;

    ScopedSpinLock lock(g_timerqueue_lock);
    // Search the monotonic queue first; only if the scan ran to completion
    // (i.e. did not Break on a match) do we search the realtime queue.
    if (m_timer_queue_monotonic.list.for_each([&](Timer& timer) {
            if (timer.m_id == id) {
                found_timer = &timer;
                timer_queue = &m_timer_queue_monotonic;
                return IterationDecision::Break;
            }
            return IterationDecision::Continue;
        })
        != IterationDecision::Break) {
        m_timer_queue_realtime.list.for_each([&](Timer& timer) {
            if (timer.m_id == id) {
                found_timer = &timer;
                timer_queue = &m_timer_queue_realtime;
                return IterationDecision::Break;
            }
            return IterationDecision::Continue;
        });
    }

    if (!found_timer) {
        // The timer may be executing right now, if it is then it should
        // be in m_timers_executing. If it is then release the lock
        // briefly to allow it to finish by removing itself
        // NOTE: This can only happen with multiple processors!
        while (m_timers_executing.for_each([&](Timer& timer) {
            if (timer.m_id == id)
                return IterationDecision::Break;
            return IterationDecision::Continue;
        }) == IterationDecision::Break) {
            // NOTE: This isn't the most efficient way to wait, but
            // it should only happen when multiple processors are used.
            // Also, the timers should execute pretty quickly, so it
            // should not loop here for very long. But we can't yield.
            lock.unlock();
            Processor::wait_check();
            lock.lock();
        }

        // We were not able to cancel the timer, but at this point
        // the handler should have completed if it was running!
        return false;
    }

    VERIFY(found_timer);
    VERIFY(timer_queue);
    remove_timer_locked(*timer_queue, *found_timer);
    return true;
}
// Cancels a timer by reference (used for timers created without an id).
//
// Returns true if the timer was still queued and got removed. Returns false
// if it had already been dequeued; if it was mid-execution on another
// processor, we spin (dropping the lock each iteration) until its handler
// removes itself from m_timers_executing, so the callback is not running
// when we return.
bool TimerQueue::cancel_timer(Timer& timer)
{
    // NOTE(review): queue_for_timer() is called before the lock is taken —
    // presumably safe because the timer's clock id never changes; confirm.
    auto& timer_queue = queue_for_timer(timer);
    ScopedSpinLock lock(g_timerqueue_lock);
    if (!timer_queue.list.contains_slow(&timer)) {
        // The timer may be executing right now, if it is then it should
        // be in m_timers_executing. If it is then release the lock
        // briefly to allow it to finish by removing itself
        // NOTE: This can only happen with multiple processors!
        while (m_timers_executing.contains_slow(&timer)) {
            // NOTE: This isn't the most efficient way to wait, but
            // it should only happen when multiple processors are used.
            // Also, the timers should execute pretty quickly, so it
            // should not loop here for very long. But we can't yield.
            lock.unlock();
            Processor::wait_check();
            lock.lock();
        }

        // We were not able to cancel the timer, but at this point
        // the handler should have completed if it was running!
        return false;
    }

    // The caller holds one reference and the queue holds another, so the
    // count must be at least 2 while the timer is still queued.
    VERIFY(timer.ref_count() > 1);

    remove_timer_locked(timer_queue, timer);
    return true;
}
  207. void TimerQueue::remove_timer_locked(Queue& queue, Timer& timer)
  208. {
  209. bool was_next_timer = (queue.list.head() == &timer);
  210. queue.list.remove(&timer);
  211. timer.set_queued(false);
  212. auto now = timer.now(false);
  213. if (timer.m_expires > now)
  214. timer.m_remaining = timer.m_expires - now;
  215. if (was_next_timer)
  216. update_next_timer_due(queue);
  217. // Whenever we remove a timer that was still queued (but hasn't been
  218. // fired) we added a reference to it. So, when removing it from the
  219. // queue we need to drop that reference.
  220. timer.unref();
  221. }
// Called from the timer interrupt path. Dequeues every expired timer from
// both queues, parks it in m_timers_executing, and defers its callback to
// run outside the IRQ handler; the deferred call removes the timer from
// m_timers_executing and drops the queue's reference when the callback
// finishes.
void TimerQueue::fire()
{
    ScopedSpinLock lock(g_timerqueue_lock);

    auto fire_timers = [&](Queue& queue) {
        auto* timer = queue.list.head();
        VERIFY(timer);
        VERIFY(queue.next_timer_due == timer->m_expires);

        // Pop timers from the head as long as the head has expired.
        // now(true) uses coarse clocks since the IRQ just updated them.
        while (timer && timer->now(true) > timer->m_expires) {
            queue.list.remove(timer);
            timer->set_queued(false);

            m_timers_executing.append(timer);

            update_next_timer_due(queue);

            // Drop the lock while queueing the deferred call; the deferred
            // body re-acquires it to update m_timers_executing.
            lock.unlock();

            // Defer executing the timer outside of the irq handler
            Processor::current().deferred_call_queue([this, timer]() {
                timer->m_callback();
                ScopedSpinLock lock(g_timerqueue_lock);
                m_timers_executing.remove(timer);

                // Drop the reference we added when queueing the timer
                timer->unref();
            });

            // Re-read the head: the queue may have changed while unlocked.
            lock.lock();
            timer = queue.list.head();
        }
    };

    if (!m_timer_queue_monotonic.list.is_empty())
        fire_timers(m_timer_queue_monotonic);
    if (!m_timer_queue_realtime.list.is_empty())
        fire_timers(m_timer_queue_realtime);
}
  252. void TimerQueue::update_next_timer_due(Queue& queue)
  253. {
  254. VERIFY(g_timerqueue_lock.is_locked());
  255. if (auto* next_timer = queue.list.head())
  256. queue.next_timer_due = next_timer->m_expires;
  257. else
  258. queue.next_timer_due = {};
  259. }
  260. }