/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */
|
|
|
|
|
2019-12-27 00:58:28 +00:00
|
|
|
#include <AK/Function.h>
|
|
|
|
#include <AK/NonnullOwnPtr.h>
|
|
|
|
#include <AK/OwnPtr.h>
|
2020-08-25 01:35:19 +00:00
|
|
|
#include <AK/Singleton.h>
|
2020-11-15 18:58:19 +00:00
|
|
|
#include <AK/Time.h>
|
2019-12-27 00:58:28 +00:00
|
|
|
#include <Kernel/Scheduler.h>
|
2020-04-26 09:23:37 +00:00
|
|
|
#include <Kernel/Time/TimeManagement.h>
|
2019-12-27 00:58:28 +00:00
|
|
|
#include <Kernel/TimerQueue.h>
|
|
|
|
|
2020-02-16 00:27:42 +00:00
|
|
|
namespace Kernel {
|
|
|
|
|
2020-08-25 01:35:19 +00:00
|
|
|
static AK::Singleton<TimerQueue> s_the;
|
2020-11-15 18:58:19 +00:00
|
|
|
static SpinLock<u8> g_timerqueue_lock;
|
2019-12-27 00:58:28 +00:00
|
|
|
|
2021-02-27 22:56:16 +00:00
|
|
|
// Returns the time that was left before this timer would have fired.
// NOTE: m_remaining is only refreshed when the timer is removed from a
// queue (see TimerQueue::remove_timer_locked), not continuously.
Time Timer::remaining() const
{
    return m_remaining;
}
|
|
|
|
|
2021-02-27 22:56:16 +00:00
|
|
|
// Returns the current time on this timer's clock.
// When called from the firing path (is_firing == true), the interrupt
// handler has just updated the clocks, so a coarse timestamp is already
// precise enough — swap in the cheap coarse clock variant where one exists.
Time Timer::now(bool is_firing) const
{
    auto effective_clock_id = m_clock_id;
    if (is_firing) {
        if (effective_clock_id == CLOCK_MONOTONIC) {
            effective_clock_id = CLOCK_MONOTONIC_COARSE;
        } else if (effective_clock_id == CLOCK_REALTIME) {
            effective_clock_id = CLOCK_REALTIME_COARSE;
        }
        // TODO: CLOCK_MONOTONIC_RAW has no coarse counterpart yet; it (and
        // any other clock) is used unchanged.
    }
    return TimeManagement::the().current_time(effective_clock_id);
}
|
|
|
|
|
2019-12-27 00:58:28 +00:00
|
|
|
// Returns the global singleton TimerQueue instance.
TimerQueue& TimerQueue::the()
{
    return *s_the;
}
|
|
|
|
|
2021-02-19 20:29:46 +00:00
|
|
|
// Caches the system tick rate once at construction.
// UNMAP_AFTER_INIT: this constructor only runs during early boot and its
// code is unmapped afterwards.
UNMAP_AFTER_INIT TimerQueue::TimerQueue()
{
    m_ticks_per_second = TimeManagement::the().ticks_per_second();
}
|
|
|
|
|
2021-05-19 22:41:51 +00:00
|
|
|
// Queues a caller-owned timer without assigning it a TimerId (m_id stays 0,
// so it cannot be found by cancel_timer(TimerId) — cancel via the Timer&
// overload instead).
// Returns false if the deadline has already passed on the given clock, in
// which case the timer is not queued and the callback will never run.
bool TimerQueue::add_timer_without_id(NonnullRefPtr<Timer> timer, clockid_t clock_id, const Time& deadline, Function<void()>&& callback)
{
    // Reject deadlines that are already in the past.
    if (deadline <= TimeManagement::the().current_time(clock_id))
        return false;

    // Because timer handlers can execute on any processor and there is
    // a race between executing a timer handler and cancel_timer() this
    // *must* be a RefPtr<Timer>. Otherwise calling cancel_timer() could
    // inadvertently cancel another timer that has been created between
    // returning from the timer handler and a call to cancel_timer().
    timer->setup(clock_id, deadline, move(callback));

    // NOTE: setup() happens before taking the lock to keep the critical
    // section short; only the queue mutation is done under the lock.
    ScopedSpinLock lock(g_timerqueue_lock);
    timer->m_id = 0; // Don't generate a timer id
    add_timer_locked(move(timer));
    return true;
}
|
|
|
|
|
|
|
|
// Assigns the timer a fresh, non-zero id and queues it.
// Returns the id, which can later be passed to cancel_timer(TimerId).
TimerId TimerQueue::add_timer(NonnullRefPtr<Timer>&& timer)
{
    ScopedSpinLock lock(g_timerqueue_lock);

    timer->m_id = ++m_timer_id_count;
    VERIFY(timer->m_id != 0); // wrapped

    // Capture the id before handing off ownership: the original code read
    // timer->m_id *after* move(timer), which is a use-after-move of the
    // NonnullRefPtr.
    auto id = timer->m_id;
    add_timer_locked(move(timer));
    return id;
}
|
|
|
|
|
|
|
|
// Inserts the timer into its clock's queue, keeping the queue's list sorted
// by ascending expiration time, and updates the cached next_timer_due when
// the new timer becomes the earliest one.
// Must be called with g_timerqueue_lock held.
// Ownership note: leak_ref() transfers the NonnullRefPtr's reference into
// the intrusive list; it is dropped again in remove_timer_locked() or after
// the timer fires (see fire()).
void TimerQueue::add_timer_locked(NonnullRefPtr<Timer> timer)
{
    Time timer_expiration = timer->m_expires;

    VERIFY(!timer->is_queued());

    auto& queue = queue_for_timer(*timer);
    if (queue.list.is_empty()) {
        // First timer on this queue: it is trivially the next one due.
        queue.list.append(&timer.leak_ref());
        queue.next_timer_due = timer_expiration;
    } else {
        // Find the first queued timer that expires strictly later than the
        // new one; the new timer is inserted immediately before it.
        Timer* following_timer = nullptr;
        queue.list.for_each([&](Timer& t) {
            if (t.m_expires > timer_expiration) {
                following_timer = &t;
                return IterationDecision::Break;
            }
            return IterationDecision::Continue;
        });
        if (following_timer) {
            // Inserting before the current head means the new timer is now
            // the earliest, so next_timer_due must be refreshed.
            bool next_timer_needs_update = queue.list.head() == following_timer;
            queue.list.insert_before(following_timer, &timer.leak_ref());
            if (next_timer_needs_update)
                queue.next_timer_due = timer_expiration;
        } else {
            // Expires at or after every queued timer: append at the tail.
            queue.list.append(&timer.leak_ref());
        }
    }
}
|
|
|
|
|
2021-02-28 00:36:34 +00:00
|
|
|
// Creates a new timer that fires `deadline` from now on `clock_id`, queues
// it, and returns its id for later cancellation.
TimerId TimerQueue::add_timer(clockid_t clock_id, const Time& deadline, Function<void()>&& callback)
{
    // Convert the relative deadline into an absolute expiration time.
    auto absolute_expiry = TimeManagement::the().current_time(clock_id) + deadline;

    auto* raw_timer = new Timer();
    VERIFY(raw_timer);
    raw_timer->setup(clock_id, absolute_expiry, move(callback));
    return add_timer(adopt_ref(*raw_timer));
}
|
|
|
|
|
2020-04-26 19:59:27 +00:00
|
|
|
// Cancels the queued timer with the given id.
// Returns true if the timer was found queued and removed before firing.
// Returns false if it was not queued — in that case, if its handler was
// mid-execution on another CPU, this function spins until the handler has
// finished, so on return the callback is guaranteed not to be running.
bool TimerQueue::cancel_timer(TimerId id)
{
    Timer* found_timer = nullptr;
    Queue* timer_queue = nullptr;

    ScopedSpinLock lock(g_timerqueue_lock);
    // Search the monotonic queue first; fall through to the realtime queue
    // only if the id was not found there.
    if (m_timer_queue_monotonic.list.for_each([&](Timer& timer) {
            if (timer.m_id == id) {
                found_timer = &timer;
                timer_queue = &m_timer_queue_monotonic;
                return IterationDecision::Break;
            }
            return IterationDecision::Continue;
        })
        != IterationDecision::Break) {
        m_timer_queue_realtime.list.for_each([&](Timer& timer) {
            if (timer.m_id == id) {
                found_timer = &timer;
                timer_queue = &m_timer_queue_realtime;
                return IterationDecision::Break;
            }
            return IterationDecision::Continue;
        });
    }

    if (!found_timer) {
        // The timer may be executing right now, if it is then it should
        // be in m_timers_executing. If it is then release the lock
        // briefly to allow it to finish by removing itself
        // NOTE: This can only happen with multiple processors!
        while (m_timers_executing.for_each([&](Timer& timer) {
            if (timer.m_id == id)
                return IterationDecision::Break;
            return IterationDecision::Continue;
        }) == IterationDecision::Break) {
            // NOTE: This isn't the most efficient way to wait, but
            // it should only happen when multiple processors are used.
            // Also, the timers should execute pretty quickly, so it
            // should not loop here for very long. But we can't yield.
            lock.unlock();
            Processor::wait_check();
            lock.lock();
        }
        // We were not able to cancel the timer, but at this point
        // the handler should have completed if it was running!
        return false;
    }

    VERIFY(found_timer);
    VERIFY(timer_queue);
    remove_timer_locked(*timer_queue, *found_timer);
    return true;
}
|
|
|
|
|
2020-12-01 20:02:54 +00:00
|
|
|
// Cancels a timer by reference (used for timers queued without an id).
// Returns true if the timer was still queued and got removed before firing.
// Returns false if it was not queued — in that case, if its handler was
// mid-execution on another CPU, this function spins until the handler has
// finished, so on return the callback is guaranteed not to be running.
bool TimerQueue::cancel_timer(Timer& timer)
{
    auto& timer_queue = queue_for_timer(timer);
    ScopedSpinLock lock(g_timerqueue_lock);
    if (!timer_queue.list.contains_slow(&timer)) {
        // The timer may be executing right now, if it is then it should
        // be in m_timers_executing. If it is then release the lock
        // briefly to allow it to finish by removing itself
        // NOTE: This can only happen with multiple processors!
        while (m_timers_executing.contains_slow(&timer)) {
            // NOTE: This isn't the most efficient way to wait, but
            // it should only happen when multiple processors are used.
            // Also, the timers should execute pretty quickly, so it
            // should not loop here for very long. But we can't yield.
            lock.unlock();
            Processor::wait_check();
            lock.lock();
        }
        // We were not able to cancel the timer, but at this point
        // the handler should have completed if it was running!
        return false;
    }

    // The caller holds one reference and the queue holds another (added in
    // add_timer_locked), so the count must exceed 1 while queued.
    VERIFY(timer.ref_count() > 1);
    remove_timer_locked(timer_queue, timer);
    return true;
}
|
|
|
|
|
2020-12-01 23:53:47 +00:00
|
|
|
// Unlinks a still-queued (not yet fired) timer from its queue, records how
// much time it had left in m_remaining, refreshes the queue's cached next
// due time if the head changed, and drops the queue's reference.
// Must be called with g_timerqueue_lock held.
void TimerQueue::remove_timer_locked(Queue& queue, Timer& timer)
{
    bool was_next_timer = (queue.list.head() == &timer);
    queue.list.remove(&timer);
    timer.set_queued(false);
    // Snapshot the remaining time so Timer::remaining() stays meaningful
    // after the timer leaves the queue. (If already expired, m_remaining is
    // left untouched.)
    auto now = timer.now(false);
    if (timer.m_expires > now)
        timer.m_remaining = timer.m_expires - now;

    if (was_next_timer)
        update_next_timer_due(queue);
    // Whenever we remove a timer that was still queued (but hasn't been
    // fired) we added a reference to it. So, when removing it from the
    // queue we need to drop that reference.
    timer.unref();
}
|
|
|
|
|
2019-12-27 00:58:28 +00:00
|
|
|
// Called from the timer interrupt path: pops every expired timer off both
// queues, parks it in m_timers_executing, and schedules its callback as a
// deferred call so it runs outside the IRQ handler.
// NOTE: the spinlock is dropped around deferred_call_queue() and re-taken
// afterwards, so the list head is re-read on every loop iteration.
void TimerQueue::fire()
{
    ScopedSpinLock lock(g_timerqueue_lock);

    auto fire_timers = [&](Queue& queue) {
        auto* timer = queue.list.head();
        VERIFY(timer);
        // The cached next-due time must agree with the sorted list's head.
        VERIFY(queue.next_timer_due == timer->m_expires);

        // now(true) uses coarse clocks — the interrupt handler just updated
        // the time, so that is precise enough here.
        while (timer && timer->now(true) > timer->m_expires) {
            queue.list.remove(timer);
            timer->set_queued(false);

            // Keep the timer findable by cancel_timer() while its callback
            // is pending/running.
            m_timers_executing.append(timer);

            update_next_timer_due(queue);

            lock.unlock();

            // Defer executing the timer outside of the irq handler
            Processor::current().deferred_call_queue([this, timer]() {
                timer->m_callback();
                ScopedSpinLock lock(g_timerqueue_lock);
                m_timers_executing.remove(timer);
                // Drop the reference we added when queueing the timer
                timer->unref();
            });

            lock.lock();
            // Re-read the head: the queue may have changed while unlocked.
            timer = queue.list.head();
        }
    };

    if (!m_timer_queue_monotonic.list.is_empty())
        fire_timers(m_timer_queue_monotonic);
    if (!m_timer_queue_realtime.list.is_empty())
        fire_timers(m_timer_queue_realtime);
}
|
|
|
|
|
2020-12-01 23:53:47 +00:00
|
|
|
// Recomputes the queue's cached earliest expiration from its (sorted) head,
// clearing it when the queue is empty.
// Must be called with g_timerqueue_lock held.
void TimerQueue::update_next_timer_due(Queue& queue)
{
    VERIFY(g_timerqueue_lock.is_locked());

    auto* head = queue.list.head();
    queue.next_timer_due = head ? head->m_expires : Time {};
}
|
2020-02-16 00:27:42 +00:00
|
|
|
|
|
|
|
}
|