Kernel: Move some time-related code from Scheduler into TimeManagement

Use the TimerQueue to expire blocking operations, so the Scheduler no
longer has to check every blocked thread's deadline on each iteration.
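
Concretely, blocking paths now arm a one-shot timer and let TimerQueue::fire()
drive the wakeup. A condensed sketch of the pattern, taken from the
Thread::block<T>() changes below:

    if (!block_timeout.is_infinite()) {
        timer = TimerQueue::the().add_timer_without_id(block_timeout.absolute_time(), [&]() {
            // NOTE: this may execute on the same or any other processor!
            ScopedSpinLock scheduler_lock(g_scheduler_lock);
            ScopedSpinLock lock(m_lock);
            if (m_blocker)
                unblock();
        });
    }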

Also, add a BlockTimeout class that handles relative and absolute
timeouts, as well as timeout overrides (e.g. socket send/receive
timeouts), more consistently.
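
For illustration, here is how the three kinds of timeout look at a call
site (a sketch; the constructors are declared in Thread.h below, and the
five-second value is arbitrary):

    timespec five_seconds { 5, 0 };

    Thread::BlockTimeout no_timeout;                       // infinite: block until woken
    Thread::BlockTimeout relative(false, &five_seconds);   // expires 5s after the start time
    Thread::BlockTimeout absolute(true, &five_seconds);    // expires at monotonic time 5s

A Blocker can still substitute an earlier deadline via override_timeout(),
which is how socket send/receive timeouts take effect (see WriteBlocker
and ReadBlocker below).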

Also, rework the TimerQueue class so it can fire events from any
processor, which requires Timer to be RefCounted, and allow creating
id-less timers for use by blocking operations.
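
The id-less variant returns a RefPtr<Timer> (null if the deadline has
already passed), and the caller cancels through that reference rather than
a TimerId, since the callback may fire concurrently on another CPU. A
sketch of the usage pattern, condensed from Thread::wait_on() below:

    RefPtr<Timer> timer = TimerQueue::the().add_timer_without_id(deadline, [&]() {
        // May run from TimerQueue::fire() on any processor.
        wake_from_queue();
    });
    if (!timer)
        return BlockResult::InterruptedByTimeout; // deadline was already in the past
    // ... block; later, if we woke for some other reason:
    if (!did_timeout)
        TimerQueue::the().cancel_timer(timer.release_nonnull());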
commit 6cb640eeba (parent e0e26c6c67)
Author: Tom, 2020-11-15 11:58:19 -07:00
Committed by: Andreas Kling
19 changed files with 461 additions and 263 deletions
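
One detail worth calling out in the TimeManagement changes below: the new
lock-free getters read m_seconds_since_boot, m_ticks_this_second, and
m_epoch_time with a seqlock-style retry loop. The tick handler bumps
m_update1 before and m_update2 after updating those fields, so a reader
retries until it observes matching counters (this snippet is taken from
monotonic_ticks() in the diff):

    u32 update_iteration;
    do {
        update_iteration = m_update1.load(AK::MemoryOrder::memory_order_acquire);
        seconds = m_seconds_since_boot;
        ticks = m_ticks_this_second;
    } while (update_iteration != m_update2.load(AK::MemoryOrder::memory_order_acquire));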


@@ -74,7 +74,7 @@ auto AsyncDeviceRequest::wait(timeval* timeout) -> RequestWaitResult
auto request_result = get_request_result();
if (is_completed_result(request_result))
return { request_result, Thread::BlockResult::NotBlocked };
auto wait_result = Thread::current()->wait_on(m_queue, name(), timeout);
auto wait_result = Thread::current()->wait_on(m_queue, name(), Thread::BlockTimeout(false, timeout));
return { get_request_result(), wait_result };
}


@@ -431,7 +431,7 @@ static Optional<KBuffer> procfs$devices(InodeIdentifier)
static Optional<KBuffer> procfs$uptime(InodeIdentifier)
{
KBufferBuilder builder;
builder.appendf("%u\n", (g_uptime / 1000));
builder.appendf("%llu\n", TimeManagement::the().uptime_ms() / 1000);
return builder.build();
}


@@ -81,7 +81,7 @@ KResult PerformanceEventBuffer::append(int type, FlatPtr arg1, FlatPtr arg2)
dbg() << " " << (void*)event.stack[i];
#endif
event.timestamp = g_uptime;
event.timestamp = TimeManagement::the().uptime_ms();
at(m_count++) = event;
return KSuccess;
}


@@ -519,7 +519,7 @@ int Process::alloc_fd(int first_candidate_fd)
timeval kgettimeofday()
{
return g_timeofday;
return TimeManagement::now_as_timeval();
}
void kgettimeofday(timeval& tv)


@@ -56,7 +56,6 @@ public:
};
SchedulerData* g_scheduler_data;
timeval g_timeofday;
RecursiveSpinLock g_scheduler_lock;
void Scheduler::init_thread(Thread& thread)
@@ -73,16 +72,10 @@ static u32 time_slice_for(const Thread& thread)
return 10;
}
timeval Scheduler::time_since_boot()
{
return { TimeManagement::the().seconds_since_boot(), (suseconds_t)TimeManagement::the().ticks_this_second() * 1000 };
}
Thread* g_finalizer;
WaitQueue* g_finalizer_wait_queue;
Atomic<bool> g_finalizer_has_work { false };
static Process* s_colonel_process;
u64 g_uptime;
Thread::JoinBlocker::JoinBlocker(Thread& joinee, KResult& try_join_result, void*& joinee_exit_value)
: m_joinee(&joinee)
@@ -179,16 +172,15 @@ Thread::WriteBlocker::WriteBlocker(const FileDescription& description)
{
}
timespec* Thread::WriteBlocker::override_timeout(timespec* timeout)
auto Thread::WriteBlocker::override_timeout(const BlockTimeout& timeout) -> const BlockTimeout&
{
auto& description = blocked_description();
if (description.is_socket()) {
auto& socket = *description.socket();
if (socket.has_send_timeout()) {
timeval_to_timespec(Scheduler::time_since_boot(), m_deadline);
timespec_add_timeval(m_deadline, socket.send_timeout(), m_deadline);
if (!timeout || m_deadline < *timeout)
return &m_deadline;
m_timeout = BlockTimeout(false, &socket.send_timeout(), timeout.start_time());
if (timeout.is_infinite() || (!m_timeout.is_infinite() && m_timeout.absolute_time() < timeout.absolute_time()))
return m_timeout;
}
}
return timeout;
@@ -204,16 +196,15 @@ Thread::ReadBlocker::ReadBlocker(const FileDescription& description)
{
}
timespec* Thread::ReadBlocker::override_timeout(timespec* timeout)
auto Thread::ReadBlocker::override_timeout(const BlockTimeout& timeout) -> const BlockTimeout&
{
auto& description = blocked_description();
if (description.is_socket()) {
auto& socket = *description.socket();
if (socket.has_receive_timeout()) {
timeval_to_timespec(Scheduler::time_since_boot(), m_deadline);
timespec_add_timeval(m_deadline, socket.receive_timeout(), m_deadline);
if (!timeout || m_deadline < *timeout)
return &m_deadline;
m_timeout = BlockTimeout(false, &socket.receive_timeout(), timeout.start_time());
if (timeout.is_infinite() || (!m_timeout.is_infinite() && m_timeout.absolute_time() < timeout.absolute_time()))
return m_timeout;
}
}
return timeout;
@@ -236,14 +227,36 @@ bool Thread::ConditionBlocker::should_unblock(Thread&)
return m_block_until_condition();
}
Thread::SleepBlocker::SleepBlocker(u64 wakeup_time)
: m_wakeup_time(wakeup_time)
Thread::SleepBlocker::SleepBlocker(const BlockTimeout& deadline, timespec* remaining)
: m_deadline(deadline)
, m_remaining(remaining)
{
}
bool Thread::SleepBlocker::should_unblock(Thread&)
auto Thread::SleepBlocker::override_timeout(const BlockTimeout& timeout) -> const BlockTimeout&
{
return m_wakeup_time <= g_uptime;
ASSERT(timeout.is_infinite()); // A timeout should not be provided
// To simplify things only use the sleep deadline.
return m_deadline;
}
void Thread::SleepBlocker::was_unblocked()
{
if (!m_remaining)
return;
auto time_now = TimeManagement::the().monotonic_time();
if (time_now < m_deadline.absolute_time())
timespec_sub(m_deadline.absolute_time(), time_now, *m_remaining);
else
*m_remaining = {};
}
Thread::BlockResult Thread::SleepBlocker::block_result(bool did_timeout)
{
auto result = Blocker::block_result(did_timeout);
if (result == Thread::BlockResult::InterruptedByTimeout)
return Thread::BlockResult::WokeNormally;
return result;
}
Thread::SelectBlocker::SelectBlocker(const FDVector& read_fds, const FDVector& write_fds, const FDVector& except_fds)
@@ -328,7 +341,7 @@ bool Thread::SemiPermanentBlocker::should_unblock(Thread&)
// Called by the scheduler on threads that are blocked for some reason.
// Make a decision as to whether to unblock them or not.
void Thread::consider_unblock(time_t now_sec, long now_usec)
void Thread::consider_unblock()
{
ScopedSpinLock lock(m_lock);
switch (state()) {
@@ -343,11 +356,7 @@ void Thread::consider_unblock(time_t now_sec, long now_usec)
return;
case Thread::Blocked: {
ASSERT(m_blocker != nullptr);
timespec now;
now.tv_sec = now_sec,
now.tv_nsec = now_usec * 1000ull;
bool timed_out = m_blocker_timeout && now >= *m_blocker_timeout;
if (timed_out || m_blocker->should_unblock(*this))
if (m_blocker->should_unblock(*this))
unblock();
return;
}
@@ -383,10 +392,6 @@ bool Scheduler::pick_next()
ASSERT_INTERRUPTS_DISABLED();
auto current_thread = Thread::current();
auto now = time_since_boot();
auto now_sec = now.tv_sec;
auto now_usec = now.tv_usec;
// Set the m_in_scheduler flag before acquiring the spinlock. This
// prevents a recursive call into Scheduler::invoke_async upon
@@ -422,7 +427,7 @@ bool Scheduler::pick_next()
// Check and unblock threads whose wait conditions have been met.
Scheduler::for_each_nonrunnable([&](Thread& thread) {
thread.consider_unblock(now_sec, now_usec);
thread.consider_unblock();
return IterationDecision::Continue;
});
@@ -436,7 +441,7 @@ bool Scheduler::pick_next()
}
return IterationDecision::Continue;
}
if (process.m_alarm_deadline && g_uptime > process.m_alarm_deadline) {
if (process.m_alarm_deadline && TimeManagement::the().uptime_ms() > process.m_alarm_deadline) {
process.m_alarm_deadline = 0;
// FIXME: Should we observe this signal somehow?
(void)process.send_signal(SIGALRM, nullptr);
@@ -788,28 +793,18 @@ void Scheduler::timer_tick(const RegisterState& regs)
bool is_bsp = Processor::current().id() == 0;
if (!is_bsp)
return; // TODO: This prevents scheduling on other CPUs!
if (is_bsp) {
// TODO: We should probably move this out of the scheduler
++g_uptime;
g_timeofday = TimeManagement::now_as_timeval();
}
if (current_thread->process().is_profiling()) {
SmapDisabler disabler;
auto backtrace = current_thread->raw_backtrace(regs.ebp, regs.eip);
auto& sample = Profiling::next_sample_slot();
sample.pid = current_thread->process().pid();
sample.tid = current_thread->tid();
sample.timestamp = g_uptime;
sample.timestamp = TimeManagement::the().uptime_ms();
for (size_t i = 0; i < min(backtrace.size(), Profiling::max_stack_frame_count); ++i) {
sample.frames[i] = backtrace[i];
}
}
if (is_bsp)
TimerQueue::the().fire();
if (current_thread->tick())
return;


@@ -31,6 +31,7 @@
#include <AK/IntrusiveList.h>
#include <AK/Types.h>
#include <Kernel/SpinLock.h>
#include <Kernel/Time/TimeManagement.h>
#include <Kernel/UnixTypes.h>
namespace Kernel {
@@ -44,9 +45,7 @@ struct SchedulerData;
extern Thread* g_finalizer;
extern WaitQueue* g_finalizer_wait_queue;
extern Atomic<bool> g_finalizer_has_work;
extern u64 g_uptime;
extern SchedulerData* g_scheduler_data;
extern timeval g_timeofday;
extern RecursiveSpinLock g_scheduler_lock;
class Scheduler {
@@ -57,7 +56,6 @@ public:
static void timer_tick(const RegisterState&);
[[noreturn]] static void start();
static bool pick_next();
static timeval time_since_boot();
static bool yield();
static bool donate_to_and_switch(Thread*, const char* reason);
static bool donate_to(RefPtr<Thread>&, const char* reason);


@@ -33,14 +33,15 @@ unsigned Process::sys$alarm(unsigned seconds)
{
REQUIRE_PROMISE(stdio);
unsigned previous_alarm_remaining = 0;
if (m_alarm_deadline && m_alarm_deadline > g_uptime) {
previous_alarm_remaining = (m_alarm_deadline - g_uptime) / TimeManagement::the().ticks_per_second();
auto uptime = TimeManagement::the().uptime_ms();
if (m_alarm_deadline && m_alarm_deadline > uptime) {
previous_alarm_remaining = m_alarm_deadline - uptime;
}
if (!seconds) {
m_alarm_deadline = 0;
return previous_alarm_remaining;
}
m_alarm_deadline = g_uptime + seconds * TimeManagement::the().ticks_per_second();
m_alarm_deadline = uptime + seconds * 1000;
return previous_alarm_remaining;
}


@@ -32,9 +32,9 @@ namespace Kernel {
int Process::sys$beep()
{
PCSpeaker::tone_on(440);
u64 wakeup_time = Thread::current()->sleep(100);
auto result = Thread::current()->sleep({ 0, 200 });
PCSpeaker::tone_off();
if (wakeup_time > g_uptime)
if (result.was_interrupted())
return -EINTR;
return 0;
}


@@ -37,13 +37,9 @@ int Process::sys$clock_gettime(clockid_t clock_id, Userspace<timespec*> user_ts)
timespec ts = {};
switch (clock_id) {
case CLOCK_MONOTONIC: {
auto ticks_per_second = TimeManagement::the().ticks_per_second();
auto uptime = g_uptime;
ts.tv_sec = uptime / ticks_per_second;
ts.tv_nsec = (1000000000 * (uptime % ticks_per_second)) / ticks_per_second;
case CLOCK_MONOTONIC:
ts = TimeManagement::the().monotonic_time();
break;
}
case CLOCK_REALTIME:
ts = TimeManagement::the().epoch_time();
break;
@@ -91,29 +87,19 @@ int Process::sys$clock_nanosleep(Userspace<const Syscall::SC_clock_nanosleep_par
bool is_absolute = params.flags & TIMER_ABSTIME;
auto ticks_per_second = TimeManagement::the().ticks_per_second();
switch (params.clock_id) {
case CLOCK_MONOTONIC: {
u64 ticks_to_sleep = requested_sleep.tv_sec * ticks_per_second;
ticks_to_sleep += (requested_sleep.tv_nsec * ticks_per_second) / 1000000000;
if (is_absolute)
ticks_to_sleep -= g_uptime;
if (!ticks_to_sleep)
return 0;
u64 wakeup_time = Thread::current()->sleep(ticks_to_sleep);
if (wakeup_time > g_uptime) {
u64 ticks_left = wakeup_time - g_uptime;
if (!is_absolute && params.remaining_sleep) {
timespec remaining_sleep = {};
remaining_sleep.tv_sec = ticks_left / ticks_per_second;
ticks_left -= remaining_sleep.tv_sec * ticks_per_second;
remaining_sleep.tv_nsec = (ticks_left * 1000000000) / ticks_per_second;
if (!copy_to_user(params.remaining_sleep, &remaining_sleep))
return -EFAULT;
}
return -EINTR;
bool was_interrupted;
if (is_absolute) {
was_interrupted = Thread::current()->sleep_until(requested_sleep).was_interrupted();
} else {
timespec remaining_sleep;
was_interrupted = Thread::current()->sleep(requested_sleep, &remaining_sleep).was_interrupted();
if (was_interrupted && params.remaining_sleep && !copy_to_user(params.remaining_sleep, &remaining_sleep))
return -EFAULT;
}
if (was_interrupted)
return -EINTR;
return 0;
}
default:


@@ -29,19 +29,6 @@
namespace Kernel {
static void compute_relative_timeout_from_absolute(const timeval& absolute_time, timeval& relative_time)
{
// Convert absolute time to relative time of day.
timeval_sub(absolute_time, kgettimeofday(), relative_time);
}
static void compute_relative_timeout_from_absolute(const timespec& absolute_time, timeval& relative_time)
{
timeval tv_absolute_time;
timespec_to_timeval(absolute_time, tv_absolute_time);
compute_relative_timeout_from_absolute(tv_absolute_time, relative_time);
}
WaitQueue& Process::futex_queue(Userspace<const i32*> userspace_address)
{
auto& queue = m_futex_queues.ensure(userspace_address.ptr());
@@ -66,20 +53,17 @@ int Process::sys$futex(Userspace<const Syscall::SC_futex_params*> user_params)
if (user_value != params.val)
return -EAGAIN;
timespec ts_abstimeout { 0, 0 };
if (params.timeout && !copy_from_user(&ts_abstimeout, params.timeout))
return -EFAULT;
timeval* optional_timeout = nullptr;
timeval relative_timeout { 0, 0 };
Thread::BlockTimeout timeout;
if (params.timeout) {
compute_relative_timeout_from_absolute(ts_abstimeout, relative_timeout);
optional_timeout = &relative_timeout;
timespec ts_abstimeout { 0, 0 };
if (!copy_from_user(&ts_abstimeout, params.timeout))
return -EFAULT;
timeout = Thread::BlockTimeout(true, &ts_abstimeout);
}
// FIXME: This is supposed to be interruptible by a signal, but right now WaitQueue cannot be interrupted.
WaitQueue& wait_queue = futex_queue((FlatPtr)params.userspace_address);
Thread::BlockResult result = Thread::current()->wait_on(wait_queue, "Futex", optional_timeout);
Thread::BlockResult result = Thread::current()->wait_on(wait_queue, "Futex", timeout);
if (result == Thread::BlockResult::InterruptedByTimeout) {
return -ETIMEDOUT;
}


@@ -46,18 +46,12 @@ int Process::sys$select(const Syscall::SC_select_params* user_params)
if (params.nfds < 0)
return -EINVAL;
timespec computed_timeout;
bool select_has_timeout = false;
Thread::BlockTimeout timeout;
if (params.timeout) {
timespec timeout_copy;
if (!copy_from_user(&timeout_copy, params.timeout))
return -EFAULT;
if (timeout_copy.tv_sec || timeout_copy.tv_nsec) {
timespec ts_since_boot;
timeval_to_timespec(Scheduler::time_since_boot(), ts_since_boot);
timespec_add(ts_since_boot, timeout_copy, computed_timeout);
select_has_timeout = true;
}
timeout = Thread::BlockTimeout(false, &timeout_copy);
}
auto current_thread = Thread::current();
@@ -107,8 +101,8 @@ int Process::sys$select(const Syscall::SC_select_params* user_params)
dbg() << "selecting on (read:" << rfds.size() << ", write:" << wfds.size() << "), timeout=" << params.timeout;
#endif
if (!params.timeout || select_has_timeout) {
if (current_thread->block<Thread::SelectBlocker>(select_has_timeout ? &computed_timeout : nullptr, rfds, wfds, efds).was_interrupted())
if (timeout.should_block()) {
if (current_thread->block<Thread::SelectBlocker>(timeout, rfds, wfds, efds).was_interrupted())
return -EINTR;
}
@@ -148,9 +142,13 @@ int Process::sys$poll(Userspace<const Syscall::SC_poll_params*> user_params)
SmapDisabler disabler;
timespec timeout = {};
if (params.timeout && !copy_from_user(&timeout, params.timeout))
return -EFAULT;
Thread::BlockTimeout timeout;
if (params.timeout) {
timespec timeout_copy;
if (!copy_from_user(&timeout_copy, params.timeout))
return -EFAULT;
timeout = Thread::BlockTimeout(false, &timeout_copy);
}
sigset_t sigmask = {};
if (params.sigmask && !copy_from_user(&sigmask, params.sigmask))
@@ -178,15 +176,6 @@ int Process::sys$poll(Userspace<const Syscall::SC_poll_params*> user_params)
wfds.append(pfd.fd);
}
timespec actual_timeout;
bool has_timeout = false;
if (params.timeout && (timeout.tv_sec || timeout.tv_nsec)) {
timespec ts_since_boot;
timeval_to_timespec(Scheduler::time_since_boot(), ts_since_boot);
timespec_add(ts_since_boot, timeout, actual_timeout);
has_timeout = true;
}
auto current_thread = Thread::current();
u32 previous_signal_mask = 0;
@@ -198,11 +187,11 @@ int Process::sys$poll(Userspace<const Syscall::SC_poll_params*> user_params)
});
#if defined(DEBUG_IO) || defined(DEBUG_POLL_SELECT)
dbg() << "polling on (read:" << rfds.size() << ", write:" << wfds.size() << "), timeout=" << timeout.tv_sec << "s" << timeout.tv_nsec << "ns";
dbg() << "polling on (read:" << rfds.size() << ", write:" << wfds.size() << ")";
#endif
if (!params.timeout || has_timeout) {
if (current_thread->block<Thread::SelectBlocker>(has_timeout ? &actual_timeout : nullptr, rfds, wfds, Thread::SelectBlocker::FDVector()).was_interrupted())
if (timeout.should_block()) {
if (current_thread->block<Thread::SelectBlocker>(timeout, rfds, wfds, Thread::SelectBlocker::FDVector()).was_interrupted())
return -EINTR;
}


@@ -40,7 +40,7 @@ clock_t Process::sys$times(Userspace<tms*> user_times)
if (!copy_to_user(user_times, &times))
return -EFAULT;
return g_uptime & 0x7fffffff;
return TimeManagement::the().uptime_ms() & 0x7fffffff;
}
}


@@ -38,7 +38,7 @@ void SyncTask::spawn()
dbg() << "SyncTask is running";
for (;;) {
VFS::the().sync();
Thread::current()->sleep(1 * TimeManagement::the().ticks_per_second());
Thread::current()->sleep({ 1, 0 });
}
});
}


@@ -26,6 +26,7 @@
#include <AK/Demangle.h>
#include <AK/StringBuilder.h>
#include <AK/Time.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/FileSystem/FileDescription.h>
#include <Kernel/KSyms.h>
@@ -247,15 +248,16 @@ void Thread::relock_process(bool did_unlock)
Processor::current().restore_critical(prev_crit, prev_flags);
}
u64 Thread::sleep(u64 ticks)
auto Thread::sleep(const timespec& duration, timespec* remaining_time) -> BlockResult
{
ASSERT(state() == Thread::Running);
u64 wakeup_time = g_uptime + ticks;
auto ret = Thread::current()->block<Thread::SleepBlocker>(nullptr, wakeup_time);
if (wakeup_time > g_uptime) {
ASSERT(ret.was_interrupted());
}
return wakeup_time;
return Thread::current()->block<Thread::SleepBlocker>(nullptr, Thread::BlockTimeout(false, &duration), remaining_time);
}
auto Thread::sleep_until(const timespec& deadline) -> BlockResult
{
ASSERT(state() == Thread::Running);
return Thread::current()->block<Thread::SleepBlocker>(nullptr, Thread::BlockTimeout(true, &deadline));
}
const char* Thread::state_string() const
@@ -990,10 +992,12 @@ const LogStream& operator<<(const LogStream& stream, const Thread& value)
return stream << value.process().name() << "(" << value.pid().value() << ":" << value.tid().value() << ")";
}
Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeval* timeout, Atomic<bool>* lock, RefPtr<Thread> beneficiary)
Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, const BlockTimeout& timeout, Atomic<bool>* lock, RefPtr<Thread> beneficiary)
{
auto* current_thread = Thread::current();
TimerId timer_id {};
RefPtr<Timer> timer;
bool block_finished = false;
bool did_timeout = false;
bool did_unlock;
{
@@ -1004,6 +1008,25 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeva
// we need to wait until the scheduler lock is released again
{
ScopedSpinLock sched_lock(g_scheduler_lock);
if (!timeout.is_infinite()) {
timer = TimerQueue::the().add_timer_without_id(timeout.absolute_time(), [&]() {
// NOTE: this may execute on the same or any other processor!
ScopedSpinLock lock(g_scheduler_lock);
if (!block_finished) {
did_timeout = true;
wake_from_queue();
}
});
if (!timer) {
dbg() << "wait_on timed out before blocking";
// We timed out already, don't block
// The API contract guarantees we return with interrupts enabled,
// regardless of how we got called
critical.set_interrupt_flag_on_destruction(true);
return BlockResult::InterruptedByTimeout;
}
}
// m_queue can only be accessed safely if g_scheduler_lock is held!
m_queue = &queue;
if (!queue.enqueue(*current_thread)) {
@@ -1014,7 +1037,6 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeva
// The API contract guarantees we return with interrupts enabled,
// regardless of how we got called
critical.set_interrupt_flag_on_destruction(true);
return BlockResult::NotBlocked;
}
@@ -1024,12 +1046,6 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeva
set_state(State::Queued);
m_wait_reason = reason;
if (timeout) {
timer_id = TimerQueue::the().add_timer(*timeout, [&]() {
wake_from_queue();
});
}
// Yield and wait for the queue to wake us up again.
if (beneficiary)
Scheduler::donate_to(beneficiary, reason);
@@ -1058,6 +1074,7 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeva
// To be able to look at m_wait_queue_node we once again need the
// scheduler lock, which is held when we insert into the queue
ScopedSpinLock sched_lock(g_scheduler_lock);
block_finished = true;
if (m_queue) {
ASSERT(m_queue == &queue);
@@ -1071,10 +1088,13 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeva
// In this case, the queue should not contain us anymore.
result = BlockResult::InterruptedByDeath;
}
}
// Make sure we cancel the timer if woke normally.
if (timeout && !result.was_interrupted())
TimerQueue::the().cancel_timer(timer_id);
if (timer && !did_timeout) {
// Cancel the timer while not holding any locks. This allows
// the timer function to complete before we remove it
// (e.g. if it's on another processor)
TimerQueue::the().cancel_timer(timer.release_nonnull());
}
// The API contract guarantees we return with interrupts enabled,


@@ -31,6 +31,7 @@
#include <AK/Optional.h>
#include <AK/OwnPtr.h>
#include <AK/String.h>
#include <AK/Time.h>
#include <AK/Vector.h>
#include <AK/WeakPtr.h>
#include <AK/Weakable.h>
@@ -39,6 +40,7 @@
#include <Kernel/KResult.h>
#include <Kernel/Scheduler.h>
#include <Kernel/ThreadTracer.h>
#include <Kernel/TimerQueue.h>
#include <Kernel/UnixTypes.h>
#include <LibC/fd_set.h>
#include <LibELF/AuxiliaryVector.h>
@@ -178,33 +180,129 @@ public:
Queued,
};
class BlockResult {
public:
enum Type {
WokeNormally,
NotBlocked,
InterruptedBySignal,
InterruptedByDeath,
InterruptedByTimeout,
};
BlockResult() = delete;
BlockResult(Type type)
: m_type(type)
{
}
bool operator==(Type type) const
{
return m_type == type;
}
bool operator!=(Type type) const
{
return m_type != type;
}
bool was_interrupted() const
{
switch (m_type) {
case InterruptedBySignal:
case InterruptedByDeath:
return true;
default:
return false;
}
}
bool timed_out() const
{
return m_type == InterruptedByTimeout;
}
private:
Type m_type;
};
class BlockTimeout {
public:
BlockTimeout()
: m_infinite(true)
{
}
BlockTimeout(std::nullptr_t)
: m_infinite(true)
{
}
explicit BlockTimeout(bool is_absolute, const timeval* time, const timespec* start_time = nullptr)
: m_infinite(!time)
{
if (!m_infinite) {
if (time->tv_sec > 0 || time->tv_usec > 0) {
timeval_to_timespec(*time, m_time);
m_should_block = true;
}
m_start_time = start_time ? *start_time : TimeManagement::the().monotonic_time();
if (!is_absolute)
timespec_add(m_time, m_start_time, m_time);
}
}
explicit BlockTimeout(bool is_absolute, const timespec* time, const timespec* start_time = nullptr)
: m_infinite(!time)
{
if (!m_infinite) {
if (time->tv_sec > 0 || time->tv_nsec > 0) {
m_time = *time;
m_should_block = true;
}
m_start_time = start_time ? *start_time : TimeManagement::the().monotonic_time();
if (!is_absolute)
timespec_add(m_time, m_start_time, m_time);
}
}
const timespec& absolute_time() const { return m_time; }
const timespec* start_time() const { return !m_infinite ? &m_start_time : nullptr; }
bool is_infinite() const { return m_infinite; }
bool should_block() const { return m_infinite || m_should_block; };
private:
timespec m_time { 0, 0 };
timespec m_start_time { 0, 0 };
bool m_infinite { false };
bool m_should_block { false };
};
class Blocker {
public:
virtual ~Blocker() { }
virtual bool should_unblock(Thread&) = 0;
virtual const char* state_string() const = 0;
virtual bool is_reason_signal() const { return false; }
virtual timespec* override_timeout(timespec* timeout) { return timeout; }
virtual const BlockTimeout& override_timeout(const BlockTimeout& timeout) { return timeout; }
virtual void was_unblocked() { }
void set_interrupted_by_death()
{
ScopedSpinLock lock(m_lock);
m_was_interrupted_by_death = true;
}
bool was_interrupted_by_death() const
{
ScopedSpinLock lock(m_lock);
return m_was_interrupted_by_death;
}
void set_interrupted_by_signal()
{
ScopedSpinLock lock(m_lock);
m_was_interrupted_while_blocked = true;
}
bool was_interrupted_by_signal() const
virtual Thread::BlockResult block_result(bool did_timeout)
{
ScopedSpinLock lock(m_lock);
return m_was_interrupted_while_blocked;
if (m_was_interrupted_by_death)
return Thread::BlockResult::InterruptedByDeath;
if (m_was_interrupted_while_blocked)
return Thread::BlockResult::InterruptedBySignal;
if (did_timeout)
return Thread::BlockResult::InterruptedByTimeout;
return Thread::BlockResult::WokeNormally;
}
protected:
@@ -260,10 +358,10 @@ public:
explicit WriteBlocker(const FileDescription&);
virtual bool should_unblock(Thread&) override;
virtual const char* state_string() const override { return "Writing"; }
virtual timespec* override_timeout(timespec*) override;
virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;
private:
timespec m_deadline;
BlockTimeout m_timeout;
};
class ReadBlocker final : public FileDescriptionBlocker {
@@ -271,10 +369,10 @@ public:
explicit ReadBlocker(const FileDescription&);
virtual bool should_unblock(Thread&) override;
virtual const char* state_string() const override { return "Reading"; }
virtual timespec* override_timeout(timespec*) override;
virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;
private:
timespec m_deadline;
BlockTimeout m_timeout;
};
class ConditionBlocker final : public Blocker {
@@ -290,12 +388,16 @@ public:
class SleepBlocker final : public Blocker {
public:
explicit SleepBlocker(u64 wakeup_time);
virtual bool should_unblock(Thread&) override;
explicit SleepBlocker(const BlockTimeout&, timespec* = nullptr);
virtual bool should_unblock(Thread&) override { return false; }
virtual const char* state_string() const override { return "Sleeping"; }
virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;
virtual void was_unblocked() override;
virtual Thread::BlockResult block_result(bool) override;
private:
u64 m_wakeup_time { 0 };
BlockTimeout m_deadline;
timespec* m_remaining;
};
class SelectBlocker final : public Blocker {
@@ -376,95 +478,79 @@ public:
VirtualAddress thread_specific_data() const { return m_thread_specific_data; }
size_t thread_specific_region_size() const { return m_thread_specific_region_size; }
u64 sleep(u64 ticks);
class BlockResult {
public:
enum Type {
WokeNormally,
NotBlocked,
InterruptedBySignal,
InterruptedByDeath,
InterruptedByTimeout,
};
BlockResult() = delete;
BlockResult(Type type)
: m_type(type)
{
}
bool operator==(Type type) const
{
return m_type == type;
}
bool was_interrupted() const
{
switch (m_type) {
case InterruptedBySignal:
case InterruptedByDeath:
case InterruptedByTimeout:
return true;
default:
return false;
}
}
private:
Type m_type;
};
template<typename T, class... Args>
[[nodiscard]] BlockResult block(timespec* timeout, Args&&... args)
[[nodiscard]] BlockResult block(const BlockTimeout& timeout, Args&&... args)
{
T t(forward<Args>(args)...);
bool did_timeout = false;
RefPtr<Timer> timer;
{
ScopedSpinLock scheduler_lock(g_scheduler_lock);
ScopedSpinLock lock(m_lock);
// We should never be blocking a blocked (or otherwise non-active) thread.
ASSERT(state() == Thread::Running);
ASSERT(m_blocker == nullptr);
m_blocker = &t;
if (t.should_unblock(*this)) {
// Don't block if the wake condition is already met
t.was_unblocked();
m_blocker = nullptr;
return BlockResult::NotBlocked;
}
m_blocker = &t;
m_blocker_timeout = t.override_timeout(timeout);
}
auto& block_timeout = t.override_timeout(timeout);
if (!block_timeout.is_infinite()) {
m_blocker_timeout = timer = TimerQueue::the().add_timer_without_id(block_timeout.absolute_time(), [&]() {
// NOTE: this may execute on the same or any other processor!
ScopedSpinLock scheduler_lock(g_scheduler_lock);
ScopedSpinLock lock(m_lock);
if (m_blocker) {
m_blocker_timeout = nullptr;
unblock();
}
});
if (!m_blocker_timeout) {
// Timeout is already in the past
t.was_unblocked();
m_blocker = nullptr;
return BlockResult::InterruptedByTimeout;
}
} else {
m_blocker_timeout = nullptr;
}
{
ScopedSpinLock scheduler_lock(g_scheduler_lock);
set_state(Thread::Blocked);
}
// Yield to the scheduler, and wait for us to resume unblocked.
yield_without_holding_big_lock();
// Acquire our lock again
ScopedSpinLock lock(m_lock);
{
ScopedSpinLock scheduler_lock(g_scheduler_lock);
ScopedSpinLock lock(m_lock);
// We should no longer be blocked once we woke up
ASSERT(state() != Thread::Blocked);
// We should no longer be blocked once we woke up
ASSERT(state() != Thread::Blocked);
// Remove ourselves...
m_blocker = nullptr;
m_blocker_timeout = nullptr;
// Remove ourselves...
m_blocker = nullptr;
if (timer && !m_blocker_timeout)
did_timeout = true;
}
if (timer && !did_timeout) {
// Cancel the timer while not holding any locks. This allows
// the timer function to complete before we remove it
// (e.g. if it's on another processor)
TimerQueue::the().cancel_timer(timer.release_nonnull());
}
// Notify the blocker that we are no longer blocking. It may need
// to clean up now while we're still holding m_lock
t.was_unblocked();
if (t.was_interrupted_by_death())
return BlockResult::InterruptedByDeath;
if (t.was_interrupted_by_signal())
return BlockResult::InterruptedBySignal;
return BlockResult::WokeNormally;
return t.block_result(did_timeout);
}
[[nodiscard]] BlockResult block_until(const char* state_string, Function<bool()>&& condition)
@@ -472,11 +558,14 @@ public:
return block<ConditionBlocker>(nullptr, state_string, move(condition));
}
BlockResult wait_on(WaitQueue& queue, const char* reason, timeval* timeout = nullptr, Atomic<bool>* lock = nullptr, RefPtr<Thread> beneficiary = {});
BlockResult wait_on(WaitQueue& queue, const char* reason, const BlockTimeout& = nullptr, Atomic<bool>* lock = nullptr, RefPtr<Thread> beneficiary = {});
void wake_from_queue();
void unblock();
BlockResult sleep(const timespec&, timespec* = nullptr);
BlockResult sleep_until(const timespec&);
// Tell this thread to unblock if needed,
// gracefully unwind the stack and die.
void set_should_die();
@@ -497,7 +586,7 @@ public:
void send_urgent_signal_to_self(u8 signal);
void send_signal(u8 signal, Process* sender);
void consider_unblock(time_t now_sec, long now_usec);
void consider_unblock();
u32 update_signal_mask(u32 signal_mask);
u32 signal_mask_block(sigset_t signal_set, bool block);
@@ -652,7 +741,7 @@ private:
size_t m_thread_specific_region_size { 0 };
SignalActionData m_signal_action_data[32];
Blocker* m_blocker { nullptr };
timespec* m_blocker_timeout { nullptr };
RefPtr<Timer> m_blocker_timeout;
const char* m_wait_reason { nullptr };
WaitQueue* m_queue { nullptr };


@@ -38,6 +38,7 @@
#include <Kernel/Time/PIT.h>
#include <Kernel/Time/RTC.h>
#include <Kernel/Time/TimeManagement.h>
#include <Kernel/TimerQueue.h>
#include <Kernel/VM/MemoryManager.h>
//#define TIME_DEBUG
@@ -56,6 +57,22 @@ bool TimeManagement::is_system_timer(const HardwareTimerBase& timer) const
return &timer == m_system_timer.ptr();
}
timespec TimeManagement::ticks_to_time(u64 ticks, time_t ticks_per_second)
{
timespec tspec;
tspec.tv_sec = ticks / ticks_per_second;
tspec.tv_nsec = (ticks % ticks_per_second) * (1'000'000'000 / ticks_per_second);
ASSERT(tspec.tv_nsec <= 1'000'000'000);
return tspec;
}
u64 TimeManagement::time_to_ticks(const timespec& tspec, time_t ticks_per_second)
{
u64 ticks = (u64)tspec.tv_sec * ticks_per_second;
ticks += ((u64)tspec.tv_nsec * ticks_per_second) / 1'000'000'000;
return ticks;
}
void TimeManagement::set_epoch_time(timespec ts)
{
InterruptDisabler disabler;
@@ -63,9 +80,42 @@ void TimeManagement::set_epoch_time(timespec ts)
m_remaining_epoch_time_adjustment = { 0, 0 };
}
u64 TimeManagement::monotonic_ticks() const
{
long seconds;
u64 ticks;
u32 update_iteration;
do {
update_iteration = m_update1.load(AK::MemoryOrder::memory_order_acquire);
seconds = m_seconds_since_boot;
ticks = m_ticks_this_second;
} while (update_iteration != m_update2.load(AK::MemoryOrder::memory_order_acquire));
return ticks + (u64)seconds * (u64)ticks_per_second();
}
timespec TimeManagement::monotonic_time() const
{
return ticks_to_time(monotonic_ticks(), ticks_per_second());
}
timespec TimeManagement::epoch_time() const
{
return m_epoch_time;
timespec ts;
u32 update_iteration;
do {
update_iteration = m_update1.load(AK::MemoryOrder::memory_order_acquire);
ts = m_epoch_time;
} while (update_iteration != m_update2.load(AK::MemoryOrder::memory_order_acquire));
return ts;
}
u64 TimeManagement::uptime_ms() const
{
auto mtime = monotonic_time();
u64 ms = mtime.tv_sec * 1000ull;
ms += mtime.tv_nsec / 1000000;
return ms;
}
void TimeManagement::initialize(u32 cpu)
@@ -98,18 +148,9 @@ void TimeManagement::set_system_timer(HardwareTimerBase& timer)
m_system_timer = timer;
}
time_t TimeManagement::seconds_since_boot() const
{
return m_seconds_since_boot;
}
time_t TimeManagement::ticks_per_second() const
{
return m_system_timer->ticks_per_second();
}
time_t TimeManagement::ticks_this_second() const
{
return m_ticks_this_second;
return m_time_keeper_timer->ticks_per_second();
}
time_t TimeManagement::boot_time() const
@@ -221,7 +262,7 @@ bool TimeManagement::probe_and_set_non_legacy_hardware_timers()
}
}
m_system_timer->set_callback(Scheduler::timer_tick);
m_system_timer->set_callback(TimeManagement::timer_tick);
m_time_keeper_timer->set_callback(TimeManagement::update_time);
dbg() << "Reset timers";
@@ -243,7 +284,7 @@ bool TimeManagement::probe_and_set_legacy_hardware_timers()
}
m_hardware_timers.append(PIT::initialize(TimeManagement::update_time));
m_hardware_timers.append(RealTimeClock::create(Scheduler::timer_tick));
m_hardware_timers.append(RealTimeClock::create(TimeManagement::timer_tick));
m_time_keeper_timer = m_hardware_timers[0];
m_system_timer = m_hardware_timers[1];
return true;
@@ -265,6 +306,8 @@ void TimeManagement::increment_time_since_boot(const RegisterState&)
constexpr time_t MaxSlewNanos = NanosPerTick / 100;
static_assert(MaxSlewNanos < NanosPerTick);
u32 update_iteration = m_update1.fetch_add(1, AK::MemoryOrder::memory_order_relaxed);
// Clamp twice, to make sure intermediate fits into a long.
long slew_nanos = clamp(clamp(m_remaining_epoch_time_adjustment.tv_sec, (time_t)-1, (time_t)1) * 1'000'000'000 + m_remaining_epoch_time_adjustment.tv_nsec, -MaxSlewNanos, MaxSlewNanos);
timespec slew_nanos_ts;
@@ -280,6 +323,16 @@ void TimeManagement::increment_time_since_boot(const RegisterState&)
++m_seconds_since_boot;
m_ticks_this_second = 0;
}
m_update2.store(update_iteration + 1, AK::MemoryOrder::memory_order_release);
}
void TimeManagement::timer_tick(const RegisterState& regs)
{
if (Processor::current().in_irq() <= 1) {
// Don't expire timers while handling IRQs
TimerQueue::the().fire();
}
Scheduler::timer_tick(regs);
}
}


@@ -46,11 +46,13 @@ public:
static void initialize(u32 cpu);
static TimeManagement& the();
static timespec ticks_to_time(u64 ticks, time_t ticks_per_second);
static u64 time_to_ticks(const timespec& tspec, time_t ticks_per_second);
timespec monotonic_time() const;
timespec epoch_time() const;
void set_epoch_time(timespec);
time_t seconds_since_boot() const;
time_t ticks_per_second() const;
time_t ticks_this_second() const;
time_t boot_time() const;
bool is_system_timer(const HardwareTimerBase&) const;
@@ -60,6 +62,8 @@ public:
static bool is_hpet_periodic_mode_allowed();
u64 uptime_ms() const;
u64 monotonic_ticks() const;
static timeval now_as_timeval();
timespec remaining_epoch_time_adjustment() const { return m_remaining_epoch_time_adjustment; }
@@ -72,11 +76,16 @@ private:
Vector<HardwareTimerBase*> scan_for_non_periodic_timers();
NonnullRefPtrVector<HardwareTimerBase> m_hardware_timers;
void set_system_timer(HardwareTimerBase&);
static void timer_tick(const RegisterState&);
// Variables between m_update1 and m_update2 are synchronized
Atomic<u32> m_update1 { 0 };
u32 m_ticks_this_second { 0 };
u32 m_seconds_since_boot { 0 };
timespec m_epoch_time { 0, 0 };
timespec m_remaining_epoch_time_adjustment { 0, 0 };
Atomic<u32> m_update2 { 0 };
RefPtr<HardwareTimerBase> m_system_timer;
RefPtr<HardwareTimerBase> m_time_keeper_timer;
};


@@ -28,6 +28,7 @@
#include <AK/NonnullOwnPtr.h>
#include <AK/OwnPtr.h>
#include <AK/Singleton.h>
#include <AK/Time.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Time/TimeManagement.h>
#include <Kernel/TimerQueue.h>
@@ -35,6 +36,7 @@
namespace Kernel {
static AK::Singleton<TimerQueue> s_the;
static SpinLock<u8> g_timerqueue_lock;
TimerQueue& TimerQueue::the()
{
@@ -46,12 +48,38 @@ TimerQueue::TimerQueue()
m_ticks_per_second = TimeManagement::the().ticks_per_second();
}
TimerId TimerQueue::add_timer(NonnullOwnPtr<Timer>&& timer)
RefPtr<Timer> TimerQueue::add_timer_without_id(const timespec& deadline, Function<void()>&& callback)
{
u64 timer_expiration = timer->expires;
ASSERT(timer_expiration >= g_uptime);
if (deadline <= TimeManagement::the().monotonic_time())
return {};
// Because timer handlers can execute on any processor and there is
// a race between executing a timer handler and cancel_timer() this
// *must* be a RefPtr<Timer>. Otherwise calling cancel_timer() could
// inadvertently cancel another timer that has been created between
// returning from the timer handler and a call to cancel_timer().
auto timer = adopt(*new Timer(time_to_ticks(deadline), move(callback)));
ScopedSpinLock lock(g_timerqueue_lock);
timer->id = 0; // Don't generate a timer id
add_timer_locked(timer);
return timer;
}
TimerId TimerQueue::add_timer(NonnullRefPtr<Timer>&& timer)
{
ScopedSpinLock lock(g_timerqueue_lock);
timer->id = ++m_timer_id_count;
ASSERT(timer->id != 0); // wrapped
add_timer_locked(move(timer));
return m_timer_id_count;
}
void TimerQueue::add_timer_locked(NonnullRefPtr<Timer> timer)
{
u64 timer_expiration = timer->expires;
ASSERT(timer_expiration >= time_to_ticks(TimeManagement::the().monotonic_time()));
if (m_timer_queue.is_empty()) {
m_timer_queue.append(move(timer));
@@ -69,20 +97,34 @@ TimerId TimerQueue::add_timer(NonnullOwnPtr<Timer>&& timer)
m_next_timer_due = timer_expiration;
}
}
return m_timer_id_count;
}
TimerId TimerQueue::add_timer(timeval& deadline, Function<void()>&& callback)
{
NonnullOwnPtr timer = make<Timer>();
timer->expires = g_uptime + seconds_to_ticks(deadline.tv_sec) + microseconds_to_ticks(deadline.tv_usec);
timer->callback = move(callback);
return add_timer(move(timer));
auto expires = TimeManagement::the().monotonic_time();
timespec_add_timeval(expires, deadline, expires);
return add_timer(adopt(*new Timer(time_to_ticks(expires), move(callback))));
}
timespec TimerQueue::ticks_to_time(u64 ticks) const
{
timespec tspec;
tspec.tv_sec = ticks / m_ticks_per_second;
tspec.tv_nsec = (ticks % m_ticks_per_second) * (1'000'000'000 / m_ticks_per_second);
ASSERT(tspec.tv_nsec <= 1'000'000'000);
return tspec;
}
u64 TimerQueue::time_to_ticks(const timespec& tspec) const
{
u64 ticks = (u64)tspec.tv_sec * m_ticks_per_second;
ticks += ((u64)tspec.tv_nsec * m_ticks_per_second) / 1'000'000'000;
return ticks;
}
bool TimerQueue::cancel_timer(TimerId id)
{
ScopedSpinLock lock(g_timerqueue_lock);
auto it = m_timer_queue.find([id](auto& timer) { return timer->id == id; });
if (it.is_end())
return false;
@@ -96,23 +138,45 @@ bool TimerQueue::cancel_timer(TimerId id)
return true;
}
bool TimerQueue::cancel_timer(const NonnullRefPtr<Timer>& timer)
{
ScopedSpinLock lock(g_timerqueue_lock);
auto it = m_timer_queue.find([timer](auto& t) { return t.ptr() == timer.ptr(); });
if (it.is_end())
return false;
auto was_next_timer = it.is_begin();
m_timer_queue.remove(it);
if (was_next_timer)
update_next_timer_due();
return true;
}
void TimerQueue::fire()
{
ScopedSpinLock lock(g_timerqueue_lock);
if (m_timer_queue.is_empty())
return;
ASSERT(m_next_timer_due == m_timer_queue.first()->expires);
while (!m_timer_queue.is_empty() && g_uptime > m_timer_queue.first()->expires) {
while (!m_timer_queue.is_empty() && TimeManagement::the().monotonic_ticks() > m_timer_queue.first()->expires) {
auto timer = m_timer_queue.take_first();
timer->callback();
}
update_next_timer_due();
update_next_timer_due();
lock.unlock();
timer->callback();
lock.lock();
}
}
void TimerQueue::update_next_timer_due()
{
ASSERT(g_timerqueue_lock.is_locked());
if (m_timer_queue.is_empty())
m_next_timer_due = 0;
else


@@ -27,8 +27,9 @@
#pragma once
#include <AK/Function.h>
#include <AK/NonnullOwnPtr.h>
#include <AK/NonnullRefPtr.h>
#include <AK/OwnPtr.h>
#include <AK/RefCounted.h>
#include <AK/SinglyLinkedList.h>
#include <Kernel/Time/TimeManagement.h>
@@ -36,7 +37,7 @@ namespace Kernel {
typedef u64 TimerId;
struct Timer {
struct Timer : public RefCounted<Timer> {
TimerId id;
u64 expires;
Function<void()> callback;
@@ -52,6 +53,12 @@ struct Timer {
{
return id == rhs.id;
}
Timer(u64 expires, Function<void()>&& callback)
: expires(expires)
, callback(move(callback))
{
}
};
class TimerQueue {
@@ -59,21 +66,24 @@ public:
TimerQueue();
static TimerQueue& the();
TimerId add_timer(NonnullOwnPtr<Timer>&&);
TimerId add_timer(NonnullRefPtr<Timer>&&);
RefPtr<Timer> add_timer_without_id(const timespec& timeout, Function<void()>&& callback);
TimerId add_timer(timeval& timeout, Function<void()>&& callback);
bool cancel_timer(TimerId id);
bool cancel_timer(const NonnullRefPtr<Timer>&);
void fire();
private:
void update_next_timer_due();
void add_timer_locked(NonnullRefPtr<Timer>);
u64 microseconds_to_ticks(u64 micro_seconds) { return micro_seconds * (m_ticks_per_second / 1'000'000); }
u64 seconds_to_ticks(u64 seconds) { return seconds * m_ticks_per_second; }
timespec ticks_to_time(u64 ticks) const;
u64 time_to_ticks(const timespec&) const;
u64 m_next_timer_due { 0 };
u64 m_timer_id_count { 0 };
u64 m_ticks_per_second { 0 };
SinglyLinkedList<NonnullOwnPtr<Timer>> m_timer_queue;
SinglyLinkedList<NonnullRefPtr<Timer>> m_timer_queue;
};
}