Kernel: Improve time keeping and dramatically reduce interrupt load
This implements a number of changes related to time:

* If an HPET is present, it is now used only as a system timer, unless the Local APIC timer is used (in which case the HPET timer will not trigger any interrupts at all).
* If an HPET is present, the current time can now be as accurate as the chip allows, independently of the system timer. We now query the HPET main counter for the current time in CPU #0's system timer interrupt and use that as a baseline. If a high-precision time is requested, that baseline is combined with querying the HPET timer directly, which should give a much more accurate timestamp at the expense of more overhead. For faster timestamps, the coarser value based on the last interrupt is returned. This also means that any missed interrupts should not cause the time to drift.
* The default system interrupt rate is reduced to about 250 per second.
* Fix the calculation of thread CPU usage by using the number of ticks a thread consumed rather than the number of times a context switch happened.
* Implement CLOCK_REALTIME_COARSE and CLOCK_MONOTONIC_COARSE and use them for most cases where precise timestamps are not needed.
parent a3fdf5148b
commit 5f51d85184

Notes: sideshowbarker 2024-07-19 00:41:32 +09:00
Author: https://github.com/tomuta
Commit: https://github.com/SerenityOS/serenity/commit/5f51d851840
Pull-request: https://github.com/SerenityOS/serenity/pull/4320

32 changed files with 318 additions and 190 deletions
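For readers unfamiliar with the synchronization scheme referenced in the diff below, here is a rough user-space model of the pattern TimeManagement uses: the timer interrupt bumps one counter, publishes the new seconds/ticks pair, then bumps a second counter, and readers retry until both counters match. This is an illustrative sketch only; the struct and names are made up and it is not code from the commit.

#include <atomic>
#include <cstdint>

// Hypothetical stand-in for the kernel's TimeManagement bookkeeping.
struct CoarseClockModel {
    std::atomic<uint32_t> update1 { 0 };
    uint64_t seconds_since_boot { 0 };
    uint32_t ticks_this_second { 0 };
    std::atomic<uint32_t> update2 { 0 };
    uint64_t ticks_per_second { 250 }; // matches the new default interrupt rate

    // Writer side: called from the system timer interrupt on one CPU only.
    void on_timer_interrupt()
    {
        uint32_t iteration = update1.fetch_add(1, std::memory_order_acquire);
        if (++ticks_this_second >= ticks_per_second) {
            ++seconds_since_boot;
            ticks_this_second = 0;
        }
        update2.store(iteration + 1, std::memory_order_release);
    }

    // Reader side: coarse time based on the last interrupt; retries if the
    // interrupt handler updated the fields while we were reading them.
    uint64_t coarse_time_ns() const
    {
        uint64_t seconds;
        uint32_t ticks;
        uint32_t iteration;
        do {
            iteration = update1.load(std::memory_order_acquire);
            seconds = seconds_since_boot;
            ticks = ticks_this_second;
        } while (iteration != update2.load(std::memory_order_acquire));
        return seconds * 1'000'000'000ull
            + ((uint64_t)ticks * 1'000'000'000ull) / ticks_per_second;
    }
};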
@@ -355,12 +355,12 @@ void ProcessModel::update()
auto previous_pid_count = m_pids.size();
auto all_processes = Core::ProcessStatisticsReader::get_all();
unsigned last_sum_times_scheduled = 0;
u64 last_sum_ticks_scheduled = 0;
for (auto& it : m_threads)
last_sum_times_scheduled += it.value->current_state.times_scheduled;
last_sum_ticks_scheduled += it.value->current_state.ticks_user + it.value->current_state.ticks_kernel;
HashTable<PidAndTid> live_pids;
unsigned sum_times_scheduled = 0;
u64 sum_ticks_scheduled = 0;
for (auto& it : all_processes) {
for (auto& thread : it.value.threads) {
ThreadState state;

@@ -393,12 +393,14 @@ void ProcessModel::update()
state.pgid = it.value.pgid;
state.sid = it.value.sid;
state.times_scheduled = thread.times_scheduled;
state.ticks_user = thread.ticks_user;
state.ticks_kernel = thread.ticks_kernel;
state.cpu = thread.cpu;
state.cpu_percent = 0;
state.priority = thread.priority;
state.effective_priority = thread.effective_priority;
state.state = thread.state;
sum_times_scheduled += thread.times_scheduled;
sum_ticks_scheduled += thread.ticks_user + thread.ticks_kernel;
{
auto pit = m_threads.find({ it.value.pid, thread.tid });
if (pit == m_threads.end())

@@ -423,8 +425,9 @@ void ProcessModel::update()
continue;
}
auto& process = *it.value;
u32 times_scheduled_diff = process.current_state.times_scheduled - process.previous_state.times_scheduled;
process.current_state.cpu_percent = ((float)times_scheduled_diff * 100) / (float)(sum_times_scheduled - last_sum_times_scheduled);
u32 times_scheduled_diff = (process.current_state.ticks_user + process.current_state.ticks_kernel)
- (process.previous_state.ticks_user + process.previous_state.ticks_kernel);
process.current_state.cpu_percent = ((float)times_scheduled_diff * 100) / (float)(sum_ticks_scheduled - last_sum_ticks_scheduled);
if (it.key.pid != 0) {
m_cpus[process.current_state.cpu].total_cpu_percent += process.current_state.cpu_percent;
m_pids.append(it.key);
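The ProcessModel change above boils down to the following arithmetic (a hedged sketch with hypothetical names, not code from the commit): per-thread CPU usage is the share of scheduler ticks the thread consumed between two samples, instead of the share of context switches.

#include <cstdint>

struct ThreadSample {
    uint64_t ticks_user { 0 };
    uint64_t ticks_kernel { 0 };
};

// Percentage of CPU time a thread used between two samples, relative to the
// total ticks consumed by all threads over the same interval.
static float cpu_percent(const ThreadSample& previous, const ThreadSample& current,
    uint64_t total_ticks_previous, uint64_t total_ticks_current)
{
    uint64_t thread_delta = (current.ticks_user + current.ticks_kernel)
        - (previous.ticks_user + previous.ticks_kernel);
    uint64_t total_delta = total_ticks_current - total_ticks_previous;
    if (total_delta == 0)
        return 0.0f;
    return (float)thread_delta * 100.0f / (float)total_delta;
}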
@@ -116,6 +116,8 @@ private:
pid_t pgid;
pid_t sid;
unsigned times_scheduled;
unsigned ticks_user;
unsigned ticks_kernel;
String name;
String state;
String user;
@@ -884,7 +884,8 @@ static OwnPtr<KBuffer> procfs$all(InodeIdentifier)
thread_object.add("tid", thread.tid().value());
thread_object.add("name", thread.name());
thread_object.add("times_scheduled", thread.times_scheduled());
thread_object.add("ticks", thread.ticks());
thread_object.add("ticks_user", thread.ticks_in_user());
thread_object.add("ticks_kernel", thread.ticks_in_kernel());
thread_object.add("state", thread.state_string());
thread_object.add("cpu", thread.cpu());
thread_object.add("priority", thread.priority());
@@ -64,10 +64,10 @@ void Scheduler::init_thread(Thread& thread)
static u32 time_slice_for(const Thread& thread)
{
// One time slice unit == 1ms
// One time slice unit == 4ms (assuming 250 ticks/second)
if (&thread == Processor::current().idle_thread())
return 1;
return 10;
return 2;
}
Thread* g_finalizer;

@@ -219,6 +219,7 @@ bool Scheduler::pick_next()
// but since we're still holding the scheduler lock we're still in a critical section
critical.leave();
thread_to_schedule->set_ticks_left(time_slice_for(*thread_to_schedule));
return context_switch(thread_to_schedule);
}

@@ -317,7 +318,6 @@ bool Scheduler::donate_to(RefPtr<Thread>& beneficiary, const char* reason)
bool Scheduler::context_switch(Thread* thread)
{
thread->set_ticks_left(time_slice_for(*thread));
thread->did_schedule();
auto from_thread = Thread::current();

@@ -480,7 +480,7 @@ void Scheduler::timer_tick(const RegisterState& regs)
}
}
if (current_thread->tick())
if (current_thread->tick((regs.cs & 3) == 0))
return;
ASSERT_INTERRUPTS_DISABLED();
@@ -46,9 +46,9 @@ unsigned Process::sys$alarm(unsigned seconds)
}
if (seconds > 0) {
auto deadline = TimeManagement::the().current_time(CLOCK_REALTIME).value();
auto deadline = TimeManagement::the().current_time(CLOCK_REALTIME_COARSE).value();
timespec_add(deadline, { seconds, 0 }, deadline);
m_alarm_timer = TimerQueue::the().add_timer_without_id(CLOCK_REALTIME, deadline, [this]() {
m_alarm_timer = TimerQueue::the().add_timer_without_id(CLOCK_REALTIME_COARSE, deadline, [this]() {
[[maybe_unused]] auto rc = send_signal(SIGALRM, nullptr);
});
}

@@ -78,25 +78,21 @@ int Process::sys$clock_nanosleep(Userspace<const Syscall::SC_clock_nanosleep_par
bool is_absolute = params.flags & TIMER_ABSTIME;
switch (params.clock_id) {
case CLOCK_MONOTONIC:
case CLOCK_REALTIME: {
bool was_interrupted;
if (is_absolute) {
was_interrupted = Thread::current()->sleep_until(params.clock_id, requested_sleep).was_interrupted();
} else {
timespec remaining_sleep;
was_interrupted = Thread::current()->sleep(params.clock_id, requested_sleep, &remaining_sleep).was_interrupted();
if (was_interrupted && params.remaining_sleep && !copy_to_user(params.remaining_sleep, &remaining_sleep))
return -EFAULT;
}
if (was_interrupted)
return -EINTR;
return 0;
}
default:
if (!TimeManagement::is_valid_clock_id(params.clock_id))
return -EINVAL;
bool was_interrupted;
if (is_absolute) {
was_interrupted = Thread::current()->sleep_until(params.clock_id, requested_sleep).was_interrupted();
} else {
timespec remaining_sleep;
was_interrupted = Thread::current()->sleep(params.clock_id, requested_sleep, &remaining_sleep).was_interrupted();
if (was_interrupted && params.remaining_sleep && !copy_to_user(params.remaining_sleep, &remaining_sleep))
return -EFAULT;
}
if (was_interrupted)
return -EINTR;
return 0;
}
int Process::sys$adjtime(Userspace<const timeval*> user_delta, Userspace<timeval*> user_old_delta)
@@ -400,13 +400,15 @@ void Thread::finalize_dying_threads()
}
}
bool Thread::tick()
bool Thread::tick(bool in_kernel)
{
++m_ticks;
if (tss().cs & 3)
++m_process->m_ticks_in_user;
else
if (in_kernel) {
++m_process->m_ticks_in_kernel;
++m_ticks_in_kernel;
} else {
++m_process->m_ticks_in_user;
++m_ticks_in_user;
}
return --m_ticks_left;
}
@@ -211,7 +211,7 @@ public:
: m_infinite(true)
{
}
explicit BlockTimeout(bool is_absolute, const timeval* time, const timespec* start_time = nullptr, clockid_t clock_id = CLOCK_MONOTONIC)
explicit BlockTimeout(bool is_absolute, const timeval* time, const timespec* start_time = nullptr, clockid_t clock_id = CLOCK_MONOTONIC_COARSE)
: m_clock_id(clock_id)
, m_infinite(!time)
{

@@ -225,7 +225,7 @@ public:
timespec_add(m_time, m_start_time, m_time);
}
}
explicit BlockTimeout(bool is_absolute, const timespec* time, const timespec* start_time = nullptr, clockid_t clock_id = CLOCK_MONOTONIC)
explicit BlockTimeout(bool is_absolute, const timespec* time, const timespec* start_time = nullptr, clockid_t clock_id = CLOCK_MONOTONIC_COARSE)
: m_clock_id(clock_id)
, m_infinite(!time)
{

@@ -249,7 +249,7 @@ public:
private:
timespec m_time { 0, 0 };
timespec m_start_time { 0, 0 };
clockid_t m_clock_id { CLOCK_MONOTONIC };
clockid_t m_clock_id { CLOCK_MONOTONIC_COARSE };
bool m_infinite { false };
bool m_should_block { false };
};
@@ -756,7 +756,6 @@ public:
const TSS32& tss() const { return m_tss; }
State state() const { return m_state; }
const char* state_string() const;
u32 ticks() const { return m_ticks; }
VirtualAddress thread_specific_data() const { return m_thread_specific_data; }
size_t thread_specific_region_size() const { return m_thread_specific_region_size; }

@@ -921,12 +920,12 @@ public:
BlockResult sleep(clockid_t, const timespec&, timespec* = nullptr);
BlockResult sleep(const timespec& duration, timespec* remaining_time = nullptr)
{
return sleep(CLOCK_MONOTONIC, duration, remaining_time);
return sleep(CLOCK_MONOTONIC_COARSE, duration, remaining_time);
}
BlockResult sleep_until(clockid_t, const timespec&);
BlockResult sleep_until(const timespec& duration)
{
return sleep_until(CLOCK_MONOTONIC, duration);
return sleep_until(CLOCK_MONOTONIC_COARSE, duration);
}
// Tell this thread to unblock if needed,

@@ -937,7 +936,7 @@ public:
void exit(void* = nullptr);
bool tick();
bool tick(bool in_kernel);
void set_ticks_left(u32 t) { m_ticks_left = t; }
u32 ticks_left() const { return m_ticks_left; }

@@ -1070,6 +1069,9 @@ public:
static constexpr u32 default_kernel_stack_size = 65536;
static constexpr u32 default_userspace_stack_size = 4 * MiB;
u32 ticks_in_user() const { return m_ticks_in_user; }
u32 ticks_in_kernel() const { return m_ticks_in_kernel; }
RecursiveSpinLock& get_lock() const { return m_lock; }
#ifdef LOCK_DEBUG

@@ -1188,9 +1190,10 @@ private:
TSS32 m_tss;
Atomic<u32> m_cpu { 0 };
u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
u32 m_ticks { 0 };
u32 m_ticks_left { 0 };
u32 m_times_scheduled { 0 };
u32 m_ticks_in_user { 0 };
u32 m_ticks_in_kernel { 0 };
u32 m_pending_signals { 0 };
u32 m_signal_mask { 0 };
u32 m_kernel_stack_base { 0 };
@@ -44,6 +44,7 @@ public:
virtual bool is_periodic_capable() const override { return true; }
virtual void set_periodic() override;
virtual void set_non_periodic() override;
virtual void disable() override { }
virtual void reset_to_default_ticks_per_second() override;
virtual bool try_to_set_frequency(size_t frequency) override;
@@ -197,10 +197,14 @@ void HPET::update_periodic_comparator_value()
auto& regs = registers();
u64 previous_main_value = (u64)regs.main_counter_value.low | ((u64)regs.main_counter_value.high << 32);
m_main_counter_drift += previous_main_value - m_main_counter_last_read;
m_main_counter_last_read = 0;
regs.main_counter_value.low = 0;
regs.main_counter_value.high = 0;
for (auto& comparator : m_comparators) {
auto& timer = regs.timers[comparator.comparator_number()];
if (!comparator.is_enabled())
continue;
if (comparator.is_periodic()) {
// Note that this means we're restarting all periodic timers. There is no
// way to resume periodic timers properly because we reset the main counter

@@ -242,6 +246,33 @@ void HPET::update_non_periodic_comparator_value(const HPETComparator& comparator
timer.comparator_value.low = (u32)new_counter_value;
}
u64 HPET::update_time(u64& seconds_since_boot, u32& ticks_this_second, bool query_only)
{
// Should only be called by the time keeper interrupt handler!
u64 current_value = read_register_safe64(registers().main_counter_value);
u64 delta_ticks = m_main_counter_drift;
if (current_value >= m_main_counter_last_read)
delta_ticks += current_value - m_main_counter_last_read;
else
delta_ticks += m_main_counter_last_read - current_value; // the counter wrapped around
u64 ticks_since_last_second = (u64)ticks_this_second + delta_ticks;
auto ticks_per_second = frequency();
if (ticks_since_last_second > ticks_per_second) {
seconds_since_boot += ticks_since_last_second / ticks_per_second;
ticks_this_second = ticks_since_last_second % ticks_per_second;
} else {
ticks_this_second = ticks_since_last_second;
}
if (!query_only) {
m_main_counter_drift = 0;
m_main_counter_last_read = current_value;
}
// Return the time passed (in ns) since last time update_time was called
return (delta_ticks * 1000000000ull) / ticks_per_second;
}
void HPET::enable_periodic_interrupt(const HPETComparator& comparator)
{
#ifdef HPET_DEBUG
@@ -253,7 +284,8 @@ void HPET::enable_periodic_interrupt(const HPETComparator& comparator)
auto capabilities = timer.capabilities;
ASSERT(capabilities & (u32)HPETFlags::TimerConfiguration::PeriodicInterruptCapable);
timer.capabilities = capabilities | (u32)HPETFlags::TimerConfiguration::GeneratePeriodicInterrupt;
enable(comparator);
if (comparator.is_enabled())
enable(comparator);
}
void HPET::disable_periodic_interrupt(const HPETComparator& comparator)
{

@@ -266,7 +298,8 @@ void HPET::disable_periodic_interrupt(const HPETComparator& comparator)
auto capabilities = timer.capabilities;
ASSERT(capabilities & (u32)HPETFlags::TimerConfiguration::PeriodicInterruptCapable);
timer.capabilities = capabilities & ~(u32)HPETFlags::TimerConfiguration::GeneratePeriodicInterrupt;
enable(comparator);
if (comparator.is_enabled())
enable(comparator);
}
void HPET::disable(const HPETComparator& comparator)

@@ -288,11 +321,6 @@ void HPET::enable(const HPETComparator& comparator)
timer.capabilities = timer.capabilities | (u32)HPETFlags::TimerConfiguration::InterruptEnable;
}
u64 HPET::frequency() const
{
return m_frequency;
}
Vector<unsigned> HPET::capable_interrupt_numbers(const HPETComparator& comparator)
{
ASSERT(comparator.comparator_number() <= m_comparators.size());

@@ -394,7 +422,7 @@ HPET::HPET(PhysicalAddress acpi_hpet)
klog() << "HPET: frequency " << m_frequency << " Hz (" << MEGAHERTZ_TO_HERTZ(m_frequency) << " MHz) resolution: " << calculate_ticks_in_nanoseconds() << "ns";
ASSERT(regs.capabilities.main_counter_tick_period <= ABSOLUTE_MAXIMUM_COUNTER_TICK_PERIOD);
// Reset the counter, just in case...
// Reset the counter, just in case... (needs to match m_main_counter_last_read)
regs.main_counter_value.high = 0;
regs.main_counter_value.low = 0;
if (regs.capabilities.attributes & (u32)HPETFlags::Attributes::LegacyReplacementRouteCapable)
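To make the arithmetic in HPET::update_time() above easier to follow, here is a small standalone sketch of the core conversion, with hypothetical names and the accumulated drift term omitted for brevity:

#include <cstdint>

// Given two raw HPET main counter reads and the counter frequency in Hz,
// return the elapsed time in nanoseconds. If the counter wrapped between the
// two reads, the unsigned subtraction is done in the other direction, as in
// HPET::update_time().
static uint64_t hpet_delta_to_ns(uint64_t last_read, uint64_t current, uint64_t ticks_per_second)
{
    uint64_t delta_ticks = (current >= last_read)
        ? current - last_read
        : last_read - current; // the counter wrapped around
    return (delta_ticks * 1'000'000'000ull) / ticks_per_second;
}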
@@ -45,7 +45,7 @@ public:
static bool check_for_exisiting_periodic_timers();
static HPET& the();
u64 frequency() const;
u64 frequency() const { return m_frequency; }
const NonnullRefPtrVector<HPETComparator>& comparators() const { return m_comparators; }
void disable(const HPETComparator&);

@@ -59,6 +59,8 @@ public:
void enable_periodic_interrupt(const HPETComparator& comparator);
void disable_periodic_interrupt(const HPETComparator& comparator);
u64 update_time(u64& seconds_since_boot, u32& ticks_this_second, bool query_only);
Vector<unsigned> capable_interrupt_numbers(u8 comparator_number);
Vector<unsigned> capable_interrupt_numbers(const HPETComparator&);

@@ -80,7 +82,9 @@ private:
PhysicalAddress m_physical_acpi_hpet_registers;
OwnPtr<Region> m_hpet_mmio_region;
u64 m_main_counter_clock_period { 0 };
u64 m_main_counter_last_read { 0 };
u64 m_main_counter_drift { 0 };
u16 m_vendor_id;
u16 m_minimum_tick;
u64 m_frequency;
@@ -41,20 +41,31 @@ HPETComparator::HPETComparator(u8 number, u8 irq, bool periodic_capable)
: HardwareTimer(irq)
, m_periodic(false)
, m_periodic_capable(periodic_capable)
, m_enabled(false)
, m_comparator_number(number)
{
}
void HPETComparator::disable()
{
if (!m_enabled)
return;
m_enabled = false;
HPET::the().disable(*this);
}
void HPETComparator::set_periodic()
{
ASSERT(m_periodic_capable);
m_periodic = true;
m_enabled = true;
HPET::the().enable_periodic_interrupt(*this);
}
void HPETComparator::set_non_periodic()
{
ASSERT(m_periodic_capable);
m_periodic = false;
m_enabled = true;
HPET::the().disable_periodic_interrupt(*this);
}

@@ -79,7 +90,7 @@ size_t HPETComparator::ticks_per_second() const
void HPETComparator::reset_to_default_ticks_per_second()
{
ASSERT(is_capable_of_frequency(OPTIMAL_TICKS_PER_SECOND_RATE));
dbg() << "reset_to_default_ticks_per_second";
m_frequency = OPTIMAL_TICKS_PER_SECOND_RATE;
if (!is_periodic())
set_new_countdown();

@@ -89,13 +100,16 @@ void HPETComparator::reset_to_default_ticks_per_second()
bool HPETComparator::try_to_set_frequency(size_t frequency)
{
InterruptDisabler disabler;
if (!is_capable_of_frequency(frequency))
if (!is_capable_of_frequency(frequency)) {
dbg() << "HPETComparator: not capable of frequency: " << frequency;
return false;
if (m_frequency == frequency)
return true;
}
auto hpet_frequency = HPET::the().frequency();
ASSERT(frequency <= hpet_frequency);
m_frequency = frequency;
m_enabled = true;
#ifdef HPET_COMPARATOR_DEBUG
dbg() << "HPET Comparator: Max frequency " << hpet_frequency << " Hz, want to set " << frequency << " Hz, periodic: " << is_periodic();
#endif

@@ -112,16 +126,17 @@ bool HPETComparator::is_capable_of_frequency(size_t frequency) const
{
if (frequency > HPET::the().frequency())
return false;
if ((HPET::the().frequency() % frequency) != 0)
return false;
// HPET::update_periodic_comparator_value and HPET::update_non_periodic_comparator_value
// calculate the best counter based on the desired frequency.
return true;
}
size_t HPETComparator::calculate_nearest_possible_frequency(size_t frequency) const
{
if (frequency >= HPET::the().frequency())
if (frequency > HPET::the().frequency())
return HPET::the().frequency();
// FIXME: Use better math here
return (frequency + (HPET::the().frequency() % frequency));
// HPET::update_periodic_comparator_value and HPET::update_non_periodic_comparator_value
// calculate the best counter based on the desired frequency.
return frequency;
}
}
@@ -42,6 +42,7 @@ public:
virtual const char* model() const override { return "HPET"; }
u8 comparator_number() const { return m_comparator_number; }
bool is_enabled() const { return m_enabled; }
virtual size_t ticks_per_second() const override;

@@ -49,6 +50,7 @@ public:
virtual bool is_periodic_capable() const override { return m_periodic_capable; }
virtual void set_periodic() override;
virtual void set_non_periodic() override;
virtual void disable() override;
virtual void reset_to_default_ticks_per_second() override;
virtual bool try_to_set_frequency(size_t frequency) override;

@@ -61,7 +63,7 @@ private:
HPETComparator(u8 number, u8 irq, bool periodic_capable);
bool m_periodic : 1;
bool m_periodic_capable : 1;
bool m_edge_triggered : 1;
bool m_enabled : 1;
u8 m_comparator_number { 0 };
};
}
@@ -57,6 +57,7 @@ public:
virtual bool is_periodic_capable() const = 0;
virtual void set_periodic() = 0;
virtual void set_non_periodic() = 0;
virtual void disable() = 0;
virtual size_t ticks_per_second() const = 0;

@@ -63,6 +63,7 @@ public:
virtual bool is_periodic_capable() const override { return true; }
virtual void set_periodic() override;
virtual void set_non_periodic() override;
virtual void disable() override { }
virtual void reset_to_default_ticks_per_second() override;
virtual bool try_to_set_frequency(size_t frequency) override;

@@ -42,6 +42,7 @@ public:
virtual bool is_periodic_capable() const override { return true; }
virtual void set_periodic() override { }
virtual void set_non_periodic() override { }
virtual void disable() override { }
virtual void reset_to_default_ticks_per_second() override;
virtual bool try_to_set_frequency(size_t frequency) override;
@@ -52,13 +52,33 @@ TimeManagement& TimeManagement::the()
return *s_the;
}
bool TimeManagement::is_valid_clock_id(clockid_t clock_id)
{
switch (clock_id) {
case CLOCK_MONOTONIC:
case CLOCK_MONOTONIC_COARSE:
case CLOCK_MONOTONIC_RAW:
case CLOCK_REALTIME:
case CLOCK_REALTIME_COARSE:
return true;
default:
return false;
};
}
KResultOr<timespec> TimeManagement::current_time(clockid_t clock_id) const
{
switch (clock_id) {
case CLOCK_MONOTONIC:
return monotonic_time();
return monotonic_time(TimePrecision::Precise);
case CLOCK_MONOTONIC_COARSE:
return monotonic_time(TimePrecision::Coarse);
case CLOCK_MONOTONIC_RAW:
return monotonic_time_raw();
case CLOCK_REALTIME:
return epoch_time();
return epoch_time(TimePrecision::Precise);
case CLOCK_REALTIME_COARSE:
return epoch_time(TimePrecision::Coarse);
default:
return KResult(EINVAL);
}

@@ -69,22 +89,6 @@ bool TimeManagement::is_system_timer(const HardwareTimerBase& timer) const
return &timer == m_system_timer.ptr();
}
timespec TimeManagement::ticks_to_time(u64 ticks, time_t ticks_per_second)
{
timespec tspec;
tspec.tv_sec = ticks / ticks_per_second;
tspec.tv_nsec = (ticks % ticks_per_second) * (1'000'000'000 / ticks_per_second);
ASSERT(tspec.tv_nsec <= 1'000'000'000);
return tspec;
}
u64 TimeManagement::time_to_ticks(const timespec& tspec, time_t ticks_per_second)
{
u64 ticks = (u64)tspec.tv_sec * ticks_per_second;
ticks += ((u64)tspec.tv_nsec * ticks_per_second) / 1'000'000'000;
return ticks;
}
void TimeManagement::set_epoch_time(timespec ts)
{
InterruptDisabler disabler;
@@ -92,27 +96,40 @@ void TimeManagement::set_epoch_time(timespec ts)
m_remaining_epoch_time_adjustment = { 0, 0 };
}
u64 TimeManagement::monotonic_ticks() const
timespec TimeManagement::monotonic_time(TimePrecision precision) const
{
long seconds;
u64 ticks;
// This is the time when last updated by an interrupt.
u64 seconds;
u32 ticks;
bool do_query = precision == TimePrecision::Precise && m_can_query_precise_time;
u32 update_iteration;
do {
update_iteration = m_update1.load(AK::MemoryOrder::memory_order_acquire);
seconds = m_seconds_since_boot;
ticks = m_ticks_this_second;
if (do_query) {
// We may have to do this over again if the timer interrupt fires
// while we're trying to query the information. In that case, our
// seconds and ticks became invalid, producing an incorrect time.
// Be sure to not modify m_seconds_since_boot and m_ticks_this_second
// because this may only be modified by the interrupt handler
HPET::the().update_time(seconds, ticks, true);
}
} while (update_iteration != m_update2.load(AK::MemoryOrder::memory_order_acquire));
return ticks + (u64)seconds * (u64)ticks_per_second();
ASSERT(m_time_ticks_per_second > 0);
ASSERT(ticks < m_time_ticks_per_second);
u64 ns = ((u64)ticks * 1000000000ull) / m_time_ticks_per_second;
ASSERT(ns < 1000000000ull);
return { (long)seconds, (long)ns };
}
timespec TimeManagement::monotonic_time() const
{
return ticks_to_time(monotonic_ticks(), ticks_per_second());
}
timespec TimeManagement::epoch_time() const
timespec TimeManagement::epoch_time(TimePrecision) const
{
// TODO: Take into account precision
timespec ts;
u32 update_iteration;
do {
@@ -155,7 +172,9 @@ void TimeManagement::initialize(u32 cpu)
void TimeManagement::set_system_timer(HardwareTimerBase& timer)
{
ASSERT(Processor::current().id() == 0); // This should only be called on the BSP!
auto original_callback = m_system_timer->set_callback(nullptr);
m_system_timer->disable();
timer.set_callback(move(original_callback));
m_system_timer = timer;
}
@@ -259,28 +278,36 @@ bool TimeManagement::probe_and_set_non_legacy_hardware_timers()
if (is_hpet_periodic_mode_allowed())
ASSERT(!periodic_timers.is_empty());
ASSERT(periodic_timers.size() + non_periodic_timers.size() >= 2);
ASSERT(periodic_timers.size() + non_periodic_timers.size() > 0);
if (periodic_timers.size() >= 2) {
m_time_keeper_timer = periodic_timers[1];
if (periodic_timers.size() > 0)
m_system_timer = periodic_timers[0];
} else {
if (periodic_timers.size() == 1) {
m_time_keeper_timer = periodic_timers[0];
m_system_timer = non_periodic_timers[0];
} else {
m_time_keeper_timer = non_periodic_timers[1];
m_system_timer = non_periodic_timers[0];
else
m_system_timer = non_periodic_timers[0];
m_system_timer->set_callback([this](const RegisterState& regs) {
// Update the time. We don't really care too much about the
// frequency of the interrupt because we'll query the main
// counter to get an accurate time.
if (Processor::current().id() == 0) {
// TODO: Have the other CPUs call system_timer_tick directly
increment_time_since_boot_hpet();
}
}
m_system_timer->set_callback(TimeManagement::timer_tick);
m_time_keeper_timer->set_callback(TimeManagement::update_time);
system_timer_tick(regs);
});
dbg() << "Reset timers";
m_system_timer->try_to_set_frequency(m_system_timer->calculate_nearest_possible_frequency(1024));
m_time_keeper_timer->try_to_set_frequency(OPTIMAL_TICKS_PER_SECOND_RATE);
// Use the HPET main counter frequency for time purposes. This is likely
// a much higher frequency than the interrupt itself and allows us to
// keep a more accurate time
m_can_query_precise_time = true;
m_time_ticks_per_second = HPET::the().frequency();
m_system_timer->try_to_set_frequency(m_system_timer->calculate_nearest_possible_frequency(OPTIMAL_TICKS_PER_SECOND_RATE));
// We don't need an interrupt for time keeping purposes because we
// can query the timer.
m_time_keeper_timer = m_system_timer;
return true;
}
@@ -296,18 +323,43 @@ bool TimeManagement::probe_and_set_legacy_hardware_timers()
}
m_hardware_timers.append(PIT::initialize(TimeManagement::update_time));
m_hardware_timers.append(RealTimeClock::create(TimeManagement::timer_tick));
m_hardware_timers.append(RealTimeClock::create(TimeManagement::system_timer_tick));
m_time_keeper_timer = m_hardware_timers[0];
m_system_timer = m_hardware_timers[1];
// The timer is only as accurate as the interrupts...
m_time_ticks_per_second = m_time_keeper_timer->ticks_per_second();
return true;
}
void TimeManagement::update_time(const RegisterState& regs)
void TimeManagement::update_time(const RegisterState&)
{
TimeManagement::the().increment_time_since_boot(regs);
TimeManagement::the().increment_time_since_boot();
}
void TimeManagement::increment_time_since_boot(const RegisterState&)
void TimeManagement::increment_time_since_boot_hpet()
{
ASSERT(!m_time_keeper_timer.is_null());
ASSERT(m_time_keeper_timer->timer_type() == HardwareTimerType::HighPrecisionEventTimer);
// NOTE: m_seconds_since_boot and m_ticks_this_second are only ever
// updated here! So we can safely read that information, query the clock,
// and when we're all done we can update the information. This reduces
// contention when other processors attempt to read the clock.
auto seconds_since_boot = m_seconds_since_boot;
auto ticks_this_second = m_ticks_this_second;
auto delta_ns = HPET::the().update_time(seconds_since_boot, ticks_this_second, false);
// Now that we have a precise time, go update it as quickly as we can
u32 update_iteration = m_update1.fetch_add(1, AK::MemoryOrder::memory_order_acquire);
m_seconds_since_boot = seconds_since_boot;
m_ticks_this_second = ticks_this_second;
// TODO: Apply m_remaining_epoch_time_adjustment
timespec_add(m_epoch_time, { (time_t)(delta_ns / 1000000000), (long)(delta_ns % 1000000000) }, m_epoch_time);
m_update2.store(update_iteration + 1, AK::MemoryOrder::memory_order_release);
}
void TimeManagement::increment_time_since_boot()
{
ASSERT(!m_time_keeper_timer.is_null());
@@ -318,7 +370,7 @@ void TimeManagement::increment_time_since_boot(const RegisterState&)
constexpr time_t MaxSlewNanos = NanosPerTick / 100;
static_assert(MaxSlewNanos < NanosPerTick);
u32 update_iteration = m_update1.fetch_add(1, AK::MemoryOrder::memory_order_relaxed);
u32 update_iteration = m_update1.fetch_add(1, AK::MemoryOrder::memory_order_acquire);
// Clamp twice, to make sure intermediate fits into a long.
long slew_nanos = clamp(clamp(m_remaining_epoch_time_adjustment.tv_sec, (time_t)-1, (time_t)1) * 1'000'000'000 + m_remaining_epoch_time_adjustment.tv_nsec, -MaxSlewNanos, MaxSlewNanos);

@@ -338,7 +390,7 @@ void TimeManagement::increment_time_since_boot(const RegisterState&)
m_update2.store(update_iteration + 1, AK::MemoryOrder::memory_order_release);
}
void TimeManagement::timer_tick(const RegisterState& regs)
void TimeManagement::system_timer_tick(const RegisterState& regs)
{
if (Processor::current().in_irq() <= 1) {
// Don't expire timers while handling IRQs
@@ -34,10 +34,15 @@
namespace Kernel {
#define OPTIMAL_TICKS_PER_SECOND_RATE 1000
#define OPTIMAL_TICKS_PER_SECOND_RATE 250
class HardwareTimerBase;
enum class TimePrecision {
Coarse = 0,
Precise
};
class TimeManagement {
AK_MAKE_ETERNAL;
@@ -47,12 +52,15 @@ public:
static void initialize(u32 cpu);
static TimeManagement& the();
static timespec ticks_to_time(u64 ticks, time_t ticks_per_second);
static u64 time_to_ticks(const timespec& tspec, time_t ticks_per_second);
KResultOr<timespec> current_time(clockid_t clock_id) const;
timespec monotonic_time() const;
timespec epoch_time() const;
static bool is_valid_clock_id(clockid_t);
KResultOr<timespec> current_time(clockid_t) const;
timespec monotonic_time(TimePrecision = TimePrecision::Coarse) const;
timespec monotonic_time_raw() const
{
// TODO: implement
return monotonic_time(TimePrecision::Precise);
}
timespec epoch_time(TimePrecision = TimePrecision::Precise) const;
void set_epoch_time(timespec);
time_t ticks_per_second() const;
time_t boot_time() const;

@@ -60,12 +68,13 @@ public:
bool is_system_timer(const HardwareTimerBase&) const;
static void update_time(const RegisterState&);
void increment_time_since_boot(const RegisterState&);
static void update_time_hpet(const RegisterState&);
void increment_time_since_boot_hpet();
void increment_time_since_boot();
static bool is_hpet_periodic_mode_allowed();
u64 uptime_ms() const;
u64 monotonic_ticks() const;
static timeval now_as_timeval();
timespec remaining_epoch_time_adjustment() const { return m_remaining_epoch_time_adjustment; }

@@ -78,16 +87,19 @@ private:
Vector<HardwareTimerBase*> scan_for_non_periodic_timers();
NonnullRefPtrVector<HardwareTimerBase> m_hardware_timers;
void set_system_timer(HardwareTimerBase&);
static void timer_tick(const RegisterState&);
static void system_timer_tick(const RegisterState&);
// Variables between m_update1 and m_update2 are synchronized
Atomic<u32> m_update1 { 0 };
u32 m_ticks_this_second { 0 };
u32 m_seconds_since_boot { 0 };
u64 m_seconds_since_boot { 0 };
timespec m_epoch_time { 0, 0 };
timespec m_remaining_epoch_time_adjustment { 0, 0 };
Atomic<u32> m_update2 { 0 };
u32 m_time_ticks_per_second { 0 }; // may be different from interrupts/second (e.g. hpet)
bool m_can_query_precise_time { false };
RefPtr<HardwareTimerBase> m_system_timer;
RefPtr<HardwareTimerBase> m_time_keeper_timer;
};
@@ -38,16 +38,45 @@ namespace Kernel {
static AK::Singleton<TimerQueue> s_the;
static SpinLock<u8> g_timerqueue_lock;
ALWAYS_INLINE static u64 time_to_ns(const timespec& ts)
{
return (u64)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}
ALWAYS_INLINE static timespec ns_to_time(u64 ns)
{
return { (time_t)(ns / 1000000000ull), (long)(ns % 1000000000ull) };
}
timespec Timer::remaining() const
{
if (m_remaining == 0)
return {};
return TimerQueue::the().ticks_to_time(m_clock_id, m_remaining);
return ns_to_time(m_remaining);
}
u64 Timer::now() const
u64 Timer::now(bool is_firing) const
{
return TimerQueue::the().time_to_ticks(m_clock_id, TimeManagement::the().current_time(m_clock_id).value());
// NOTE: If is_firing is true then TimePrecision::Precise isn't really useful here.
// We already have a quite precise time stamp because we just updated the time in the
// interrupt handler. In those cases, just use coarse timestamps.
auto clock_id = m_clock_id;
if (is_firing) {
switch (clock_id) {
case CLOCK_MONOTONIC:
clock_id = CLOCK_MONOTONIC_COARSE;
break;
case CLOCK_MONOTONIC_RAW:
// TODO: use a special CLOCK_MONOTONIC_RAW_COARSE like mechanism here
break;
case CLOCK_REALTIME:
clock_id = CLOCK_REALTIME_COARSE;
break;
default:
break;
}
}
return time_to_ns(TimeManagement::the().current_time(clock_id).value());
}
TimerQueue& TimerQueue::the()
@@ -70,7 +99,7 @@ RefPtr<Timer> TimerQueue::add_timer_without_id(clockid_t clock_id, const timespe
// *must* be a RefPtr<Timer>. Otherwise calling cancel_timer() could
// inadvertently cancel another timer that has been created between
// returning from the timer handler and a call to cancel_timer().
auto timer = adopt(*new Timer(clock_id, time_to_ticks(clock_id, deadline), move(callback)));
auto timer = adopt(*new Timer(clock_id, time_to_ns(deadline), move(callback)));
ScopedSpinLock lock(g_timerqueue_lock);
timer->m_id = 0; // Don't generate a timer id

@@ -91,7 +120,6 @@ TimerId TimerQueue::add_timer(NonnullRefPtr<Timer>&& timer)
void TimerQueue::add_timer_locked(NonnullRefPtr<Timer> timer)
{
u64 timer_expiration = timer->m_expires;
ASSERT(timer_expiration >= time_to_ticks(timer->m_clock_id, TimeManagement::the().current_time(timer->m_clock_id).value()));
ASSERT(!timer->is_queued());

@@ -123,43 +151,7 @@ TimerId TimerQueue::add_timer(clockid_t clock_id, timeval& deadline, Function<vo
{
auto expires = TimeManagement::the().current_time(clock_id).value();
timespec_add_timeval(expires, deadline, expires);
return add_timer(adopt(*new Timer(clock_id, time_to_ticks(clock_id, expires), move(callback))));
}
timespec TimerQueue::ticks_to_time(clockid_t clock_id, u64 ticks) const
{
timespec tspec;
switch (clock_id) {
case CLOCK_MONOTONIC:
tspec.tv_sec = ticks / m_ticks_per_second;
tspec.tv_nsec = (ticks % m_ticks_per_second) * (1'000'000'000 / m_ticks_per_second);
break;
case CLOCK_REALTIME:
tspec.tv_sec = ticks / 1'000'000'000;
tspec.tv_nsec = ticks % 1'000'000'000;
break;
default:
ASSERT_NOT_REACHED();
}
ASSERT(tspec.tv_nsec <= 1'000'000'000);
return tspec;
}
u64 TimerQueue::time_to_ticks(clockid_t clock_id, const timespec& tspec) const
{
u64 ticks;
switch (clock_id) {
case CLOCK_MONOTONIC:
ticks = (u64)tspec.tv_sec * m_ticks_per_second;
ticks += ((u64)tspec.tv_nsec * m_ticks_per_second) / 1'000'000'000;
break;
case CLOCK_REALTIME:
ticks = (u64)tspec.tv_sec * 1'000'000'000 + tspec.tv_nsec;
break;
default:
ASSERT_NOT_REACHED();
}
return ticks;
return add_timer(adopt(*new Timer(clock_id, time_to_ns(expires), move(callback))));
}
bool TimerQueue::cancel_timer(TimerId id)
@@ -249,7 +241,7 @@ void TimerQueue::remove_timer_locked(Queue& queue, Timer& timer)
bool was_next_timer = (queue.list.head() == &timer);
queue.list.remove(&timer);
timer.set_queued(false);
auto now = timer.now();
auto now = timer.now(false);
if (timer.m_expires > now)
timer.m_remaining = timer.m_expires - now;

@@ -270,7 +262,7 @@ void TimerQueue::fire()
ASSERT(timer);
ASSERT(queue.next_timer_due == timer->m_expires);
while (timer && timer->now() > timer->m_expires) {
while (timer && timer->now(true) > timer->m_expires) {
queue.list.remove(timer);
timer->set_queued(false);

@@ -80,7 +80,7 @@ private:
}
bool is_queued() const { return m_queued.load(AK::MemoryOrder::memory_order_relaxed); }
void set_queued(bool queued) { m_queued.store(queued, AK::MemoryOrder::memory_order_relaxed); }
u64 now() const;
u64 now(bool) const;
};
class TimerQueue {
@@ -114,17 +114,17 @@ private:
{
switch (timer.m_clock_id) {
case CLOCK_MONOTONIC:
case CLOCK_MONOTONIC_COARSE:
case CLOCK_MONOTONIC_RAW:
return m_timer_queue_monotonic;
case CLOCK_REALTIME:
case CLOCK_REALTIME_COARSE:
return m_timer_queue_realtime;
default:
ASSERT_NOT_REACHED();
}
}
timespec ticks_to_time(clockid_t, u64 ticks) const;
u64 time_to_ticks(clockid_t, const timespec&) const;
u64 m_timer_id_count { 0 };
u64 m_ticks_per_second { 0 };
Queue m_timer_queue_monotonic;
@@ -548,6 +548,9 @@ typedef int clockid_t;
#define CLOCK_REALTIME 0
#define CLOCK_MONOTONIC 1
#define CLOCK_MONOTONIC_RAW 4
#define CLOCK_REALTIME_COARSE 5
#define CLOCK_MONOTONIC_COARSE 6
#define TIMER_ABSTIME 99
#define UTSNAME_ENTRY_LEN 65

@@ -72,6 +72,9 @@ typedef int clockid_t;
#define CLOCK_REALTIME 0
#define CLOCK_MONOTONIC 1
#define CLOCK_MONOTONIC_RAW 4
#define CLOCK_REALTIME_COARSE 5
#define CLOCK_MONOTONIC_COARSE 6
#define TIMER_ABSTIME 99
int clock_gettime(clockid_t, struct timespec*);
@@ -339,7 +339,7 @@ char* getwd(char* buf)
int sleep(unsigned seconds)
{
struct timespec ts = { seconds, 0 };
if (clock_nanosleep(CLOCK_MONOTONIC, 0, &ts, nullptr) < 0)
if (clock_nanosleep(CLOCK_MONOTONIC_COARSE, 0, &ts, nullptr) < 0)
return ts.tv_sec;
return 0;
}

@@ -347,7 +347,7 @@ int sleep(unsigned seconds)
int usleep(useconds_t usec)
{
struct timespec ts = { (long)(usec / 1000000), (long)(usec % 1000000) * 1000 };
return clock_nanosleep(CLOCK_MONOTONIC, 0, &ts, nullptr);
return clock_nanosleep(CLOCK_MONOTONIC_COARSE, 0, &ts, nullptr);
}
int gethostname(char* buffer, size_t size)
@@ -36,7 +36,7 @@ void ElapsedTimer::start()
{
m_valid = true;
timespec now_spec;
clock_gettime(CLOCK_MONOTONIC, &now_spec);
clock_gettime(m_precise ? CLOCK_MONOTONIC : CLOCK_MONOTONIC_COARSE, &now_spec);
m_origin_time.tv_sec = now_spec.tv_sec;
m_origin_time.tv_usec = now_spec.tv_nsec / 1000;
}

@@ -46,7 +46,7 @@ int ElapsedTimer::elapsed() const
ASSERT(is_valid());
struct timeval now;
timespec now_spec;
clock_gettime(CLOCK_MONOTONIC, &now_spec);
clock_gettime(m_precise ? CLOCK_MONOTONIC : CLOCK_MONOTONIC_COARSE, &now_spec);
now.tv_sec = now_spec.tv_sec;
now.tv_usec = now_spec.tv_nsec / 1000;
struct timeval diff;

@@ -32,7 +32,10 @@ namespace Core {
class ElapsedTimer {
public:
ElapsedTimer() { }
ElapsedTimer(bool precise = false)
: m_precise(precise)
{
}
bool is_valid() const { return m_valid; }
void start();

@@ -41,6 +44,7 @@ public:
const struct timeval& origin_time() const { return m_origin_time; }
private:
bool m_precise { false };
bool m_valid { false };
struct timeval m_origin_time {
0, 0
@@ -580,7 +580,7 @@ retry:
auto next_timer_expiration = get_next_timer_expiration();
if (next_timer_expiration.has_value()) {
timespec now_spec;
clock_gettime(CLOCK_MONOTONIC, &now_spec);
clock_gettime(CLOCK_MONOTONIC_COARSE, &now_spec);
now.tv_sec = now_spec.tv_sec;
now.tv_usec = now_spec.tv_nsec / 1000;
timeval_sub(next_timer_expiration.value(), now, timeout);

@@ -631,7 +631,7 @@ try_select_again:
if (!s_timers->is_empty()) {
timespec now_spec;
clock_gettime(CLOCK_MONOTONIC, &now_spec);
clock_gettime(CLOCK_MONOTONIC_COARSE, &now_spec);
now.tv_sec = now_spec.tv_sec;
now.tv_usec = now_spec.tv_nsec / 1000;
}

@@ -709,7 +709,7 @@ int EventLoop::register_timer(Object& object, int milliseconds, bool should_relo
timer->interval = milliseconds;
timeval now;
timespec now_spec;
clock_gettime(CLOCK_MONOTONIC, &now_spec);
clock_gettime(CLOCK_MONOTONIC_COARSE, &now_spec);
now.tv_sec = now_spec.tv_sec;
now.tv_usec = now_spec.tv_nsec / 1000;
timer->reload(now);
@@ -85,7 +85,8 @@ HashMap<pid_t, Core::ProcessStatistics> ProcessStatisticsReader::get_all()
thread.times_scheduled = thread_object.get("times_scheduled").to_u32();
thread.name = thread_object.get("name").to_string();
thread.state = thread_object.get("state").to_string();
thread.ticks = thread_object.get("ticks").to_u32();
thread.ticks_user = thread_object.get("ticks_user").to_u32();
thread.ticks_kernel = thread_object.get("ticks_kernel").to_u32();
thread.cpu = thread_object.get("cpu").to_u32();
thread.priority = thread_object.get("priority").to_u32();
thread.effective_priority = thread_object.get("effective_priority").to_u32();

@@ -35,7 +35,8 @@ namespace Core {
struct ThreadStatistics {
pid_t tid;
unsigned times_scheduled;
unsigned ticks;
unsigned ticks_user;
unsigned ticks_kernel;
unsigned syscall_count;
unsigned inode_faults;
unsigned zero_faults;
@@ -474,7 +474,7 @@ int pthread_cond_init(pthread_cond_t* cond, const pthread_condattr_t* attr)
{
cond->value = 0;
cond->previous = 0;
cond->clockid = attr ? attr->clockid : CLOCK_MONOTONIC;
cond->clockid = attr ? attr->clockid : CLOCK_MONOTONIC_COARSE;
return 0;
}

@@ -502,7 +502,7 @@ int pthread_cond_wait(pthread_cond_t* cond, pthread_mutex_t* mutex)
int pthread_condattr_init(pthread_condattr_t* attr)
{
attr->clockid = CLOCK_MONOTONIC;
attr->clockid = CLOCK_MONOTONIC_COARSE;
return 0;
}
@@ -82,9 +82,9 @@ int pthread_setschedparam(pthread_t thread, int policy, const struct sched_param
{ \
0, 0, 0, PTHREAD_MUTEX_DEFAULT \
}
#define PTHREAD_COND_INITIALIZER \
{ \
0, 0, CLOCK_MONOTONIC \
#define PTHREAD_COND_INITIALIZER \
{ \
0, 0, CLOCK_MONOTONIC_COARSE \
}
int pthread_key_create(pthread_key_t* key, void (*destructor)(void*));
@@ -135,9 +135,9 @@ private:
for (auto& it : all_processes) {
for (auto& jt : it.value.threads) {
if (it.value.pid == 0)
idle += jt.times_scheduled;
idle += jt.ticks_user + jt.ticks_kernel;
else
busy += jt.times_scheduled;
busy += jt.ticks_user + jt.ticks_kernel;
}
}
}
@@ -56,7 +56,7 @@ static struct timeval get_current_time()
{
struct timespec ts;
struct timeval tv;
clock_gettime(CLOCK_MONOTONIC, &ts);
clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
timespec_to_timeval(ts, tv);
return tv;
}
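Finally, a minimal user-space example (not part of the commit) of requesting the cheaper coarse clock that this change exposes through LibC; it assumes the CLOCK_MONOTONIC_COARSE definition added above.

#include <stdio.h>
#include <time.h>

int main()
{
    struct timespec coarse;
    if (clock_gettime(CLOCK_MONOTONIC_COARSE, &coarse) < 0) {
        perror("clock_gettime");
        return 1;
    }
    // Coarse timestamps are based on the last timer tick, so they are cheap
    // but only accurate to roughly one tick (about 4 ms at 250 ticks/second).
    printf("coarse monotonic time: %ld.%09ld\n", (long)coarse.tv_sec, (long)coarse.tv_nsec);
    return 0;
}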