TimeManagement.cpp

/*
 * Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
 * Copyright (c) 2022, Timon Kruiper <timonkruiper@gmail.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Singleton.h>
#include <AK/StdLibExtras.h>
#include <AK/Time.h>

#if ARCH(X86_64)
#    include <Kernel/Arch/x86_64/Interrupts/APIC.h>
#    include <Kernel/Arch/x86_64/RTC.h>
#    include <Kernel/Arch/x86_64/Time/APICTimer.h>
#    include <Kernel/Arch/x86_64/Time/HPET.h>
#    include <Kernel/Arch/x86_64/Time/HPETComparator.h>
#    include <Kernel/Arch/x86_64/Time/PIT.h>
#    include <Kernel/Arch/x86_64/Time/RTC.h>
#elif ARCH(AARCH64)
#    include <Kernel/Arch/aarch64/RPi/Timer.h>
#else
#    error Unknown architecture
#endif

#include <Kernel/Arch/CurrentTime.h>
#include <Kernel/CommandLine.h>
#include <Kernel/Firmware/ACPI/Parser.h>
#include <Kernel/InterruptDisabler.h>
#include <Kernel/PerformanceManager.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Sections.h>
#include <Kernel/Time/HardwareTimer.h>
#include <Kernel/Time/TimeManagement.h>
#include <Kernel/TimerQueue.h>

namespace Kernel {

static Singleton<TimeManagement> s_the;

bool TimeManagement::is_initialized()
{
    return s_the.is_initialized();
}

TimeManagement& TimeManagement::the()
{
    return *s_the;
}

// The s_scheduler_current_time function provides a current time for scheduling purposes,
// which may not necessarily relate to wall time.
static u64 (*s_scheduler_current_time)();

static u64 current_time_monotonic()
{
    // We always need a precise timestamp here, we cannot rely on a coarse timestamp
    return (u64)TimeManagement::the().monotonic_time(TimePrecision::Precise).to_nanoseconds();
}

u64 TimeManagement::scheduler_current_time()
{
    VERIFY(s_scheduler_current_time);
    return s_scheduler_current_time();
}

ErrorOr<void> TimeManagement::validate_clock_id(clockid_t clock_id)
{
    switch (clock_id) {
    case CLOCK_MONOTONIC:
    case CLOCK_MONOTONIC_COARSE:
    case CLOCK_MONOTONIC_RAW:
    case CLOCK_REALTIME:
    case CLOCK_REALTIME_COARSE:
        return {};
    default:
        return EINVAL;
    };
}

Duration TimeManagement::current_time(clockid_t clock_id) const
{
    switch (clock_id) {
    case CLOCK_MONOTONIC:
        return monotonic_time(TimePrecision::Precise);
    case CLOCK_MONOTONIC_COARSE:
        return monotonic_time(TimePrecision::Coarse);
    case CLOCK_MONOTONIC_RAW:
        return monotonic_time_raw();
    case CLOCK_REALTIME:
        return epoch_time(TimePrecision::Precise).offset_to_epoch();
    case CLOCK_REALTIME_COARSE:
        return epoch_time(TimePrecision::Coarse).offset_to_epoch();
    default:
        // Syscall entrypoint is missing a validate_clock_id(..) check?
        VERIFY_NOT_REACHED();
    }
}

bool TimeManagement::is_system_timer(HardwareTimerBase const& timer) const
{
    return &timer == m_system_timer.ptr();
}

void TimeManagement::set_epoch_time(UnixDateTime ts)
{
    // FIXME: The interrupt disabler intends to enforce atomic update of epoch time and remaining adjustment,
    // but that sort of assumption is known to break on SMP.
    InterruptDisabler disabler;

    m_epoch_time = ts;
    m_remaining_epoch_time_adjustment = {};
}

Duration TimeManagement::monotonic_time(TimePrecision precision) const
{
    // This is the time when last updated by an interrupt.
    u64 seconds;
    u32 ticks;

    bool do_query = precision == TimePrecision::Precise && m_can_query_precise_time;

    u32 update_iteration;
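    // NOTE: Readers use a seqlock-style protocol: the writer bumps m_update2 before and
    // m_update1 after every update, so if the two counters disagree here we raced with the
    // timer interrupt and must retry to get a consistent seconds/ticks pair.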
    do {
        update_iteration = m_update1.load(AK::MemoryOrder::memory_order_acquire);
        seconds = m_seconds_since_boot;
        ticks = m_ticks_this_second;

        if (do_query) {
#if ARCH(X86_64)
            // We may have to do this over again if the timer interrupt fires
            // while we're trying to query the information. In that case, our
            // seconds and ticks became invalid, producing an incorrect time.
            // Be sure to not modify m_seconds_since_boot and m_ticks_this_second
            // because this may only be modified by the interrupt handler
            HPET::the().update_time(seconds, ticks, true);
#elif ARCH(AARCH64)
            // FIXME: Get rid of these horrible casts
            const_cast<RPi::Timer*>(static_cast<RPi::Timer const*>(m_system_timer.ptr()))->update_time(seconds, ticks, true);
#else
#    error Unknown architecture
#endif
        }
    } while (update_iteration != m_update2.load(AK::MemoryOrder::memory_order_acquire));

    VERIFY(m_time_ticks_per_second > 0);
    VERIFY(ticks < m_time_ticks_per_second);
    u64 ns = ((u64)ticks * 1000000000ull) / m_time_ticks_per_second;
    VERIFY(ns < 1000000000ull);
    return Duration::from_timespec({ (i64)seconds, (i32)ns });
}
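
// NOTE: epoch_time() uses the same seqlock-style retry loop as monotonic_time(): keep
// re-reading until m_update1 and m_update2 agree, so we never return a torn m_epoch_time.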
UnixDateTime TimeManagement::epoch_time(TimePrecision) const
{
    // TODO: Take into account precision
    UnixDateTime time;
    u32 update_iteration;
    do {
        update_iteration = m_update1.load(AK::MemoryOrder::memory_order_acquire);
        time = m_epoch_time;
    } while (update_iteration != m_update2.load(AK::MemoryOrder::memory_order_acquire));
    return time;
}

u64 TimeManagement::uptime_ms() const
{
    auto mtime = monotonic_time().to_timespec();
    // This overflows after 292 million years of uptime.
    // Since this is only used for performance timestamps and sys$times, that's probably enough.
    u64 ms = mtime.tv_sec * 1000ull;
    ms += mtime.tv_nsec / 1000000;
    return ms;
}

UNMAP_AFTER_INIT void TimeManagement::initialize([[maybe_unused]] u32 cpu)
{
    // Note: We must disable interrupts, because the timers interrupt might fire before
    // the TimeManagement class is completely initialized.
    InterruptDisabler disabler;

#if ARCH(X86_64)
    if (cpu == 0) {
        VERIFY(!s_the.is_initialized());
        s_the.ensure_instance();

        if (APIC::initialized()) {
            // Initialize the APIC timers after the other timers as the
            // initialization needs to briefly enable interrupts, which then
            // would trigger a deadlock trying to get the s_the instance while
            // creating it.
            if (auto* apic_timer = APIC::the().initialize_timers(*s_the->m_system_timer)) {
  167. dmesgln("Duration: Using APIC timer as system timer");
                s_the->set_system_timer(*apic_timer);
            }
        }
    } else {
        VERIFY(s_the.is_initialized());
        if (auto* apic_timer = APIC::the().get_timer()) {
            dmesgln("Time: Enable APIC timer on CPU #{}", cpu);
            apic_timer->enable_local_timer();
        }
    }
#elif ARCH(AARCH64)
    if (cpu == 0) {
        VERIFY(!s_the.is_initialized());
        s_the.ensure_instance();
    }
#else
#    error Unknown architecture
#endif
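
    // NOTE: The scheduler's time source is chosen here: if the architecture exposes a cheap
    // monotonic counter via optional_current_time() (e.g. a TSC-based reading on x86_64, when
    // the hardware supports it), use that; otherwise fall back to the precise monotonic clock.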
    auto* possible_arch_specific_current_time_function = optional_current_time();
    if (possible_arch_specific_current_time_function)
        s_scheduler_current_time = possible_arch_specific_current_time_function;
    else
        s_scheduler_current_time = current_time_monotonic;
}

void TimeManagement::set_system_timer(HardwareTimerBase& timer)
{
    VERIFY(Processor::is_bootstrap_processor()); // This should only be called on the BSP!
    auto original_callback = m_system_timer->set_callback(nullptr);
    m_system_timer->disable();
    timer.set_callback(move(original_callback));
    m_system_timer = timer;
}

time_t TimeManagement::ticks_per_second() const
{
    return m_time_keeper_timer->ticks_per_second();
}

UnixDateTime TimeManagement::boot_time()
{
#if ARCH(X86_64)
    return RTC::boot_time();
#elif ARCH(AARCH64)
    // FIXME: Return correct boot time
    return UnixDateTime::epoch();
#else
#    error Unknown architecture
#endif
}

Duration TimeManagement::clock_resolution() const
{
    long nanoseconds_per_tick = 1'000'000'000 / m_time_keeper_timer->ticks_per_second();
    return Duration::from_nanoseconds(nanoseconds_per_tick);
}

UNMAP_AFTER_INIT TimeManagement::TimeManagement()
    : m_time_page_region(MM.allocate_kernel_region(PAGE_SIZE, "Time page"sv, Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow).release_value_but_fixme_should_propagate_errors())
{
#if ARCH(X86_64)
    bool probe_non_legacy_hardware_timers = !(kernel_command_line().is_legacy_time_enabled());
    if (ACPI::is_enabled()) {
        if (!ACPI::Parser::the()->x86_specific_flags().cmos_rtc_not_present) {
            RTC::initialize();
            m_epoch_time += boot_time().offset_to_epoch();
        } else {
            dmesgln("ACPI: RTC CMOS Not present");
        }
    } else {
        // We just assume that we can access RTC CMOS, if ACPI isn't usable.
        RTC::initialize();
        m_epoch_time += boot_time().offset_to_epoch();
    }
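
    // NOTE: Prefer modern hardware timers (HPET comparators) unless the kernel command line
    // forces legacy time mode; if HPET probing fails, fall back to the PIT + CMOS RTC pair.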
    if (probe_non_legacy_hardware_timers) {
        if (!probe_and_set_x86_non_legacy_hardware_timers())
            if (!probe_and_set_x86_legacy_hardware_timers())
                VERIFY_NOT_REACHED();
    } else if (!probe_and_set_x86_legacy_hardware_timers()) {
        VERIFY_NOT_REACHED();
    }
#elif ARCH(AARCH64)
    probe_and_set_aarch64_hardware_timers();
#else
#    error Unknown architecture
#endif
}

UnixDateTime TimeManagement::now()
{
    return s_the.ptr()->epoch_time();
}

UNMAP_AFTER_INIT Vector<HardwareTimerBase*> TimeManagement::scan_and_initialize_periodic_timers()
{
    bool should_enable = is_hpet_periodic_mode_allowed();
    dbgln("Time: Scanning for periodic timers");
    Vector<HardwareTimerBase*> timers;
    for (auto& hardware_timer : m_hardware_timers) {
        if (hardware_timer->is_periodic_capable()) {
            timers.append(hardware_timer);
            if (should_enable)
                hardware_timer->set_periodic();
        }
    }
    return timers;
}

UNMAP_AFTER_INIT Vector<HardwareTimerBase*> TimeManagement::scan_for_non_periodic_timers()
{
    dbgln("Time: Scanning for non-periodic timers");
    Vector<HardwareTimerBase*> timers;
    for (auto& hardware_timer : m_hardware_timers) {
        if (!hardware_timer->is_periodic_capable())
            timers.append(hardware_timer);
    }
    return timers;
}

bool TimeManagement::is_hpet_periodic_mode_allowed()
{
    switch (kernel_command_line().hpet_mode()) {
    case HPETMode::Periodic:
        return true;
    case HPETMode::NonPeriodic:
        return false;
    default:
        VERIFY_NOT_REACHED();
    }
}

#if ARCH(X86_64)
UNMAP_AFTER_INIT bool TimeManagement::probe_and_set_x86_non_legacy_hardware_timers()
{
    if (!ACPI::is_enabled())
        return false;
    if (!HPET::test_and_initialize())
        return false;
    if (!HPET::the().comparators().size()) {
        dbgln("HPET initialization aborted.");
        return false;
    }
    dbgln("HPET: Setting appropriate functions to timers.");

    for (auto& hpet_comparator : HPET::the().comparators())
        m_hardware_timers.append(hpet_comparator);

    auto periodic_timers = scan_and_initialize_periodic_timers();
    auto non_periodic_timers = scan_for_non_periodic_timers();

    if (is_hpet_periodic_mode_allowed())
        VERIFY(!periodic_timers.is_empty());

    VERIFY(periodic_timers.size() + non_periodic_timers.size() > 0);
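
    // NOTE: The first available comparator (periodic if possible) becomes the system timer,
    // and a second one, if present, is reserved further below as the profiling timer.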
    size_t taken_periodic_timers_count = 0;
    size_t taken_non_periodic_timers_count = 0;

    if (periodic_timers.size() > taken_periodic_timers_count) {
        m_system_timer = periodic_timers[taken_periodic_timers_count];
        taken_periodic_timers_count += 1;
    } else if (non_periodic_timers.size() > taken_non_periodic_timers_count) {
        m_system_timer = non_periodic_timers[taken_non_periodic_timers_count];
        taken_non_periodic_timers_count += 1;
    }

    m_system_timer->set_callback([this](RegisterState const& regs) {
        // Update the time. We don't really care too much about the
        // frequency of the interrupt because we'll query the main
        // counter to get an accurate time.
        if (Processor::is_bootstrap_processor()) {
            // TODO: Have the other CPUs call system_timer_tick directly
            increment_time_since_boot_hpet();
        }

        system_timer_tick(regs);
    });

    // Use the HPET main counter frequency for time purposes. This is likely
    // a much higher frequency than the interrupt itself and allows us to
    // keep a more accurate time
    m_can_query_precise_time = true;
    m_time_ticks_per_second = HPET::the().frequency();

    m_system_timer->try_to_set_frequency(m_system_timer->calculate_nearest_possible_frequency(OPTIMAL_TICKS_PER_SECOND_RATE));

    // We don't need an interrupt for time keeping purposes because we
    // can query the timer.
    m_time_keeper_timer = m_system_timer;

    if (periodic_timers.size() > taken_periodic_timers_count) {
        m_profile_timer = periodic_timers[taken_periodic_timers_count];
        taken_periodic_timers_count += 1;
    } else if (non_periodic_timers.size() > taken_non_periodic_timers_count) {
        m_profile_timer = non_periodic_timers[taken_non_periodic_timers_count];
        taken_non_periodic_timers_count += 1;
    }

    if (m_profile_timer) {
        m_profile_timer->set_callback(PerformanceManager::timer_tick);
        m_profile_timer->try_to_set_frequency(m_profile_timer->calculate_nearest_possible_frequency(1));
    }

    return true;
}

UNMAP_AFTER_INIT bool TimeManagement::probe_and_set_x86_legacy_hardware_timers()
{
    if (ACPI::is_enabled()) {
        if (ACPI::Parser::the()->x86_specific_flags().cmos_rtc_not_present) {
            dbgln("ACPI: CMOS RTC Not Present");
            return false;
        } else {
            dbgln("ACPI: CMOS RTC Present");
        }
    }

    m_hardware_timers.append(PIT::initialize(TimeManagement::update_time));
    m_hardware_timers.append(RealTimeClock::create(TimeManagement::system_timer_tick));
    m_time_keeper_timer = m_hardware_timers[0];
    m_system_timer = m_hardware_timers[1];

    // The timer is only as accurate as the interrupts...
    m_time_ticks_per_second = m_time_keeper_timer->ticks_per_second();
    return true;
}
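
// Tick callback for the time keeper timer in legacy (PIT) mode; the HPET path instead goes
// through increment_time_since_boot_hpet() from the system timer callback above.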
void TimeManagement::update_time(RegisterState const&)
{
    TimeManagement::the().increment_time_since_boot();
}

void TimeManagement::increment_time_since_boot_hpet()
{
    VERIFY(!m_time_keeper_timer.is_null());
    VERIFY(m_time_keeper_timer->timer_type() == HardwareTimerType::HighPrecisionEventTimer);

    // NOTE: m_seconds_since_boot and m_ticks_this_second are only ever
    // updated here! So we can safely read that information, query the clock,
    // and when we're all done we can update the information. This reduces
    // contention when other processors attempt to read the clock.
    auto seconds_since_boot = m_seconds_since_boot;
    auto ticks_this_second = m_ticks_this_second;
    auto delta_ns = HPET::the().update_time(seconds_since_boot, ticks_this_second, false);

    // Now that we have a precise time, go update it as quickly as we can
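    // (writer side of the seqlock: bump m_update2 before touching the shared fields and
    // publish m_update1 afterwards, so concurrent readers notice the update and retry).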
    u32 update_iteration = m_update2.fetch_add(1, AK::MemoryOrder::memory_order_acquire);
    m_seconds_since_boot = seconds_since_boot;
    m_ticks_this_second = ticks_this_second;
    // TODO: Apply m_remaining_epoch_time_adjustment
    timespec time_adjustment = { (time_t)(delta_ns / 1000000000), (long)(delta_ns % 1000000000) };
    m_epoch_time += Duration::from_timespec(time_adjustment);

    m_update1.store(update_iteration + 1, AK::MemoryOrder::memory_order_release);

    update_time_page();
}

#elif ARCH(AARCH64)
UNMAP_AFTER_INIT bool TimeManagement::probe_and_set_aarch64_hardware_timers()
{
    m_hardware_timers.append(RPi::Timer::initialize());
    m_system_timer = m_hardware_timers[0];
    m_time_ticks_per_second = m_system_timer->frequency();

    m_system_timer->set_callback([this](RegisterState const& regs) {
        auto seconds_since_boot = m_seconds_since_boot;
        auto ticks_this_second = m_ticks_this_second;
        auto delta_ns = static_cast<RPi::Timer*>(m_system_timer.ptr())->update_time(seconds_since_boot, ticks_this_second, false);

        u32 update_iteration = m_update2.fetch_add(1, AK::MemoryOrder::memory_order_acquire);
        m_seconds_since_boot = seconds_since_boot;
        m_ticks_this_second = ticks_this_second;
        m_epoch_time += Duration::from_timespec({ (time_t)(delta_ns / 1000000000), (long)(delta_ns % 1000000000) });
        m_update1.store(update_iteration + 1, AK::MemoryOrder::memory_order_release);

        update_time_page();

        system_timer_tick(regs);
    });

    m_time_keeper_timer = m_system_timer;
    return true;
}
#else
#    error Unknown architecture
#endif

void TimeManagement::increment_time_since_boot()
{
    VERIFY(!m_time_keeper_timer.is_null());

    // Compute time adjustment for adjtime. Let the clock run up to 1% fast or slow.
    // That way, adjtime can adjust up to 36 seconds per hour, without time getting very jumpy.
    // Once we have a smarter NTP service that also adjusts the frequency instead of just slewing time, maybe we can lower this.
    long nanos_per_tick = 1'000'000'000 / m_time_keeper_timer->frequency();
    time_t max_slew_nanos = nanos_per_tick / 100;

    u32 update_iteration = m_update2.fetch_add(1, AK::MemoryOrder::memory_order_acquire);

    auto slew_nanos = Duration::from_nanoseconds(
        clamp(m_remaining_epoch_time_adjustment.to_nanoseconds(), -max_slew_nanos, max_slew_nanos));
    m_remaining_epoch_time_adjustment -= slew_nanos;
    m_epoch_time += Duration::from_nanoseconds(nanos_per_tick + slew_nanos.to_nanoseconds());

    if (++m_ticks_this_second >= m_time_keeper_timer->ticks_per_second()) {
        // FIXME: Synchronize with other clock somehow to prevent drifting apart.
        ++m_seconds_since_boot;
        m_ticks_this_second = 0;
    }

    m_update1.store(update_iteration + 1, AK::MemoryOrder::memory_order_release);

    update_time_page();
}

void TimeManagement::system_timer_tick(RegisterState const& regs)
{
    if (Processor::current_in_irq() <= 1) {
        // Don't expire timers while nested inside other IRQ handlers.
        TimerQueue::the().fire();
    }
    Scheduler::timer_tick(regs);
}
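
// The profile timer is reference counted: the first enable bumps it to the profiling sample
// rate, and the last disable drops it back to 1 Hz instead of turning it off entirely.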
bool TimeManagement::enable_profile_timer()
{
    if (!m_profile_timer)
        return false;
    if (m_profile_enable_count.fetch_add(1) == 0)
        return m_profile_timer->try_to_set_frequency(m_profile_timer->calculate_nearest_possible_frequency(OPTIMAL_PROFILE_TICKS_PER_SECOND_RATE));
    return true;
}

bool TimeManagement::disable_profile_timer()
{
    if (!m_profile_timer)
        return false;
    if (m_profile_enable_count.fetch_sub(1) == 1)
        return m_profile_timer->try_to_set_frequency(m_profile_timer->calculate_nearest_possible_frequency(1));
    return true;
}
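
// The time page is a kernel-allocated page that stores the coarse clocks behind the same
// update1/update2 seqlock scheme as above; its VMObject (see time_page_vmobject() below)
// lets userspace read CLOCK_*_COARSE values without taking a syscall.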
void TimeManagement::update_time_page()
{
    auto& page = time_page();
    u32 update_iteration = AK::atomic_fetch_add(&page.update2, 1u, AK::MemoryOrder::memory_order_acquire);
    page.clocks[CLOCK_REALTIME_COARSE] = m_epoch_time.to_timespec();
    page.clocks[CLOCK_MONOTONIC_COARSE] = monotonic_time(TimePrecision::Coarse).to_timespec();
    AK::atomic_store(&page.update1, update_iteration + 1u, AK::MemoryOrder::memory_order_release);
}

TimePage& TimeManagement::time_page()
{
    return *static_cast<TimePage*>((void*)m_time_page_region->vaddr().as_ptr());
}

Memory::VMObject& TimeManagement::time_page_vmobject()
{
    return m_time_page_region->vmobject();
}

}