Random.cpp

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * Copyright (c) 2020, Peter Elliott <pelliott@ualberta.ca>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Singleton.h>
#include <Kernel/Arch/x86/CPU.h>
#include <Kernel/Devices/RandomDevice.h>
#include <Kernel/Random.h>
#include <Kernel/Time/HPET.h>
#include <Kernel/Time/RTC.h>
#include <Kernel/Time/TimeManagement.h>

namespace Kernel {

static AK::Singleton<KernelRng> s_the;

KernelRng& KernelRng::the()
{
    return *s_the;
}
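
// The constructor below seeds the kernel's entropy pools once at boot.
// resource() is the underlying PRNG (presumably a Fortuna-style design,
// given its pool_count and reseed_threshold interface); events are spread
// round-robin across the pools via `i % 32`. The best available entropy
// source is chosen in order of preference: RDSEED/RDRAND, then the HPET
// main counter, and finally the RTC as a last (weak) resort.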
UNMAP_AFTER_INIT KernelRng::KernelRng()
{
    bool supports_rdseed = Processor::current().has_feature(CPUFeature::RDSEED);
    bool supports_rdrand = Processor::current().has_feature(CPUFeature::RDRAND);
    if (supports_rdseed || supports_rdrand) {
        dmesgln("KernelRng: Using RDSEED or RDRAND as entropy source");
        for (size_t i = 0; i < resource().pool_count * resource().reseed_threshold; ++i) {
            u32 value = 0;
            if (supports_rdseed) {
                // RDSEED sets the carry flag when it returns a valid value,
                // so retry (jnc) until the hardware delivers one.
                asm volatile(
                    "1:\n"
                    "rdseed %0\n"
                    "jnc 1b\n"
                    : "=r"(value));
            } else {
                // Same retry pattern for RDRAND: loop until the carry flag
                // signals that a random value was produced.
                asm volatile(
                    "1:\n"
                    "rdrand %0\n"
                    "jnc 1b\n"
                    : "=r"(value));
            }

            this->resource().add_random_event(value, i % 32);
        }
    } else if (TimeManagement::the().can_query_precise_time()) {
        // Add HPET as entropy source if we don't have anything better.
        dmesgln("KernelRng: Using HPET as entropy source");

        for (size_t i = 0; i < resource().pool_count * resource().reseed_threshold; ++i) {
            u64 hpet_time = HPET::the().read_main_counter_unsafe();
            this->resource().add_random_event(hpet_time, i % 32);
        }
    } else {
        // Fall back to the RTC.
        dmesgln("KernelRng: Using RTC as entropy source (bad!)");
        auto current_time = static_cast<u64>(RTC::now());

        for (size_t i = 0; i < resource().pool_count * resource().reseed_threshold; ++i) {
            this->resource().add_random_event(current_time, i % 32);
            current_time *= 0x574au;
            current_time += 0x40b2u;
        }
    }
}
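
// wait_for_entropy() blocks the calling thread on m_seed_queue until the pool
// reports is_ready(); wake_if_ready() is the matching wakeup: with the RNG lock
// held, it wakes all waiters once resource().is_ready() reports a seeded pool.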
void KernelRng::wait_for_entropy()
{
    ScopedSpinLock lock(get_lock());
    if (!resource().is_ready()) {
        dbgln("Entropy starvation...");
        m_seed_queue.wait_forever("KernelRng");
    }
}

void KernelRng::wake_if_ready()
{
    VERIFY(get_lock().is_locked());
    if (resource().is_ready()) {
        m_seed_queue.wake_all();
    }
}

size_t EntropySource::next_source { static_cast<size_t>(EntropySource::Static::MaxHardcodedSourceIndex) };
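
// do_get_fast_random_bytes() is a non-cryptographic fallback: a simple linear
// congruential generator using the classic ANSI C constants (1103515245, 12345),
// advanced atomically with a compare-and-swap so concurrent callers each observe
// a distinct state. It must never be used where unpredictable randomness is required.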
static void do_get_fast_random_bytes(u8* buffer, size_t buffer_size)
{
    static Atomic<u32, AK::MemoryOrder::memory_order_relaxed> next = 1;

    union {
        u8 bytes[4];
        u32 value;
    } u;
    size_t offset = 4;
    for (size_t i = 0; i < buffer_size; ++i) {
        if (offset >= 4) {
            auto current_next = next.load();
            for (;;) {
                auto new_next = current_next * 1103515245 + 12345;
                if (next.compare_exchange_strong(current_next, new_next)) {
                    u.value = new_next;
                    break;
                }
            }
            offset = 0;
        }
        buffer[i] = u.bytes[offset++];
    }
}
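
// get_good_random_bytes() tries to fill the buffer from the kernel's entropy
// pool. If allow_wait is true and interrupts are enabled, it may block (via
// wait_for_entropy()) until enough entropy is available; otherwise it either
// fails or, when fallback_to_fast is (or is forced to be) true, falls back to
// the fast, insecure generator above.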
bool get_good_random_bytes(u8* buffer, size_t buffer_size, bool allow_wait, bool fallback_to_fast)
{
    bool result = false;
    auto& kernel_rng = KernelRng::the();
    // FIXME: What if interrupts are disabled because we're in an interrupt?
    bool can_wait = are_interrupts_enabled();
    if (!can_wait && allow_wait) {
        // If we can't wait but the caller would be ok with it, then we
        // definitely need to fall back to *something*, even if it's less
        // secure...
        fallback_to_fast = true;
    }
    if (can_wait && allow_wait) {
        for (;;) {
            {
                Locker locker(KernelRng::the().lock());
                if (kernel_rng.resource().get_random_bytes(buffer, buffer_size)) {
                    result = true;
                    break;
                }
            }
            kernel_rng.wait_for_entropy();
        }
    } else {
        // We can't block here, or we are not allowed to block.
        if (kernel_rng.resource().get_random_bytes(buffer, buffer_size)) {
            result = true;
        } else if (fallback_to_fast) {
            // We couldn't get good random bytes, but the caller allows
            // falling back to the fast (insecure) generator.
            do_get_fast_random_bytes(buffer, buffer_size);
            result = true;
        }
    }
    // NOTE: The only case where this function returns false without filling
    // the buffer is when allow_wait == false, fallback_to_fast == false, and
    // not enough entropy was available.
    VERIFY(result || !fallback_to_fast);
    return result;
}

void get_fast_random_bytes(u8* buffer, size_t buffer_size)
{
    // Try to get good randomness, but don't block if we can't right now,
    // and allow falling back to fast randomness.
    auto result = get_good_random_bytes(buffer, buffer_size, false, true);
    VERIFY(result);
}

}