/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * Copyright (c) 2020, Peter Elliott <pelliott@ualberta.ca>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Singleton.h>
#include <Kernel/Arch/x86/Processor.h>
#include <Kernel/Devices/RandomDevice.h>
#include <Kernel/Random.h>
#include <Kernel/Sections.h>
#include <Kernel/Time/HPET.h>
#include <Kernel/Time/RTC.h>
#include <Kernel/Time/TimeManagement.h>

namespace Kernel {

static Singleton<KernelRng> s_the;
static Atomic<u32, AK::MemoryOrder::memory_order_relaxed> s_next_random_value = 1;

KernelRng& KernelRng::the()
{
    return *s_the;
}
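
// Seed the entropy pool at boot from the best source available: hardware
// RDSEED/RDRAND when the CPU supports it, otherwise the HPET main counter,
// and as a last resort the RTC wall-clock time.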
UNMAP_AFTER_INIT KernelRng::KernelRng()
{
    bool supports_rdseed = Processor::current().has_feature(CPUFeature::RDSEED);
    bool supports_rdrand = Processor::current().has_feature(CPUFeature::RDRAND);
    if (supports_rdseed || supports_rdrand) {
        dmesgln("KernelRng: Using RDSEED or RDRAND as entropy source");
        for (size_t i = 0; i < resource().pool_count * resource().reseed_threshold; ++i) {
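            // RDSEED/RDRAND clear the carry flag when no random value is
            // available yet, so "jnc 1b" below retries until one is.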
            u32 value = 0;
            if (supports_rdseed) {
                asm volatile(
                    "1:\n"
                    "rdseed %0\n"
                    "jnc 1b\n"
                    : "=r"(value));
            } else {
                asm volatile(
                    "1:\n"
                    "rdrand %0\n"
                    "jnc 1b\n"
                    : "=r"(value));
            }

            this->resource().add_random_event(value, i % 32);
        }
    } else if (TimeManagement::the().can_query_precise_time()) {
        // Add HPET as entropy source if we don't have anything better.
        dmesgln("KernelRng: Using HPET as entropy source");

        for (size_t i = 0; i < resource().pool_count * resource().reseed_threshold; ++i) {
            u64 hpet_time = HPET::the().read_main_counter_unsafe();
            this->resource().add_random_event(hpet_time, i % 32);
        }
    } else {
        // Fallback to RTC
        dmesgln("KernelRng: Using RTC as entropy source (bad!)");
        auto current_time = static_cast<u64>(RTC::now());
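        // The RTC only gives us a single coarse timestamp, so scramble it with
        // a simple multiply-and-add between events so each event differs.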
        for (size_t i = 0; i < resource().pool_count * resource().reseed_threshold; ++i) {
            this->resource().add_random_event(current_time, i % 32);
            current_time *= 0x574au;
            current_time += 0x40b2u;
        }
    }
}
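
// Blocks the calling thread until wake_if_ready() signals that the entropy
// pool has been seeded with enough events to be considered ready.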
void KernelRng::wait_for_entropy()
{
    ScopedSpinlock lock(get_lock());
    if (!resource().is_ready()) {
        dbgln("Entropy starvation...");
        m_seed_queue.wait_forever("KernelRng");
    }
}
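
// Must be called with the RNG lock held after new entropy has been added;
// wakes any threads sleeping in wait_for_entropy() once the pool is ready.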
void KernelRng::wake_if_ready()
{
    VERIFY(get_lock().is_locked());
    if (resource().is_ready()) {
        m_seed_queue.wake_all();
    }
}

size_t EntropySource::next_source { static_cast<size_t>(EntropySource::Static::MaxHardcodedSourceIndex) };
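
// Fallback generator for when we cannot (or may not) block for real entropy:
// a simple linear congruential generator using the classic rand() constants
// (1103515245, 12345), advanced with compare-and-swap so concurrent callers
// never hand out the same state twice. Not cryptographically secure.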
static void do_get_fast_random_bytes(u8* buffer, size_t buffer_size)
{
    union {
        u8 bytes[4];
        u32 value;
    } u;
    size_t offset = 4;
    for (size_t i = 0; i < buffer_size; ++i) {
        if (offset >= 4) {
            auto current_next = s_next_random_value.load();
            for (;;) {
                auto new_next = current_next * 1103515245 + 12345;
                if (s_next_random_value.compare_exchange_strong(current_next, new_next)) {
                    u.value = new_next;
                    break;
                }
            }
            offset = 0;
        }
        buffer[i] = u.bytes[offset++];
    }
}
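
// Fills the buffer from the entropy pool. If allow_wait is set and we are in a
// context that can block, this may sleep until enough entropy is available;
// otherwise, with fallback_to_fast set, it degrades to the fast (insecure) generator.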
bool get_good_random_bytes(u8* buffer, size_t buffer_size, bool allow_wait, bool fallback_to_fast)
{
    bool result = false;
    auto& kernel_rng = KernelRng::the();
    // FIXME: What if interrupts are disabled because we're in an interrupt?
    bool can_wait = are_interrupts_enabled();
    if (!can_wait && allow_wait) {
        // If we can't wait but the caller would be ok with it, then we
        // need to definitely fall back to *something*, even if it's less
        // secure...
        fallback_to_fast = true;
    }
    if (can_wait && allow_wait) {
        for (;;) {
            {
                MutexLocker locker(KernelRng::the().lock());
                if (kernel_rng.resource().get_random_bytes(buffer, buffer_size)) {
                    result = true;
                    break;
                }
            }
            kernel_rng.wait_for_entropy();
        }
    } else {
        // We can't wait/block here, or we are not allowed to block/wait.
        if (kernel_rng.resource().get_random_bytes(buffer, buffer_size)) {
            result = true;
        } else if (fallback_to_fast) {
            // Not enough entropy in the pool, but the caller allows degrading
            // to the fast (insecure) generator instead of failing.
            do_get_fast_random_bytes(buffer, buffer_size);
            result = true;
        }
    }

    // NOTE: The only case where this function should ever return false and
    //       not actually return random data is if fallback_to_fast == false,
    //       allow_wait == false, and the pool doesn't have enough entropy.
    VERIFY(result || !fallback_to_fast);
    return result;
}
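
// Never blocks, and always succeeds, because it permits falling back to the
// fast generator when the entropy pool runs dry.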
void get_fast_random_bytes(u8* buffer, size_t buffer_size)
{
    // Try to get good randomness, but don't block if we can't right now
    // and allow falling back to fast randomness
    auto result = get_good_random_bytes(buffer, buffer_size, false, true);
    VERIFY(result);
}

}