
Kernel: Make KernelRng thread-safe

This adds an optional argument to get_good_random_bytes that can be
used to only return randomness if it doesn't have to block.
Also add a SpinLock around using FortunaPRNG.

Fixes #5132
Tom, 4 years ago
commit e2d7945e0c
2 changed files with 77 additions and 25 deletions
  1. Kernel/Random.cpp (+61 -21)
  2. Kernel/Random.h (+16 -4)
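
For context, the reworked get_good_random_bytes() now returns a bool and takes two optional flags, allow_wait and fallback_to_fast (both defaulting to true). A minimal sketch of how callers might use it; the 32-byte key buffer and call sites are illustrative, not part of this commit:

    u8 key[32];

    // Default flags (allow_wait = true, fallback_to_fast = true): may block
    // until the entropy pools are seeded, matching the old behavior for
    // callers that don't pass the new arguments.
    get_good_random_bytes(key, sizeof(key));

    // Never block and never fall back to the insecure fast generator:
    // returns false if the PRNG isn't ready, and the caller must handle it.
    if (!get_good_random_bytes(key, sizeof(key), /* allow_wait */ false, /* fallback_to_fast */ false)) {
        // Not enough entropy yet; retry later or call get_fast_random_bytes().
    }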

Kernel/Random.cpp (+61 -21)

@@ -88,6 +88,7 @@ KernelRng::KernelRng()
 
 void KernelRng::wait_for_entropy()
 {
+    ScopedSpinLock lock(get_lock());
     if (!resource().is_ready()) {
         dbgln("Entropy starvation...");
         m_seed_queue.wait_on({}, "KernelRng");
@@ -96,6 +97,7 @@ void KernelRng::wait_for_entropy()
 
 void KernelRng::wake_if_ready()
 {
+    ASSERT(get_lock().is_locked());
     if (resource().is_ready()) {
         m_seed_queue.wake_all();
     }
@@ -103,26 +105,9 @@ void KernelRng::wake_if_ready()
 
 size_t EntropySource::next_source { static_cast<size_t>(EntropySource::Static::MaxHardcodedSourceIndex) };
 
-void get_good_random_bytes(u8* buffer, size_t buffer_size)
+static void do_get_fast_random_bytes(u8* buffer, size_t buffer_size)
 {
-    KernelRng::the().wait_for_entropy();
-
-    // FIXME: What if interrupts are disabled because we're in an interrupt?
-    if (are_interrupts_enabled()) {
-        LOCKER(KernelRng::the().lock());
-        KernelRng::the().resource().get_random_bytes(buffer, buffer_size);
-    } else {
-        KernelRng::the().resource().get_random_bytes(buffer, buffer_size);
-    }
-}
-
-void get_fast_random_bytes(u8* buffer, size_t buffer_size)
-{
-    if (KernelRng::the().resource().is_ready()) {
-        return get_good_random_bytes(buffer, buffer_size);
-    }
-
-    static u32 next = 1;
+    static Atomic<u32, AK::MemoryOrder::memory_order_relaxed> next = 1;
 
     union {
         u8 bytes[4];
@@ -131,12 +116,67 @@ void get_fast_random_bytes(u8* buffer, size_t buffer_size)
     size_t offset = 4;
     for (size_t i = 0; i < buffer_size; ++i) {
         if (offset >= 4) {
-            next = next * 1103515245 + 12345;
-            u.value = next;
+            auto current_next = next.load();
+            for (;;) {
+                auto new_next = current_next * 1103515245 + 12345;
+                if (next.compare_exchange_strong(current_next, new_next)) {
+                    u.value = new_next;
+                    break;
+                }
+            }
             offset = 0;
         }
         buffer[i] = u.bytes[offset++];
     }
 }
 
+bool get_good_random_bytes(u8* buffer, size_t buffer_size, bool allow_wait, bool fallback_to_fast)
+{
+    bool result = false;
+    auto& kernel_rng = KernelRng::the();
+    // FIXME: What if interrupts are disabled because we're in an interrupt?
+    bool can_wait = are_interrupts_enabled();
+    if (!can_wait && allow_wait) {
+        // If we can't wait but the caller would be ok with it, then we
+        // need to definitely fallback to *something*, even if it's less
+        // secure...
+        fallback_to_fast = true;
+    }
+    if (can_wait && allow_wait) {
+        for (;;) {
+            {
+                LOCKER(KernelRng::the().lock());
+                if (kernel_rng.resource().get_random_bytes(buffer, buffer_size)) {
+                    result = true;
+                    break;
+                }
+            }
+            kernel_rng.wait_for_entropy();
+        }
+    } else {
+        // We can't wait/block here, or we are not allowed to block/wait
+        if (kernel_rng.resource().get_random_bytes(buffer, buffer_size)) {
+            result = true;
+        } else if (fallback_to_fast) {
+            // If interrupts are disabled
+            do_get_fast_random_bytes(buffer, buffer_size);
+            result = true;
+        }
+    }
+
+    // NOTE: The only case where this function should ever return false and
+    // not actually return random data is if fallback_to_fast == false and
+    // allow_wait == false and interrupts are enabled!
+    ASSERT(result || !fallback_to_fast);
+    return result;
+}
+
+void get_fast_random_bytes(u8* buffer, size_t buffer_size)
+{
+    // Try to get good randomness, but don't block if we can't right now
+    // and allow falling back to fast randomness
+    auto result = get_good_random_bytes(buffer, buffer_size, false, true);
+    ASSERT(result);
+}
+
 }
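
The lock-free LCG in do_get_fast_random_bytes() above relies on compare_exchange_strong() reloading the expected value when it loses a race. A standalone sketch of the same pattern, written against std::atomic purely for illustration (the kernel code uses AK::Atomic; the names here are hypothetical):

    #include <atomic>
    #include <cstdint>

    static std::atomic<uint32_t> g_next { 1 };

    static uint32_t advance_lcg()
    {
        uint32_t observed = g_next.load(std::memory_order_relaxed);
        for (;;) {
            uint32_t desired = observed * 1103515245u + 12345u;
            // On failure, compare_exchange_strong stores the value another
            // caller just published into 'observed', so the next iteration
            // advances from the freshest state instead of clobbering it.
            if (g_next.compare_exchange_strong(observed, desired, std::memory_order_relaxed))
                return desired;
        }
    }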

Kernel/Random.h (+16 -4)

@@ -55,8 +55,11 @@ public:
     {
     }
 
-    void get_random_bytes(u8* buffer, size_t n)
+    bool get_random_bytes(u8* buffer, size_t n)
     {
+        ScopedSpinLock lock(m_lock);
+        if (!is_ready())
+            return false;
         if (m_p0_len >= reseed_threshold) {
             this->reseed();
         }
@@ -75,6 +78,7 @@ public:
         // Extract a new key from the prng stream.
         Bytes key_span = m_key.bytes();
         cipher.key_stream(key_span, counter_span, &counter_span);
+        return true;
     }
 
     template<typename T>
@@ -94,9 +98,12 @@ public:
 
     [[nodiscard]] bool is_ready() const
     {
+        ASSERT(m_lock.is_locked());
         return is_seeded() || m_p0_len >= reseed_threshold;
     }
 
+    SpinLock<u8>& get_lock() { return m_lock; }
+
 private:
     void reseed()
     {
@@ -121,6 +128,7 @@ private:
     size_t m_p0_len { 0 };
     ByteBuffer m_key;
     HashType m_pools[pool_count];
+    SpinLock<u8> m_lock;
 };
 
 class KernelRng : public Lockable<FortunaPRNG<Crypto::Cipher::AESCipher, Crypto::Hash::SHA256, 256>> {
@@ -134,6 +142,8 @@ public:
 
     void wake_if_ready();
 
+    SpinLock<u8>& get_lock() { return resource().get_lock(); }
+
 private:
     WaitQueue m_seed_queue;
 };
@@ -165,11 +175,13 @@ public:
     template<typename T>
     void add_random_event(const T& event_data)
     {
+        auto& kernel_rng = KernelRng::the();
+        ScopedSpinLock lock(kernel_rng.get_lock());
         // We don't lock this because on the off chance a pool is corrupted, entropy isn't lost.
         Event<T> event = { read_tsc(), m_source, event_data };
-        KernelRng::the().resource().add_random_event(event, m_pool);
+        kernel_rng.resource().add_random_event(event, m_pool);
         m_pool++;
-        KernelRng::the().wake_if_ready();
+        kernel_rng.wake_if_ready();
     }
 
 private:
@@ -182,7 +194,7 @@ private:
 //       The only difference is that get_fast_random is guaranteed not to block.
 
 void get_fast_random_bytes(u8*, size_t);
-void get_good_random_bytes(u8*, size_t);
+bool get_good_random_bytes(u8*, size_t, bool allow_wait = true, bool fallback_to_fast = true);
 
 template<typename T>
 inline T get_fast_random()