فهرست منبع

Kernel: Add locks around RangeAllocator

We need to keep multiple processors from changing it at the same time.
Tom 4 سال پیش
والد
کامیت
2b25a89ab5
2 فایل تغییر یافته به همراه 10 افزوده شده و 0 حذف شده
  1. Kernel/VM/RangeAllocator.cpp (+7 −0)
  2. Kernel/VM/RangeAllocator.h (+3 −0)

+ 7 - 0
Kernel/VM/RangeAllocator.cpp

@@ -44,12 +44,14 @@ void RangeAllocator::initialize_with_range(VirtualAddress base, size_t size)
     m_total_range = { base, size };
     m_available_ranges.append({ base, size });
 #ifdef VRA_DEBUG
+    ScopedSpinLock lock(m_lock);
     dump();
 #endif
 }
 
 void RangeAllocator::initialize_from_parent(const RangeAllocator& parent_allocator)
 {
+    ScopedSpinLock lock(parent_allocator.m_lock);
     m_total_range = parent_allocator.m_total_range;
     m_available_ranges = parent_allocator.m_available_ranges;
 }
@@ -60,6 +62,7 @@ RangeAllocator::~RangeAllocator()
 
 void RangeAllocator::dump() const
 {
+    ASSERT(m_lock.is_locked());
     dbg() << "RangeAllocator{" << this << "}";
     for (auto& range : m_available_ranges) {
         dbg() << "    " << String::format("%x", range.base().get()) << " -> " << String::format("%x", range.end().get() - 1);
@@ -85,6 +88,7 @@ Vector<Range, 2> Range::carve(const Range& taken)
 
 void RangeAllocator::carve_at_index(int index, const Range& range)
 {
+    ASSERT(m_lock.is_locked());
     auto remaining_parts = m_available_ranges[index].carve(range);
     ASSERT(remaining_parts.size() >= 1);
     m_available_ranges[index] = remaining_parts[0];
@@ -106,6 +110,7 @@ Range RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
     size_t offset_from_effective_base = 0;
 #endif
 
+    ScopedSpinLock lock(m_lock);
     for (size_t i = 0; i < m_available_ranges.size(); ++i) {
         auto& available_range = m_available_ranges[i];
         // FIXME: This check is probably excluding some valid candidates when using a large alignment.
@@ -140,6 +145,7 @@ Range RangeAllocator::allocate_specific(VirtualAddress base, size_t size)
         return {};
 
     Range allocated_range(base, size);
+    ScopedSpinLock lock(m_lock);
     for (size_t i = 0; i < m_available_ranges.size(); ++i) {
         auto& available_range = m_available_ranges[i];
         if (!available_range.contains(base, size))
@@ -161,6 +167,7 @@ Range RangeAllocator::allocate_specific(VirtualAddress base, size_t size)
 
 void RangeAllocator::deallocate(Range range)
 {
+    ScopedSpinLock lock(m_lock);
     ASSERT(m_total_range.contains(range));
     ASSERT(range.size());
     ASSERT(range.base() < range.end());

+ 3 - 0
Kernel/VM/RangeAllocator.h

@@ -29,6 +29,7 @@
 #include <AK/String.h>
 #include <AK/Traits.h>
 #include <AK/Vector.h>
+#include <Kernel/SpinLock.h>
 #include <Kernel/VirtualAddress.h>
 
 namespace Kernel {
@@ -92,6 +93,7 @@ public:
 
     bool contains(const Range& range) const
     {
+        ScopedSpinLock lock(m_lock);
         return m_total_range.contains(range);
     }
 
@@ -100,6 +102,7 @@ private:
 
     Vector<Range> m_available_ranges;
     Range m_total_range;
+    mutable SpinLock<u8> m_lock;
 };
 
 inline const LogStream& operator<<(const LogStream& stream, const Range& value)