
Kernel: Add a SpinLock to the WaitQueue

We need to be able to prevent a WaitQueue from being
modified by another CPU. So, add a SpinLock to it.

Because this pushes some other class over the 64-byte
limit, we also need to add a 128-byte bucket to
the slab allocator.
Tom, 5 years ago
commit 49f5069b76
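
For readers unfamiliar with the pattern, here is a minimal, userspace-only sketch of a spin lock with an RAII guard in the spirit of ScopedSpinLock. It is an illustration only: the kernel's actual SpinLock<u32> and ScopedSpinLock live in Kernel/SpinLock.h (included by the header change below) and may do more, such as interacting with critical sections; none of that is reproduced here.

    #include <atomic>

    // Busy-waiting mutual exclusion: only one CPU/thread can hold the lock
    // at a time; everyone else spins until it is released.
    class SpinLock {
    public:
        void lock()
        {
            while (m_flag.test_and_set(std::memory_order_acquire)) {
                // spin until the previous holder clears the flag
            }
        }
        void unlock() { m_flag.clear(std::memory_order_release); }

    private:
        std::atomic_flag m_flag = ATOMIC_FLAG_INIT;
    };

    // RAII guard in the spirit of ScopedSpinLock: the lock is held for exactly
    // the guard's scope, so every early return in WaitQueue::wake_*() releases
    // it automatically.
    class ScopedSpinLock {
    public:
        explicit ScopedSpinLock(SpinLock& lock)
            : m_lock(lock)
        {
            m_lock.lock();
        }
        ~ScopedSpinLock() { m_lock.unlock(); }

    private:
        SpinLock& m_lock;
    };

The key difference from the ScopedCritical it replaces is the one the commit message states: a spin lock excludes other CPUs from the protected section, not just other code paths on the same CPU.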

+ 6 - 0
Kernel/Heap/SlabAllocator.cpp

@@ -114,6 +114,7 @@ private:
 static SlabAllocator<16> s_slab_allocator_16;
 static SlabAllocator<32> s_slab_allocator_32;
 static SlabAllocator<64> s_slab_allocator_64;
+static SlabAllocator<128> s_slab_allocator_128;
 
 static_assert(sizeof(Region) <= s_slab_allocator_64.slab_size());
 
@@ -130,6 +131,7 @@ void slab_alloc_init()
     s_slab_allocator_16.init(128 * KB);
     s_slab_allocator_32.init(128 * KB);
     s_slab_allocator_64.init(512 * KB);
+    s_slab_allocator_128.init(512 * KB);
 }
 
 void* slab_alloc(size_t slab_size)
@@ -140,6 +142,8 @@ void* slab_alloc(size_t slab_size)
         return s_slab_allocator_32.alloc();
     if (slab_size <= 64)
         return s_slab_allocator_64.alloc();
+    if (slab_size <= 128)
+        return s_slab_allocator_128.alloc();
     ASSERT_NOT_REACHED();
 }
 
@@ -151,6 +155,8 @@ void slab_dealloc(void* ptr, size_t slab_size)
         return s_slab_allocator_32.dealloc(ptr);
     if (slab_size <= 64)
         return s_slab_allocator_64.dealloc(ptr);
+    if (slab_size <= 128)
+        return s_slab_allocator_128.dealloc(ptr);
     ASSERT_NOT_REACHED();
 }
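
With the new bucket, slab_alloc() and slab_dealloc() serve any request between 65 and 128 bytes from s_slab_allocator_128 instead of hitting ASSERT_NOT_REACHED(). A hypothetical call site, assuming the matching Kernel/Heap/SlabAllocator.h header (the struct and its size are invented for illustration):

    #include <Kernel/Heap/SlabAllocator.h>

    // Hypothetical 96-byte object; before this commit, slab_alloc() would
    // assert for any size above 64 bytes.
    struct Example {
        unsigned char bytes[96];
    };

    void example_usage()
    {
        void* ptr = slab_alloc(sizeof(Example)); // now served by s_slab_allocator_128
        // ... placement-new and use the object here ...
        slab_dealloc(ptr, sizeof(Example));      // pass the same size so it returns
                                                 // to the 128-byte bucket
    }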
 

+ 5 - 5
Kernel/WaitQueue.cpp

@@ -39,13 +39,13 @@ WaitQueue::~WaitQueue()
 
 void WaitQueue::enqueue(Thread& thread)
 {
-    ScopedCritical critical;
+    ScopedSpinLock queue_lock(m_lock);
     m_threads.append(thread);
 }
 
 void WaitQueue::wake_one(Atomic<bool>* lock)
 {
-    ScopedCritical critical;
+    ScopedSpinLock queue_lock(m_lock);
     if (lock)
         *lock = false;
     if (m_threads.is_empty())
@@ -57,7 +57,7 @@ void WaitQueue::wake_one(Atomic<bool>* lock)
 
 void WaitQueue::wake_n(i32 wake_count)
 {
-    ScopedCritical critical;
+    ScopedSpinLock queue_lock(m_lock);
     if (m_threads.is_empty())
         return;
 
@@ -72,7 +72,7 @@ void WaitQueue::wake_n(i32 wake_count)
 
 void WaitQueue::wake_all()
 {
-    ScopedCritical critical;
+    ScopedSpinLock queue_lock(m_lock);
     if (m_threads.is_empty())
         return;
     while (!m_threads.is_empty())
@@ -82,7 +82,7 @@ void WaitQueue::wake_all()
 
 void WaitQueue::clear()
 {
-    ScopedCritical critical;
+    ScopedSpinLock queue_lock(m_lock);
     m_threads.clear();
 }
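
A hypothetical pair of call sites, to show what the per-queue lock buys (the function names and structure are invented for illustration, not taken from the kernel sources): per the commit message, these two paths may now run on different CPUs at the same time without corrupting the intrusive m_threads list, which ScopedCritical alone could not guarantee.

    #include <Kernel/Thread.h>
    #include <Kernel/WaitQueue.h>

    namespace Kernel {

    static WaitQueue s_queue;

    void block_on_queue(Thread& current)  // imagined caller running on CPU 0
    {
        s_queue.enqueue(current);         // append happens under m_lock
        // ... the actual blocking/rescheduling would follow here ...
    }

    void notify_one_waiter()              // imagined caller running on CPU 1
    {
        s_queue.wake_one(nullptr);        // inspection/removal also under m_lock
    }

    }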
 

+ 2 - 0
Kernel/WaitQueue.h

@@ -28,6 +28,7 @@
 
 #include <AK/Atomic.h>
 #include <AK/SinglyLinkedList.h>
+#include <Kernel/SpinLock.h>
 #include <Kernel/Thread.h>
 
 namespace Kernel {
@@ -46,6 +47,7 @@ public:
 private:
     typedef IntrusiveList<Thread, &Thread::m_wait_queue_node> ThreadList;
     ThreadList m_threads;
+    SpinLock<u32> m_lock;
 };
 
 }