
LibC: Rewrite pthread_mutex

pthread_mutex is now an actual "sleeping" mutex, and not just a
spinlock! It still has a fast path that only uses atomics and (in the
successful case) returns immediately without sleeping. In case of
contention, it calls futex_wait(), which lets the kernel scheduler put
this thread to sleep, *and* lets it know exactly when to consider
scheduling it again.
Sergey Bugaev, 4 years ago
Commit 19bef90923
2 changed files, with 70 additions and 38 deletions
  1. Userland/Libraries/LibC/bits/pthread_integration.h (+1 -1)
  2. Userland/Libraries/LibC/pthread_integration.cpp (+69 -37)
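
A note on the futex contract the new implementation relies on: futex_wait(addr, expected, ...) puts the calling thread to sleep only if *addr still equals expected (otherwise it returns immediately), and futex_wake(addr, count) wakes up to count threads sleeping on that address. The sketch below restates the three-state protocol from the diff in portable C++20, using std::atomic's wait()/notify_one() as a stand-in for the Serenity syscalls; it is an illustration of the technique, not the actual LibC code.

    #include <atomic>
    #include <cstdint>

    // Mirrors the three states introduced in the diff below.
    static constexpr uint32_t UNLOCKED = 0;
    static constexpr uint32_t LOCKED_NO_NEED_TO_WAKE = 1;
    static constexpr uint32_t LOCKED_NEED_TO_WAKE = 2;

    struct SleepingMutex {
        std::atomic<uint32_t> state { UNLOCKED };

        void lock()
        {
            // Fast path: one CAS and no syscall if the mutex was free.
            uint32_t expected = UNLOCKED;
            if (state.compare_exchange_strong(expected, LOCKED_NO_NEED_TO_WAKE, std::memory_order_acquire))
                return;

            // Slow path: advertise that someone is waiting, then sleep until
            // an exchange observes UNLOCKED (i.e. we took the mutex ourselves).
            uint32_t value = state.exchange(LOCKED_NEED_TO_WAKE, std::memory_order_acquire);
            while (value != UNLOCKED) {
                state.wait(value); // stand-in for futex_wait(&state, value, nullptr, 0)
                value = state.exchange(LOCKED_NEED_TO_WAKE, std::memory_order_acquire);
            }
        }

        void unlock()
        {
            // Only pay for a wake syscall if a waiter recorded itself.
            if (state.exchange(UNLOCKED, std::memory_order_release) == LOCKED_NEED_TO_WAKE)
                state.notify_one(); // stand-in for futex_wake(&state, 1)
        }
    };

The key design point is that unlock() can skip the wake in the uncontended case: LOCKED_NEED_TO_WAKE is only ever stored by a thread that is about to sleep, so observing LOCKED_NO_NEED_TO_WAKE at release time proves no waiter needs a futex_wake.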

Userland/Libraries/LibC/bits/pthread_integration.h (+1 -1)

@@ -18,10 +18,10 @@ void __pthread_fork_atfork_register_prepare(void (*)(void));
 void __pthread_fork_atfork_register_parent(void (*)(void));
 void __pthread_fork_atfork_register_child(void (*)(void));
 
+int __pthread_mutex_init(pthread_mutex_t*, const pthread_mutexattr_t*);
 int __pthread_mutex_lock(pthread_mutex_t*);
 int __pthread_mutex_trylock(pthread_mutex_t*);
 int __pthread_mutex_unlock(pthread_mutex_t*);
-int __pthread_mutex_init(pthread_mutex_t*, const pthread_mutexattr_t*);
 
 typedef void (*KeyDestructor)(void*);
 

Userland/Libraries/LibC/pthread_integration.cpp (+69 -37)

@@ -6,10 +6,12 @@
 
 #include <AK/Atomic.h>
 #include <AK/NeverDestroyed.h>
+#include <AK/Types.h>
 #include <AK/Vector.h>
 #include <bits/pthread_integration.h>
 #include <errno.h>
 #include <sched.h>
+#include <serenity.h>
 #include <unistd.h>
 
 namespace {
@@ -91,65 +93,95 @@ int __pthread_self()
 
 int pthread_self() __attribute__((weak, alias("__pthread_self")));
 
-int __pthread_mutex_lock(pthread_mutex_t* mutex)
+static constexpr u32 MUTEX_UNLOCKED = 0;
+static constexpr u32 MUTEX_LOCKED_NO_NEED_TO_WAKE = 1;
+static constexpr u32 MUTEX_LOCKED_NEED_TO_WAKE = 2;
+
+int __pthread_mutex_init(pthread_mutex_t* mutex, const pthread_mutexattr_t* attributes)
 {
-    pthread_t this_thread = __pthread_self();
-    for (;;) {
-        u32 expected = 0;
-        if (!AK::atomic_compare_exchange_strong(&mutex->lock, expected, 1u, AK::memory_order_acquire)) {
-            if (mutex->type == __PTHREAD_MUTEX_RECURSIVE && mutex->owner == this_thread) {
-                mutex->level++;
-                return 0;
-            }
-            sched_yield();
-            continue;
-        }
-        mutex->owner = this_thread;
-        mutex->level = 0;
-        return 0;
-    }
+    mutex->lock = 0;
+    mutex->owner = 0;
+    mutex->level = 0;
+    mutex->type = attributes ? attributes->type : __PTHREAD_MUTEX_NORMAL;
+    return 0;
 }
 
-int pthread_mutex_lock(pthread_mutex_t*) __attribute__((weak, alias("__pthread_mutex_lock")));
+int pthread_mutex_init(pthread_mutex_t*, const pthread_mutexattr_t*) __attribute__((weak, alias("__pthread_mutex_init")));
 
-int __pthread_mutex_unlock(pthread_mutex_t* mutex)
+int __pthread_mutex_trylock(pthread_mutex_t* mutex)
 {
-    if (mutex->type == __PTHREAD_MUTEX_RECURSIVE && mutex->level > 0) {
-        mutex->level--;
+    u32 expected = MUTEX_UNLOCKED;
+    bool exchanged = AK::atomic_compare_exchange_strong(&mutex->lock, expected, MUTEX_LOCKED_NO_NEED_TO_WAKE, AK::memory_order_acquire);
+
+    if (exchanged) [[likely]] {
+        AK::atomic_store(&mutex->owner, __pthread_self(), AK::memory_order_relaxed);
+        mutex->level = 0;
         return 0;
+    } else if (mutex->type == __PTHREAD_MUTEX_RECURSIVE) {
+        pthread_t owner = AK::atomic_load(&mutex->owner, AK::memory_order_relaxed);
+        if (owner == __pthread_self()) {
+            // We already own the mutex!
+            mutex->level++;
+            return 0;
+        }
     }
-    mutex->owner = 0;
-    AK::atomic_store(&mutex->lock, 0u, AK::memory_order_release);
-    return 0;
+    return EBUSY;
 }
 
-int pthread_mutex_unlock(pthread_mutex_t*) __attribute__((weak, alias("__pthread_mutex_unlock")));
+int pthread_mutex_trylock(pthread_mutex_t* mutex) __attribute__((weak, alias("__pthread_mutex_trylock")));
 
-int __pthread_mutex_trylock(pthread_mutex_t* mutex)
+int __pthread_mutex_lock(pthread_mutex_t* mutex)
 {
-    u32 expected = 0;
-    if (!AK::atomic_compare_exchange_strong(&mutex->lock, expected, 1u, AK::memory_order_acquire)) {
-        if (mutex->type == __PTHREAD_MUTEX_RECURSIVE && mutex->owner == pthread_self()) {
+    pthread_t this_thread = __pthread_self();
+
+    // Fast path: attempt to claim the mutex without waiting.
+    u32 value = MUTEX_UNLOCKED;
+    bool exchanged = AK::atomic_compare_exchange_strong(&mutex->lock, value, MUTEX_LOCKED_NO_NEED_TO_WAKE, AK::memory_order_acquire);
+    if (exchanged) [[likely]] {
+        AK::atomic_store(&mutex->owner, this_thread, AK::memory_order_relaxed);
+        mutex->level = 0;
+        return 0;
+    } else if (mutex->type == __PTHREAD_MUTEX_RECURSIVE) {
+        pthread_t owner = AK::atomic_load(&mutex->owner, AK::memory_order_relaxed);
+        if (owner == this_thread) {
+            // We already own the mutex!
             mutex->level++;
             return 0;
         }
-        return EBUSY;
     }
-    mutex->owner = pthread_self();
+
+    // Slow path: wait, record the fact that we're going to wait, and always
+    // remember to wake the next thread up once we release the mutex.
+    if (value != MUTEX_LOCKED_NEED_TO_WAKE)
+        value = AK::atomic_exchange(&mutex->lock, MUTEX_LOCKED_NEED_TO_WAKE, AK::memory_order_acquire);
+
+    while (value != MUTEX_UNLOCKED) {
+        futex_wait(&mutex->lock, value, nullptr, 0);
+        value = AK::atomic_exchange(&mutex->lock, MUTEX_LOCKED_NEED_TO_WAKE, AK::memory_order_acquire);
+    }
+
+    AK::atomic_store(&mutex->owner, this_thread, AK::memory_order_relaxed);
     mutex->level = 0;
     return 0;
 }
 
-int pthread_mutex_trylock(pthread_mutex_t* mutex) __attribute__((weak, alias("__pthread_mutex_trylock")));
+int pthread_mutex_lock(pthread_mutex_t*) __attribute__((weak, alias("__pthread_mutex_lock")));
 
-int __pthread_mutex_init(pthread_mutex_t* mutex, const pthread_mutexattr_t* attributes)
+int __pthread_mutex_unlock(pthread_mutex_t* mutex)
 {
-    mutex->lock = 0;
-    mutex->owner = 0;
-    mutex->level = 0;
-    mutex->type = attributes ? attributes->type : __PTHREAD_MUTEX_NORMAL;
+    if (mutex->type == __PTHREAD_MUTEX_RECURSIVE && mutex->level > 0) {
+        mutex->level--;
+        return 0;
+    }
+
+    AK::atomic_store(&mutex->owner, 0, AK::memory_order_relaxed);
+
+    u32 value = AK::atomic_exchange(&mutex->lock, MUTEX_UNLOCKED, AK::memory_order_release);
+    if (value == MUTEX_LOCKED_NEED_TO_WAKE) [[unlikely]]
+        futex_wake(&mutex->lock, 1);
+
     return 0;
 }
 
-int pthread_mutex_init(pthread_mutex_t*, const pthread_mutexattr_t*) __attribute__((weak, alias("__pthread_mutex_init")));
+int pthread_mutex_unlock(pthread_mutex_t*) __attribute__((weak, alias("__pthread_mutex_unlock")));
 }
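
For context, the recursive path (mutex->level) exercised by __pthread_mutex_lock() and __pthread_mutex_unlock() above corresponds to ordinary POSIX usage such as the sketch below; this is a plain portable example, not code from this commit.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t mutex;

    static void nested(void)
    {
        pthread_mutex_lock(&mutex); // already owned: bumps level instead of sleeping
        puts("still holding the lock, one level deeper");
        pthread_mutex_unlock(&mutex); // drops level; the mutex stays held
    }

    int main(void)
    {
        pthread_mutexattr_t attributes;
        pthread_mutexattr_init(&attributes);
        pthread_mutexattr_settype(&attributes, PTHREAD_MUTEX_RECURSIVE);
        pthread_mutex_init(&mutex, &attributes);
        pthread_mutexattr_destroy(&attributes);

        pthread_mutex_lock(&mutex);
        nested();
        pthread_mutex_unlock(&mutex); // level is zero: the real release (and wake) path runs
        pthread_mutex_destroy(&mutex);
        return 0;
    }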