Mirror of https://github.com/LadybirdBrowser/ladybird.git, synced 2024-11-21 23:20:20 +00:00
AK: Make Weakable non-atomic
Let's not punish single-threaded workloads with the performance cost of atomic weakables. The kernel keeps using LockWeakable.
This commit is contained in:
parent
159f9688dc
commit
53c0038d2c
Notes:
sideshowbarker
2024-07-17 20:58:35 +09:00
Author: https://github.com/awesomekling
Commit: https://github.com/SerenityOS/serenity/commit/53c0038d2c
1 changed file with 6 additions and 40 deletions
AK/Weakable.h
@@ -7,11 +7,9 @@
 #pragma once
 
 #include <AK/Assertions.h>
-#include <AK/Atomic.h>
 #include <AK/RefCounted.h>
 #include <AK/RefPtr.h>
 #include <AK/StdLibExtras.h>
-#include <sched.h>
 
 namespace AK {
 
@@ -31,50 +29,18 @@ public:
     RefPtr<T> strong_ref() const
     requires(IsBaseOf<RefCountedBase, T>)
     {
-        RefPtr<T> ref;
-
-        {
-            if (!(m_consumers.fetch_add(1u << 1, AK::MemoryOrder::memory_order_acquire) & 1u)) {
-                T* ptr = (T*)m_ptr.load(AK::MemoryOrder::memory_order_acquire);
-                if (ptr && ptr->try_ref())
-                    ref = adopt_ref(*ptr);
-            }
-            m_consumers.fetch_sub(1u << 1, AK::MemoryOrder::memory_order_release);
-        }
-
-        return ref;
+        return static_cast<T*>(m_ptr);
     }
 
     template<typename T>
     T* unsafe_ptr() const
     {
-        if (m_consumers.load(AK::MemoryOrder::memory_order_relaxed) & 1u)
-            return nullptr;
-        // NOTE: This may return a non-null pointer even if revocation
-        // has been triggered as there is a possible race! But it's "unsafe"
-        // anyway because we return a raw pointer without ensuring a
-        // reference...
-        return (T*)m_ptr.load(AK::MemoryOrder::memory_order_acquire);
+        return static_cast<T*>(m_ptr);
     }
 
-    bool is_null() const
-    {
-        return unsafe_ptr<void>() == nullptr;
-    }
+    bool is_null() const { return m_ptr == nullptr; }
 
-    void revoke()
-    {
-        auto current_consumers = m_consumers.fetch_or(1u, AK::MemoryOrder::memory_order_relaxed);
-        VERIFY(!(current_consumers & 1u));
-        // We flagged revocation, now wait until everyone trying to obtain
-        // a strong reference is done
-        while (current_consumers > 0) {
-            sched_yield();
-            current_consumers = m_consumers.load(AK::MemoryOrder::memory_order_acquire) & ~1u;
-        }
-        // No one is trying to use it (anymore)
-        m_ptr.store(nullptr, AK::MemoryOrder::memory_order_release);
-    }
+    void revoke() { m_ptr = nullptr; }
 
 private:
     template<typename T>
@@ -82,8 +48,8 @@ private:
         : m_ptr(&weakable)
     {
     }
-    mutable Atomic<void*> m_ptr;
-    mutable Atomic<unsigned> m_consumers; // LSB indicates revocation in progress
+
+    mutable void* m_ptr { nullptr };
 };
 
 template<typename T>
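For context, here is a minimal usage sketch of the weak-pointer pattern the simplified WeakLink serves. It is a sketch only, assuming the surrounding AK API (Weakable<T>::make_weak_ptr() and WeakPtr<T>::strong_ref(), which live in AK/Weakable.h and AK/WeakPtr.h and are not part of this diff):

// Illustrative only; assumes AK's Weakable<T>/WeakPtr<T> API, not shown in this diff.
#include <AK/Assertions.h>
#include <AK/NonnullRefPtr.h>
#include <AK/RefCounted.h>
#include <AK/RefPtr.h>
#include <AK/WeakPtr.h>
#include <AK/Weakable.h>

// The usual combination: strong_ref() requires a RefCounted target.
class Widget : public AK::RefCounted<Widget>
    , public AK::Weakable<Widget> {
};

void example()
{
    AK::RefPtr<Widget> strong = AK::adopt_ref(*new Widget);
    AK::WeakPtr<Widget> weak = strong->make_weak_ptr();

    // While a strong reference exists, the weak pointer can be upgraded.
    if (auto upgraded = weak.strong_ref())
        VERIFY(upgraded.ptr() == strong.ptr());

    // Dropping the last strong reference destroys the Widget, which revokes the
    // shared WeakLink (after this commit, a plain pointer write rather than an
    // atomic store plus a spin-wait for concurrent consumers).
    strong.clear();
    VERIFY(!weak.strong_ref());
}

This is the single-threaded pattern the commit optimizes for; code that needs weak pointers across threads keeps using the kernel's LockWeakable, as noted in the commit message.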