Kernel: Use RefPtr instead of LockRefPtr for PhysicalPage

I believe this to be safe, as the main thing that LockRefPtr provides
over RefPtr is safe copying from a shared LockRefPtr instance. I've
inspected the uses of RefPtr<PhysicalPage> and it seems they're all
guarded by external locking. Some of it is less obvious, but this is
an area where we're making continuous headway.
Author: Andreas Kling (2 years ago)
Commit: 2c72d495a3
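
For context on the claim above (this note and the sketch are not part of the commit): LockRefPtr makes the copy operation itself atomic by locking the smart-pointer slot, so two threads can safely copy from and reassign the same slot concurrently. Plain RefPtr leaves that to the caller, which is why every shared RefPtr<PhysicalPage> slot must be guarded by external locking. A minimal sketch of that pattern, with hypothetical names:

// Illustrative sketch only; `Example`, `m_page` and `take_copy()` are
// hypothetical names, but the locking pattern mirrors the kernel code below.
#include <AK/RefPtr.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/PhysicalPage.h>

namespace Kernel::Memory {

class Example {
public:
    RefPtr<PhysicalPage> take_copy()
    {
        // The ref-count bump performed by the RefPtr copy happens while
        // m_lock is held, so no other thread can reassign m_page (and
        // potentially drop the last reference) mid-copy.
        SpinlockLocker locker(m_lock);
        return m_page;
    }

    void replace_page(RefPtr<PhysicalPage> new_page)
    {
        SpinlockLocker locker(m_lock);
        m_page = move(new_page);
    }

private:
    Spinlock m_lock { LockRank::None };
    RefPtr<PhysicalPage> m_page;
};

}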

+ 12 - 11
Kernel/Memory/AnonymousVMObject.cpp

@@ -4,6 +4,7 @@
  * SPDX-License-Identifier: BSD-2-Clause
  */
 
+#include <AK/NonnullRefPtrVector.h>
 #include <Kernel/Arch/SafeMem.h>
 #include <Kernel/Arch/SmapDisabler.h>
 #include <Kernel/Debug.h>
@@ -91,7 +92,7 @@ ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_phys
 {
     auto contiguous_physical_pages = TRY(MM.allocate_contiguous_physical_pages(size));
 
-    auto new_physical_pages = TRY(FixedArray<LockRefPtr<PhysicalPage>>::try_create(contiguous_physical_pages.span()));
+    auto new_physical_pages = TRY(FixedArray<RefPtr<PhysicalPage>>::try_create(contiguous_physical_pages.span()));
 
     return adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(move(new_physical_pages)));
 }
@@ -110,9 +111,9 @@ ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_purg
     return vmobject;
 }
 
-ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_physical_pages(Span<NonnullLockRefPtr<PhysicalPage>> physical_pages)
+ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>> physical_pages)
 {
-    auto new_physical_pages = TRY(FixedArray<LockRefPtr<PhysicalPage>>::try_create(physical_pages));
+    auto new_physical_pages = TRY(FixedArray<RefPtr<PhysicalPage>>::try_create(physical_pages));
     return adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(move(new_physical_pages)));
 }
 
@@ -129,7 +130,7 @@ ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_for_
     return adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(paddr, move(new_physical_pages)));
 }
 
-ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_shared_cow(AnonymousVMObject const& other, NonnullLockRefPtr<SharedCommittedCowPages> shared_committed_cow_pages, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_shared_cow(AnonymousVMObject const& other, NonnullLockRefPtr<SharedCommittedCowPages> shared_committed_cow_pages, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
 {
     auto weak_parent = TRY(other.try_make_weak_ptr<AnonymousVMObject>());
     auto vmobject = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(move(weak_parent), move(shared_committed_cow_pages), move(new_physical_pages))));
@@ -139,7 +140,7 @@ ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with
     return vmobject;
 }
 
-AnonymousVMObject::AnonymousVMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, AllocationStrategy strategy, Optional<CommittedPhysicalPageSet> committed_pages)
+AnonymousVMObject::AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, AllocationStrategy strategy, Optional<CommittedPhysicalPageSet> committed_pages)
     : VMObject(move(new_physical_pages))
     , m_unused_committed_pages(move(committed_pages))
 {
@@ -154,7 +155,7 @@ AnonymousVMObject::AnonymousVMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_
     }
 }
 
-AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
     : VMObject(move(new_physical_pages))
 {
     VERIFY(paddr.page_base() == paddr);
@@ -162,12 +163,12 @@ AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, FixedArray<LockRefPt
         physical_pages()[i] = PhysicalPage::create(paddr.offset(i * PAGE_SIZE), MayReturnToFreeList::No);
 }
 
-AnonymousVMObject::AnonymousVMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+AnonymousVMObject::AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
     : VMObject(move(new_physical_pages))
 {
 }
 
-AnonymousVMObject::AnonymousVMObject(LockWeakPtr<AnonymousVMObject> other, NonnullLockRefPtr<SharedCommittedCowPages> shared_committed_cow_pages, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+AnonymousVMObject::AnonymousVMObject(LockWeakPtr<AnonymousVMObject> other, NonnullLockRefPtr<SharedCommittedCowPages> shared_committed_cow_pages, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
     : VMObject(move(new_physical_pages))
     , m_cow_parent(move(other))
     , m_shared_committed_cow_pages(move(shared_committed_cow_pages))
@@ -270,7 +271,7 @@ ErrorOr<void> AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged
     return {};
 }
 
-NonnullLockRefPtr<PhysicalPage> AnonymousVMObject::allocate_committed_page(Badge<Region>)
+NonnullRefPtr<PhysicalPage> AnonymousVMObject::allocate_committed_page(Badge<Region>)
 {
     return m_unused_committed_pages->take_one();
 }
@@ -344,7 +345,7 @@ PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, Virtual
         return PageFaultResponse::Continue;
     }
 
-    LockRefPtr<PhysicalPage> page;
+    RefPtr<PhysicalPage> page;
     if (m_shared_committed_cow_pages) {
         dbgln_if(PAGE_FAULT_DEBUG, "    >> It's a committed COW page and it's time to COW!");
         page = m_shared_committed_cow_pages->take_one();
@@ -387,7 +388,7 @@ AnonymousVMObject::SharedCommittedCowPages::SharedCommittedCowPages(CommittedPhy
 
 AnonymousVMObject::SharedCommittedCowPages::~SharedCommittedCowPages() = default;
 
-NonnullLockRefPtr<PhysicalPage> AnonymousVMObject::SharedCommittedCowPages::take_one()
+NonnullRefPtr<PhysicalPage> AnonymousVMObject::SharedCommittedCowPages::take_one()
 {
     SpinlockLocker locker(m_lock);
     return m_committed_pages.take_one();

+ 8 - 8
Kernel/Memory/AnonymousVMObject.h

@@ -20,12 +20,12 @@ public:
 
     static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_size(size_t, AllocationStrategy);
     static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_for_physical_range(PhysicalAddress paddr, size_t size);
-    static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_physical_pages(Span<NonnullLockRefPtr<PhysicalPage>>);
+    static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>>);
     static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_purgeable_with_size(size_t, AllocationStrategy);
     static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_physically_contiguous_with_size(size_t);
     virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override;
 
-    [[nodiscard]] NonnullLockRefPtr<PhysicalPage> allocate_committed_page(Badge<Region>);
+    [[nodiscard]] NonnullRefPtr<PhysicalPage> allocate_committed_page(Badge<Region>);
     PageFaultResponse handle_cow_fault(size_t, VirtualAddress);
     size_t cow_pages() const;
     bool should_cow(size_t page_index, bool) const;
@@ -41,12 +41,12 @@ public:
 private:
     class SharedCommittedCowPages;
 
-    static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_shared_cow(AnonymousVMObject const&, NonnullLockRefPtr<SharedCommittedCowPages>, FixedArray<LockRefPtr<PhysicalPage>>&&);
+    static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_shared_cow(AnonymousVMObject const&, NonnullLockRefPtr<SharedCommittedCowPages>, FixedArray<RefPtr<PhysicalPage>>&&);
 
-    explicit AnonymousVMObject(FixedArray<LockRefPtr<PhysicalPage>>&&, AllocationStrategy, Optional<CommittedPhysicalPageSet>);
-    explicit AnonymousVMObject(PhysicalAddress, FixedArray<LockRefPtr<PhysicalPage>>&&);
-    explicit AnonymousVMObject(FixedArray<LockRefPtr<PhysicalPage>>&&);
-    explicit AnonymousVMObject(LockWeakPtr<AnonymousVMObject>, NonnullLockRefPtr<SharedCommittedCowPages>, FixedArray<LockRefPtr<PhysicalPage>>&&);
+    explicit AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&&, AllocationStrategy, Optional<CommittedPhysicalPageSet>);
+    explicit AnonymousVMObject(PhysicalAddress, FixedArray<RefPtr<PhysicalPage>>&&);
+    explicit AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&&);
+    explicit AnonymousVMObject(LockWeakPtr<AnonymousVMObject>, NonnullLockRefPtr<SharedCommittedCowPages>, FixedArray<RefPtr<PhysicalPage>>&&);
 
     virtual StringView class_name() const override { return "AnonymousVMObject"sv; }
 
@@ -74,7 +74,7 @@ private:
 
         [[nodiscard]] bool is_empty() const { return m_committed_pages.is_empty(); }
 
-        [[nodiscard]] NonnullLockRefPtr<PhysicalPage> take_one();
+        [[nodiscard]] NonnullRefPtr<PhysicalPage> take_one();
         void uncommit_one();
 
     private:

+ 2 - 2
Kernel/Memory/InodeVMObject.cpp

@@ -9,14 +9,14 @@
 
 namespace Kernel::Memory {
 
-InodeVMObject::InodeVMObject(Inode& inode, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+InodeVMObject::InodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
     : VMObject(move(new_physical_pages))
     , m_inode(inode)
     , m_dirty_pages(move(dirty_pages))
 {
 }
 
-InodeVMObject::InodeVMObject(InodeVMObject const& other, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+InodeVMObject::InodeVMObject(InodeVMObject const& other, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
     : VMObject(move(new_physical_pages))
     , m_inode(other.m_inode)
     , m_dirty_pages(move(dirty_pages))

+ 2 - 2
Kernel/Memory/InodeVMObject.h

@@ -28,8 +28,8 @@ public:
     u32 writable_mappings() const;
 
 protected:
-    explicit InodeVMObject(Inode&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
-    explicit InodeVMObject(InodeVMObject const&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+    explicit InodeVMObject(Inode&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+    explicit InodeVMObject(InodeVMObject const&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
 
     InodeVMObject& operator=(InodeVMObject const&) = delete;
     InodeVMObject& operator=(InodeVMObject&&) = delete;

+ 11 - 10
Kernel/Memory/MemoryManager.cpp

@@ -6,6 +6,7 @@
 
 #include <AK/Assertions.h>
 #include <AK/Memory.h>
+#include <AK/NonnullRefPtrVector.h>
 #include <AK/StringView.h>
 #include <Kernel/Arch/CPU.h>
 #include <Kernel/Arch/InterruptDisabler.h>
@@ -746,7 +747,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_contiguous_kernel_region(
     return region;
 }
 
-ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access, LockRefPtr<Memory::PhysicalPage>& dma_buffer_page)
+ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access, RefPtr<Memory::PhysicalPage>& dma_buffer_page)
 {
     dma_buffer_page = TRY(allocate_physical_page());
     // Do not enable Cache for this region as physical memory transfers are performed (Most architectures have this behaviour by default)
@@ -755,12 +756,12 @@ ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(S
 
 ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access)
 {
-    LockRefPtr<Memory::PhysicalPage> dma_buffer_page;
+    RefPtr<Memory::PhysicalPage> dma_buffer_page;
 
     return allocate_dma_buffer_page(name, access, dma_buffer_page);
 }
 
-ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullLockRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages)
+ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages)
 {
     VERIFY(!(size % PAGE_SIZE));
     dma_buffer_pages = TRY(allocate_contiguous_physical_pages(size));
@@ -771,7 +772,7 @@ ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(
 ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access)
 {
     VERIFY(!(size % PAGE_SIZE));
-    NonnullLockRefPtrVector<Memory::PhysicalPage> dma_buffer_pages;
+    NonnullRefPtrVector<Memory::PhysicalPage> dma_buffer_pages;
 
     return allocate_dma_buffer_pages(size, name, access, dma_buffer_pages);
 }
@@ -881,10 +882,10 @@ void MemoryManager::deallocate_physical_page(PhysicalAddress paddr)
     PANIC("MM: deallocate_physical_page couldn't figure out region for page @ {}", paddr);
 }
 
-LockRefPtr<PhysicalPage> MemoryManager::find_free_physical_page(bool committed)
+RefPtr<PhysicalPage> MemoryManager::find_free_physical_page(bool committed)
 {
     SpinlockLocker mm_locker(s_mm_lock);
-    LockRefPtr<PhysicalPage> page;
+    RefPtr<PhysicalPage> page;
     if (committed) {
         // Draw from the committed pages pool. We should always have these pages available
         VERIFY(m_system_memory_info.physical_pages_committed > 0);
@@ -906,7 +907,7 @@ LockRefPtr<PhysicalPage> MemoryManager::find_free_physical_page(bool committed)
     return page;
 }
 
-NonnullLockRefPtr<PhysicalPage> MemoryManager::allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill should_zero_fill)
+NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill should_zero_fill)
 {
     auto page = find_free_physical_page(true);
     if (should_zero_fill == ShouldZeroFill::Yes) {
@@ -918,7 +919,7 @@ NonnullLockRefPtr<PhysicalPage> MemoryManager::allocate_committed_physical_page(
     return page.release_nonnull();
 }
 
-ErrorOr<NonnullLockRefPtr<PhysicalPage>> MemoryManager::allocate_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
+ErrorOr<NonnullRefPtr<PhysicalPage>> MemoryManager::allocate_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
 {
     SpinlockLocker lock(s_mm_lock);
     auto page = find_free_physical_page(false);
@@ -974,7 +975,7 @@ ErrorOr<NonnullLockRefPtr<PhysicalPage>> MemoryManager::allocate_physical_page(S
     return page.release_nonnull();
 }
 
-ErrorOr<NonnullLockRefPtrVector<PhysicalPage>> MemoryManager::allocate_contiguous_physical_pages(size_t size)
+ErrorOr<NonnullRefPtrVector<PhysicalPage>> MemoryManager::allocate_contiguous_physical_pages(size_t size)
 {
     VERIFY(!(size % PAGE_SIZE));
     SpinlockLocker mm_lock(s_mm_lock);
@@ -1160,7 +1161,7 @@ CommittedPhysicalPageSet::~CommittedPhysicalPageSet()
         MM.uncommit_physical_pages({}, m_page_count);
 }
 
-NonnullLockRefPtr<PhysicalPage> CommittedPhysicalPageSet::take_one()
+NonnullRefPtr<PhysicalPage> CommittedPhysicalPageSet::take_one()
 {
     VERIFY(m_page_count > 0);
     --m_page_count;

+ 9 - 9
Kernel/Memory/MemoryManager.h

@@ -119,7 +119,7 @@ public:
     bool is_empty() const { return m_page_count == 0; }
     size_t page_count() const { return m_page_count; }
 
-    [[nodiscard]] NonnullLockRefPtr<PhysicalPage> take_one();
+    [[nodiscard]] NonnullRefPtr<PhysicalPage> take_one();
     void uncommit_one();
 
     void operator=(CommittedPhysicalPageSet&&) = delete;
@@ -169,15 +169,15 @@ public:
     ErrorOr<CommittedPhysicalPageSet> commit_physical_pages(size_t page_count);
     void uncommit_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count);
 
-    NonnullLockRefPtr<PhysicalPage> allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
-    ErrorOr<NonnullLockRefPtr<PhysicalPage>> allocate_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
-    ErrorOr<NonnullLockRefPtrVector<PhysicalPage>> allocate_contiguous_physical_pages(size_t size);
+    NonnullRefPtr<PhysicalPage> allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
+    ErrorOr<NonnullRefPtr<PhysicalPage>> allocate_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
+    ErrorOr<NonnullRefPtrVector<PhysicalPage>> allocate_contiguous_physical_pages(size_t size);
     void deallocate_physical_page(PhysicalAddress);
 
     ErrorOr<NonnullOwnPtr<Region>> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
-    ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access, LockRefPtr<Memory::PhysicalPage>& dma_buffer_page);
+    ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access, RefPtr<Memory::PhysicalPage>& dma_buffer_page);
     ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access);
-    ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullLockRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages);
+    ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages);
     ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access);
     ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
     ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
@@ -258,7 +258,7 @@ private:
 
     static Region* find_region_from_vaddr(VirtualAddress);
 
-    LockRefPtr<PhysicalPage> find_free_physical_page(bool);
+    RefPtr<PhysicalPage> find_free_physical_page(bool);
 
     ALWAYS_INLINE u8* quickmap_page(PhysicalPage& page)
     {
@@ -286,8 +286,8 @@ private:
 
     LockRefPtr<PageDirectory> m_kernel_page_directory;
 
-    LockRefPtr<PhysicalPage> m_shared_zero_page;
-    LockRefPtr<PhysicalPage> m_lazy_committed_page;
+    RefPtr<PhysicalPage> m_shared_zero_page;
+    RefPtr<PhysicalPage> m_lazy_committed_page;
 
     SystemMemoryInfo m_system_memory_info;
 

+ 5 - 5
Kernel/Memory/PageDirectory.h

@@ -10,8 +10,8 @@
 #include <AK/Badge.h>
 #include <AK/HashMap.h>
 #include <AK/IntrusiveRedBlackTree.h>
+#include <AK/RefPtr.h>
 #include <Kernel/Forward.h>
-#include <Kernel/Library/LockRefPtr.h>
 #include <Kernel/Locking/Spinlock.h>
 #include <Kernel/Memory/PhysicalPage.h>
 
@@ -64,13 +64,13 @@ private:
 
     AddressSpace* m_space { nullptr };
 #if ARCH(X86_64)
-    LockRefPtr<PhysicalPage> m_pml4t;
+    RefPtr<PhysicalPage> m_pml4t;
 #endif
-    LockRefPtr<PhysicalPage> m_directory_table;
+    RefPtr<PhysicalPage> m_directory_table;
 #if ARCH(X86_64)
-    LockRefPtr<PhysicalPage> m_directory_pages[512];
+    RefPtr<PhysicalPage> m_directory_pages[512];
 #else
-    LockRefPtr<PhysicalPage> m_directory_pages[4];
+    RefPtr<PhysicalPage> m_directory_pages[4];
 #endif
     RecursiveSpinlock m_lock { LockRank::None };
 };

+ 2 - 2
Kernel/Memory/PhysicalPage.cpp

@@ -10,10 +10,10 @@
 
 namespace Kernel::Memory {
 
-NonnullLockRefPtr<PhysicalPage> PhysicalPage::create(PhysicalAddress paddr, MayReturnToFreeList may_return_to_freelist)
+NonnullRefPtr<PhysicalPage> PhysicalPage::create(PhysicalAddress paddr, MayReturnToFreeList may_return_to_freelist)
 {
     auto& physical_page_entry = MM.get_physical_page_entry(paddr);
-    return adopt_lock_ref(*new (&physical_page_entry.allocated.physical_page) PhysicalPage(may_return_to_freelist));
+    return adopt_ref(*new (&physical_page_entry.allocated.physical_page) PhysicalPage(may_return_to_freelist));
 }
 
 PhysicalPage::PhysicalPage(MayReturnToFreeList may_return_to_freelist)
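
A side note on the hunk above (not part of the commit): adopt_ref() is the plain-RefPtr counterpart of adopt_lock_ref(); it takes over an object whose ref count already starts at 1, without incrementing it. PhysicalPage additionally combines it with placement new because its objects live in preallocated physical-page entries rather than on the heap, and its custom ref counting calls free_this() instead of delete on the last unref. A minimal sketch of the adopt idiom itself, with a hypothetical heap-allocated type:

// Sketch only; `Widget` and `make_widget()` are hypothetical.
#include <AK/NonnullRefPtr.h>
#include <AK/RefCounted.h>

struct Widget : public RefCounted<Widget> {
};

static NonnullRefPtr<Widget> make_widget()
{
    // A freshly constructed RefCounted object starts with ref count 1;
    // adopt_ref() takes ownership of that initial reference instead of
    // adding another one.
    return adopt_ref(*new Widget());
}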

+ 3 - 3
Kernel/Memory/PhysicalPage.h

@@ -1,12 +1,12 @@
 /*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2018-2022, Andreas Kling <kling@serenityos.org>
  *
  * SPDX-License-Identifier: BSD-2-Clause
  */
 
 #pragma once
 
-#include <Kernel/Library/NonnullLockRefPtr.h>
+#include <AK/NonnullRefPtr.h>
 #include <Kernel/PhysicalAddress.h>
 
 namespace Kernel::Memory {
@@ -36,7 +36,7 @@ public:
             free_this();
     }
 
-    static NonnullLockRefPtr<PhysicalPage> create(PhysicalAddress, MayReturnToFreeList may_return_to_freelist = MayReturnToFreeList::Yes);
+    static NonnullRefPtr<PhysicalPage> create(PhysicalAddress, MayReturnToFreeList may_return_to_freelist = MayReturnToFreeList::Yes);
 
     u32 ref_count() const { return m_ref_count.load(AK::memory_order_consume); }
 

+ 4 - 5
Kernel/Memory/PhysicalRegion.cpp

@@ -5,9 +5,8 @@
  */
 
 #include <AK/BuiltinWrappers.h>
+#include <AK/NonnullRefPtrVector.h>
 #include <Kernel/Assertions.h>
-#include <Kernel/Library/LockRefPtr.h>
-#include <Kernel/Library/NonnullLockRefPtr.h>
 #include <Kernel/Memory/MemoryManager.h>
 #include <Kernel/Memory/PhysicalRegion.h>
 #include <Kernel/Memory/PhysicalZone.h>
@@ -76,7 +75,7 @@ OwnPtr<PhysicalRegion> PhysicalRegion::try_take_pages_from_beginning(unsigned pa
     return try_create(taken_lower, taken_upper);
 }
 
-NonnullLockRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count)
+NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count)
 {
     auto rounded_page_count = next_power_of_two(count);
     auto order = count_trailing_zeroes(rounded_page_count);
@@ -96,7 +95,7 @@ NonnullLockRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages
     if (!page_base.has_value())
         return {};
 
-    NonnullLockRefPtrVector<PhysicalPage> physical_pages;
+    NonnullRefPtrVector<PhysicalPage> physical_pages;
     physical_pages.ensure_capacity(count);
 
     for (size_t i = 0; i < count; ++i)
@@ -104,7 +103,7 @@ NonnullLockRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages
     return physical_pages;
 }
 
-LockRefPtr<PhysicalPage> PhysicalRegion::take_free_page()
+RefPtr<PhysicalPage> PhysicalRegion::take_free_page()
 {
     if (m_usable_zones.is_empty())
         return nullptr;

+ 2 - 2
Kernel/Memory/PhysicalRegion.h

@@ -33,8 +33,8 @@ public:
 
     OwnPtr<PhysicalRegion> try_take_pages_from_beginning(unsigned);
 
-    LockRefPtr<PhysicalPage> take_free_page();
-    NonnullLockRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count);
+    RefPtr<PhysicalPage> take_free_page();
+    NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count);
     void return_page(PhysicalAddress);
 
 private:

+ 2 - 2
Kernel/Memory/PrivateInodeVMObject.cpp

@@ -23,12 +23,12 @@ ErrorOr<NonnullLockRefPtr<VMObject>> PrivateInodeVMObject::try_clone()
     return adopt_nonnull_lock_ref_or_enomem<VMObject>(new (nothrow) PrivateInodeVMObject(*this, move(new_physical_pages), move(dirty_pages)));
 }
 
-PrivateInodeVMObject::PrivateInodeVMObject(Inode& inode, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+PrivateInodeVMObject::PrivateInodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
     : InodeVMObject(inode, move(new_physical_pages), move(dirty_pages))
 {
 }
 
-PrivateInodeVMObject::PrivateInodeVMObject(PrivateInodeVMObject const& other, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+PrivateInodeVMObject::PrivateInodeVMObject(PrivateInodeVMObject const& other, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
     : InodeVMObject(other, move(new_physical_pages), move(dirty_pages))
 {
 }

+ 2 - 2
Kernel/Memory/PrivateInodeVMObject.h

@@ -23,8 +23,8 @@ public:
 private:
     virtual bool is_private_inode() const override { return true; }
 
-    explicit PrivateInodeVMObject(Inode&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
-    explicit PrivateInodeVMObject(PrivateInodeVMObject const&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+    explicit PrivateInodeVMObject(Inode&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+    explicit PrivateInodeVMObject(PrivateInodeVMObject const&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
 
     virtual StringView class_name() const override { return "PrivateInodeVMObject"sv; }
 

+ 6 - 6
Kernel/Memory/Region.cpp

@@ -202,7 +202,7 @@ ErrorOr<void> Region::set_should_cow(size_t page_index, bool cow)
     return {};
 }
 
-bool Region::map_individual_page_impl(size_t page_index, LockRefPtr<PhysicalPage> page)
+bool Region::map_individual_page_impl(size_t page_index, RefPtr<PhysicalPage> page)
 {
     VERIFY(m_page_directory->get_lock().is_locked_by_current_processor());
 
@@ -240,7 +240,7 @@ bool Region::map_individual_page_impl(size_t page_index, LockRefPtr<PhysicalPage
 
 bool Region::map_individual_page_impl(size_t page_index)
 {
-    LockRefPtr<PhysicalPage> page;
+    RefPtr<PhysicalPage> page;
     {
         SpinlockLocker vmobject_locker(vmobject().m_lock);
         page = physical_page(page_index);
@@ -249,7 +249,7 @@ bool Region::map_individual_page_impl(size_t page_index)
     return map_individual_page_impl(page_index, page);
 }
 
-bool Region::remap_vmobject_page(size_t page_index, NonnullLockRefPtr<PhysicalPage> physical_page)
+bool Region::remap_vmobject_page(size_t page_index, NonnullRefPtr<PhysicalPage> physical_page)
 {
     SpinlockLocker page_lock(m_page_directory->get_lock());
 
@@ -408,7 +408,7 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region, Physica
     if (current_thread != nullptr)
         current_thread->did_zero_fault();
 
-    LockRefPtr<PhysicalPage> new_physical_page;
+    RefPtr<PhysicalPage> new_physical_page;
 
     if (page_in_slot_at_time_of_fault.is_lazy_committed_page()) {
         VERIFY(m_vmobject->is_anonymous());
@@ -543,14 +543,14 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
     return PageFaultResponse::Continue;
 }
 
-LockRefPtr<PhysicalPage> Region::physical_page(size_t index) const
+RefPtr<PhysicalPage> Region::physical_page(size_t index) const
 {
     SpinlockLocker vmobject_locker(vmobject().m_lock);
     VERIFY(index < page_count());
     return vmobject().physical_pages()[first_page_index() + index];
 }
 
-LockRefPtr<PhysicalPage>& Region::physical_page_slot(size_t index)
+RefPtr<PhysicalPage>& Region::physical_page_slot(size_t index)
 {
     VERIFY(vmobject().m_lock.is_locked_by_current_processor());
     VERIFY(index < page_count());
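
The map_individual_page_impl() and physical_page() hunks above are concrete instances of the "external locking" the commit message relies on: the RefPtr copy out of the VMObject's page array happens under vmobject().m_lock, and only the copied, independently owning pointer escapes the critical section. Roughly (sketch, not verbatim kernel code):

// Racy with plain RefPtr: another CPU may reassign the slot while the
// copy's ref-count bump is in flight.
//     page = vmobject().physical_pages()[index];

// Safe: copy under the VMObject's spinlock, then drop the lock.
RefPtr<PhysicalPage> page;
{
    SpinlockLocker locker(vmobject().m_lock);
    page = vmobject().physical_pages()[index];
}
// `page` now holds its own reference and remains valid even if the slot
// is reassigned after the lock is released.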

+ 4 - 4
Kernel/Memory/Region.h

@@ -158,8 +158,8 @@ public:
         return size() / PAGE_SIZE;
     }
 
-    LockRefPtr<PhysicalPage> physical_page(size_t index) const;
-    LockRefPtr<PhysicalPage>& physical_page_slot(size_t index);
+    RefPtr<PhysicalPage> physical_page(size_t index) const;
+    RefPtr<PhysicalPage>& physical_page_slot(size_t index);
 
     [[nodiscard]] size_t offset_in_vmobject() const
     {
@@ -208,7 +208,7 @@ private:
     Region(NonnullLockRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
     Region(VirtualRange const&, NonnullLockRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
 
-    [[nodiscard]] bool remap_vmobject_page(size_t page_index, NonnullLockRefPtr<PhysicalPage>);
+    [[nodiscard]] bool remap_vmobject_page(size_t page_index, NonnullRefPtr<PhysicalPage>);
 
     void set_access_bit(Access access, bool b)
     {
@@ -223,7 +223,7 @@ private:
     [[nodiscard]] PageFaultResponse handle_zero_fault(size_t page_index, PhysicalPage& page_in_slot_at_time_of_fault);
 
     [[nodiscard]] bool map_individual_page_impl(size_t page_index);
-    [[nodiscard]] bool map_individual_page_impl(size_t page_index, LockRefPtr<PhysicalPage>);
+    [[nodiscard]] bool map_individual_page_impl(size_t page_index, RefPtr<PhysicalPage>);
 
     LockRefPtr<PageDirectory> m_page_directory;
     VirtualRange m_range;

+ 1 - 1
Kernel/Memory/ScatterGatherList.cpp

@@ -8,7 +8,7 @@
 
 namespace Kernel::Memory {
 
-LockRefPtr<ScatterGatherList> ScatterGatherList::try_create(AsyncBlockDeviceRequest& request, Span<NonnullLockRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size)
+LockRefPtr<ScatterGatherList> ScatterGatherList::try_create(AsyncBlockDeviceRequest& request, Span<NonnullRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size)
 {
     auto maybe_vm_object = AnonymousVMObject::try_create_with_physical_pages(allocated_pages);
     if (maybe_vm_object.is_error()) {

+ 1 - 1
Kernel/Memory/ScatterGatherList.h

@@ -19,7 +19,7 @@ namespace Kernel::Memory {
 
 class ScatterGatherList final : public AtomicRefCounted<ScatterGatherList> {
 public:
-    static LockRefPtr<ScatterGatherList> try_create(AsyncBlockDeviceRequest&, Span<NonnullLockRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size);
+    static LockRefPtr<ScatterGatherList> try_create(AsyncBlockDeviceRequest&, Span<NonnullRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size);
     VMObject const& vmobject() const { return m_vm_object; }
     VirtualAddress dma_region() const { return m_dma_region->vaddr(); }
     size_t scatters_count() const { return m_vm_object->physical_pages().size(); }

+ 7 - 7
Kernel/Memory/SharedFramebufferVMObject.cpp

@@ -56,21 +56,21 @@ ErrorOr<void> SharedFramebufferVMObject::create_real_writes_framebuffer_vm_objec
     return {};
 }
 
-Span<LockRefPtr<PhysicalPage>> SharedFramebufferVMObject::real_framebuffer_physical_pages()
+Span<RefPtr<PhysicalPage>> SharedFramebufferVMObject::real_framebuffer_physical_pages()
 {
     return m_real_framebuffer_vmobject->physical_pages();
 }
-Span<LockRefPtr<PhysicalPage> const> SharedFramebufferVMObject::real_framebuffer_physical_pages() const
+Span<RefPtr<PhysicalPage> const> SharedFramebufferVMObject::real_framebuffer_physical_pages() const
 {
     return m_real_framebuffer_vmobject->physical_pages();
 }
 
-Span<LockRefPtr<PhysicalPage>> SharedFramebufferVMObject::fake_sink_framebuffer_physical_pages()
+Span<RefPtr<PhysicalPage>> SharedFramebufferVMObject::fake_sink_framebuffer_physical_pages()
 {
     return m_physical_pages.span();
 }
 
-Span<LockRefPtr<PhysicalPage> const> SharedFramebufferVMObject::fake_sink_framebuffer_physical_pages() const
+Span<RefPtr<PhysicalPage> const> SharedFramebufferVMObject::fake_sink_framebuffer_physical_pages() const
 {
     return m_physical_pages.span();
 }
@@ -92,14 +92,14 @@ void SharedFramebufferVMObject::switch_to_real_framebuffer_writes(Badge<Kernel::
     });
 }
 
-Span<LockRefPtr<PhysicalPage> const> SharedFramebufferVMObject::physical_pages() const
+Span<RefPtr<PhysicalPage> const> SharedFramebufferVMObject::physical_pages() const
 {
     SpinlockLocker locker(m_writes_state_lock);
     if (m_writes_are_faked)
         return VMObject::physical_pages();
     return m_real_framebuffer_vmobject->physical_pages();
 }
-Span<LockRefPtr<PhysicalPage>> SharedFramebufferVMObject::physical_pages()
+Span<RefPtr<PhysicalPage>> SharedFramebufferVMObject::physical_pages()
 {
     SpinlockLocker locker(m_writes_state_lock);
     if (m_writes_are_faked)
@@ -107,7 +107,7 @@ Span<LockRefPtr<PhysicalPage>> SharedFramebufferVMObject::physical_pages()
     return m_real_framebuffer_vmobject->physical_pages();
 }
 
-SharedFramebufferVMObject::SharedFramebufferVMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, CommittedPhysicalPageSet committed_pages, AnonymousVMObject& real_framebuffer_vmobject)
+SharedFramebufferVMObject::SharedFramebufferVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, CommittedPhysicalPageSet committed_pages, AnonymousVMObject& real_framebuffer_vmobject)
     : VMObject(move(new_physical_pages))
     , m_real_framebuffer_vmobject(real_framebuffer_vmobject)
     , m_committed_pages(move(committed_pages))

+ 13 - 13
Kernel/Memory/SharedFramebufferVMObject.h

@@ -22,15 +22,15 @@ public:
         static ErrorOr<NonnullLockRefPtr<FakeWritesFramebufferVMObject>> try_create(Badge<SharedFramebufferVMObject>, SharedFramebufferVMObject const& parent_object);
 
     private:
-        FakeWritesFramebufferVMObject(SharedFramebufferVMObject const& parent_object, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+        FakeWritesFramebufferVMObject(SharedFramebufferVMObject const& parent_object, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
             : VMObject(move(new_physical_pages))
             , m_parent_object(parent_object)
         {
         }
         virtual StringView class_name() const override { return "FakeWritesFramebufferVMObject"sv; }
         virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override { return Error::from_errno(ENOTIMPL); }
-        virtual Span<LockRefPtr<PhysicalPage> const> physical_pages() const override { return m_parent_object->fake_sink_framebuffer_physical_pages(); }
-        virtual Span<LockRefPtr<PhysicalPage>> physical_pages() override { return m_parent_object->fake_sink_framebuffer_physical_pages(); }
+        virtual Span<RefPtr<PhysicalPage> const> physical_pages() const override { return m_parent_object->fake_sink_framebuffer_physical_pages(); }
+        virtual Span<RefPtr<PhysicalPage>> physical_pages() override { return m_parent_object->fake_sink_framebuffer_physical_pages(); }
         NonnullLockRefPtr<SharedFramebufferVMObject> m_parent_object;
     };
 
@@ -39,15 +39,15 @@ public:
         static ErrorOr<NonnullLockRefPtr<RealWritesFramebufferVMObject>> try_create(Badge<SharedFramebufferVMObject>, SharedFramebufferVMObject const& parent_object);
 
     private:
-        RealWritesFramebufferVMObject(SharedFramebufferVMObject const& parent_object, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+        RealWritesFramebufferVMObject(SharedFramebufferVMObject const& parent_object, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
             : VMObject(move(new_physical_pages))
             , m_parent_object(parent_object)
         {
         }
         virtual StringView class_name() const override { return "RealWritesFramebufferVMObject"sv; }
         virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override { return Error::from_errno(ENOTIMPL); }
-        virtual Span<LockRefPtr<PhysicalPage> const> physical_pages() const override { return m_parent_object->real_framebuffer_physical_pages(); }
-        virtual Span<LockRefPtr<PhysicalPage>> physical_pages() override { return m_parent_object->real_framebuffer_physical_pages(); }
+        virtual Span<RefPtr<PhysicalPage> const> physical_pages() const override { return m_parent_object->real_framebuffer_physical_pages(); }
+        virtual Span<RefPtr<PhysicalPage>> physical_pages() override { return m_parent_object->real_framebuffer_physical_pages(); }
         NonnullLockRefPtr<SharedFramebufferVMObject> m_parent_object;
     };
 
@@ -60,14 +60,14 @@ public:
     void switch_to_fake_sink_framebuffer_writes(Badge<Kernel::DisplayConnector>);
     void switch_to_real_framebuffer_writes(Badge<Kernel::DisplayConnector>);
 
-    virtual Span<LockRefPtr<PhysicalPage> const> physical_pages() const override;
-    virtual Span<LockRefPtr<PhysicalPage>> physical_pages() override;
+    virtual Span<RefPtr<PhysicalPage> const> physical_pages() const override;
+    virtual Span<RefPtr<PhysicalPage>> physical_pages() override;
 
-    Span<LockRefPtr<PhysicalPage>> fake_sink_framebuffer_physical_pages();
-    Span<LockRefPtr<PhysicalPage> const> fake_sink_framebuffer_physical_pages() const;
+    Span<RefPtr<PhysicalPage>> fake_sink_framebuffer_physical_pages();
+    Span<RefPtr<PhysicalPage> const> fake_sink_framebuffer_physical_pages() const;
 
-    Span<LockRefPtr<PhysicalPage>> real_framebuffer_physical_pages();
-    Span<LockRefPtr<PhysicalPage> const> real_framebuffer_physical_pages() const;
+    Span<RefPtr<PhysicalPage>> real_framebuffer_physical_pages();
+    Span<RefPtr<PhysicalPage> const> real_framebuffer_physical_pages() const;
 
     FakeWritesFramebufferVMObject const& fake_writes_framebuffer_vmobject() const { return *m_fake_writes_framebuffer_vmobject; }
     FakeWritesFramebufferVMObject& fake_writes_framebuffer_vmobject() { return *m_fake_writes_framebuffer_vmobject; }
@@ -76,7 +76,7 @@ public:
     RealWritesFramebufferVMObject& real_writes_framebuffer_vmobject() { return *m_real_writes_framebuffer_vmobject; }
 
 private:
-    SharedFramebufferVMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, CommittedPhysicalPageSet, AnonymousVMObject& real_framebuffer_vmobject);
+    SharedFramebufferVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, CommittedPhysicalPageSet, AnonymousVMObject& real_framebuffer_vmobject);
 
     virtual StringView class_name() const override { return "SharedFramebufferVMObject"sv; }
 

+ 2 - 2
Kernel/Memory/SharedInodeVMObject.cpp

@@ -29,12 +29,12 @@ ErrorOr<NonnullLockRefPtr<VMObject>> SharedInodeVMObject::try_clone()
     return adopt_nonnull_lock_ref_or_enomem<VMObject>(new (nothrow) SharedInodeVMObject(*this, move(new_physical_pages), move(dirty_pages)));
 }
 
-SharedInodeVMObject::SharedInodeVMObject(Inode& inode, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+SharedInodeVMObject::SharedInodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
     : InodeVMObject(inode, move(new_physical_pages), move(dirty_pages))
 {
 }
 
-SharedInodeVMObject::SharedInodeVMObject(SharedInodeVMObject const& other, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+SharedInodeVMObject::SharedInodeVMObject(SharedInodeVMObject const& other, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
     : InodeVMObject(other, move(new_physical_pages), move(dirty_pages))
 {
 }

+ 2 - 2
Kernel/Memory/SharedInodeVMObject.h

@@ -23,8 +23,8 @@ public:
 private:
     virtual bool is_shared_inode() const override { return true; }
 
-    explicit SharedInodeVMObject(Inode&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
-    explicit SharedInodeVMObject(SharedInodeVMObject const&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+    explicit SharedInodeVMObject(Inode&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+    explicit SharedInodeVMObject(SharedInodeVMObject const&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
 
     virtual StringView class_name() const override { return "SharedInodeVMObject"sv; }
 

+ 4 - 4
Kernel/Memory/VMObject.cpp

@@ -17,17 +17,17 @@ SpinlockProtected<VMObject::AllInstancesList>& VMObject::all_instances()
     return s_all_instances;
 }
 
-ErrorOr<FixedArray<LockRefPtr<PhysicalPage>>> VMObject::try_clone_physical_pages() const
+ErrorOr<FixedArray<RefPtr<PhysicalPage>>> VMObject::try_clone_physical_pages() const
 {
     return m_physical_pages.try_clone();
 }
 
-ErrorOr<FixedArray<LockRefPtr<PhysicalPage>>> VMObject::try_create_physical_pages(size_t size)
+ErrorOr<FixedArray<RefPtr<PhysicalPage>>> VMObject::try_create_physical_pages(size_t size)
 {
-    return FixedArray<LockRefPtr<PhysicalPage>>::try_create(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
+    return FixedArray<RefPtr<PhysicalPage>>::try_create(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
 }
 
-VMObject::VMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+VMObject::VMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
     : m_physical_pages(move(new_physical_pages))
 {
     all_instances().with([&](auto& list) { list.append(*this); });

+ 7 - 7
Kernel/Memory/VMObject.h

@@ -8,9 +8,9 @@
 
 #include <AK/FixedArray.h>
 #include <AK/IntrusiveList.h>
+#include <AK/RefPtr.h>
 #include <Kernel/Forward.h>
 #include <Kernel/Library/ListedRefCounted.h>
-#include <Kernel/Library/LockRefPtr.h>
 #include <Kernel/Library/LockWeakable.h>
 #include <Kernel/Locking/Mutex.h>
 #include <Kernel/Memory/Region.h>
@@ -35,8 +35,8 @@ public:
 
     size_t page_count() const { return m_physical_pages.size(); }
 
-    virtual Span<LockRefPtr<PhysicalPage> const> physical_pages() const { return m_physical_pages.span(); }
-    virtual Span<LockRefPtr<PhysicalPage>> physical_pages() { return m_physical_pages.span(); }
+    virtual Span<RefPtr<PhysicalPage> const> physical_pages() const { return m_physical_pages.span(); }
+    virtual Span<RefPtr<PhysicalPage>> physical_pages() { return m_physical_pages.span(); }
 
     size_t size() const { return m_physical_pages.size() * PAGE_SIZE; }
 
@@ -55,15 +55,15 @@ public:
     }
 
 protected:
-    static ErrorOr<FixedArray<LockRefPtr<PhysicalPage>>> try_create_physical_pages(size_t);
-    ErrorOr<FixedArray<LockRefPtr<PhysicalPage>>> try_clone_physical_pages() const;
-    explicit VMObject(FixedArray<LockRefPtr<PhysicalPage>>&&);
+    static ErrorOr<FixedArray<RefPtr<PhysicalPage>>> try_create_physical_pages(size_t);
+    ErrorOr<FixedArray<RefPtr<PhysicalPage>>> try_clone_physical_pages() const;
+    explicit VMObject(FixedArray<RefPtr<PhysicalPage>>&&);
 
     template<typename Callback>
     void for_each_region(Callback);
 
     IntrusiveListNode<VMObject> m_list_node;
-    FixedArray<LockRefPtr<PhysicalPage>> m_physical_pages;
+    FixedArray<RefPtr<PhysicalPage>> m_physical_pages;
 
     mutable RecursiveSpinlock m_lock { LockRank::None };
 

+ 2 - 2
Kernel/Storage/ATA/AHCI/Port.cpp

@@ -54,7 +54,7 @@ ErrorOr<void> AHCIPort::allocate_resources_and_initialize_ports()
     return {};
 }
 
-UNMAP_AFTER_INIT AHCIPort::AHCIPort(AHCIController const& controller, NonnullLockRefPtr<Memory::PhysicalPage> identify_buffer_page, AHCI::HBADefinedCapabilities hba_capabilities, volatile AHCI::PortRegisters& registers, u32 port_index)
+UNMAP_AFTER_INIT AHCIPort::AHCIPort(AHCIController const& controller, NonnullRefPtr<Memory::PhysicalPage> identify_buffer_page, AHCI::HBADefinedCapabilities hba_capabilities, volatile AHCI::PortRegisters& registers, u32 port_index)
     : m_port_index(port_index)
     , m_hba_capabilities(hba_capabilities)
     , m_identify_buffer_page(move(identify_buffer_page))
@@ -496,7 +496,7 @@ Optional<AsyncDeviceRequest::RequestResult> AHCIPort::prepare_and_set_scatter_li
     VERIFY(m_lock.is_locked());
     VERIFY(request.block_count() > 0);
 
-    NonnullLockRefPtrVector<Memory::PhysicalPage> allocated_dma_regions;
+    NonnullRefPtrVector<Memory::PhysicalPage> allocated_dma_regions;
     for (size_t index = 0; index < calculate_descriptors_count(request.block_count()); index++) {
         allocated_dma_regions.append(m_dma_buffers.at(index));
     }

+ 8 - 7
Kernel/Storage/ATA/AHCI/Port.h

@@ -6,10 +6,11 @@
 
 #pragma once
 
+#include <AK/NonnullRefPtrVector.h>
 #include <AK/OwnPtr.h>
+#include <AK/RefPtr.h>
 #include <Kernel/Devices/Device.h>
 #include <Kernel/Interrupts/IRQHandler.h>
-#include <Kernel/Library/LockRefPtr.h>
 #include <Kernel/Library/LockWeakPtr.h>
 #include <Kernel/Library/LockWeakable.h>
 #include <Kernel/Locking/Mutex.h>
@@ -56,7 +57,7 @@ private:
     bool is_phy_enabled() const { return (m_port_registers.ssts & 0xf) == 3; }
     bool initialize();
 
-    AHCIPort(AHCIController const&, NonnullLockRefPtr<Memory::PhysicalPage> identify_buffer_page, AHCI::HBADefinedCapabilities, volatile AHCI::PortRegisters&, u32 port_index);
+    AHCIPort(AHCIController const&, NonnullRefPtr<Memory::PhysicalPage> identify_buffer_page, AHCI::HBADefinedCapabilities, volatile AHCI::PortRegisters&, u32 port_index);
 
     ALWAYS_INLINE void clear_sata_error_register() const;
 
@@ -111,11 +112,11 @@ private:
 
     mutable bool m_wait_for_completion { false };
 
-    NonnullLockRefPtrVector<Memory::PhysicalPage> m_dma_buffers;
-    NonnullLockRefPtrVector<Memory::PhysicalPage> m_command_table_pages;
-    LockRefPtr<Memory::PhysicalPage> m_command_list_page;
+    NonnullRefPtrVector<Memory::PhysicalPage> m_dma_buffers;
+    NonnullRefPtrVector<Memory::PhysicalPage> m_command_table_pages;
+    RefPtr<Memory::PhysicalPage> m_command_list_page;
     OwnPtr<Memory::Region> m_command_list_region;
-    LockRefPtr<Memory::PhysicalPage> m_fis_receive_page;
+    RefPtr<Memory::PhysicalPage> m_fis_receive_page;
     LockRefPtr<ATADevice> m_connected_device;
 
     u32 m_port_index;
@@ -125,7 +126,7 @@ private:
     // it's probably better to just "cache" this here instead.
     AHCI::HBADefinedCapabilities const m_hba_capabilities;
 
-    NonnullLockRefPtr<Memory::PhysicalPage> m_identify_buffer_page;
+    NonnullRefPtr<Memory::PhysicalPage> m_identify_buffer_page;
 
     volatile AHCI::PortRegisters& m_port_registers;
     LockWeakPtr<AHCIController> m_parent_controller;

+ 2 - 2
Kernel/Storage/ATA/ATAPort.h

@@ -145,8 +145,8 @@ protected:
 
     OwnPtr<Memory::Region> m_prdt_region;
     OwnPtr<Memory::Region> m_dma_buffer_region;
-    LockRefPtr<Memory::PhysicalPage> m_prdt_page;
-    LockRefPtr<Memory::PhysicalPage> m_dma_buffer_page;
+    RefPtr<Memory::PhysicalPage> m_prdt_page;
+    RefPtr<Memory::PhysicalPage> m_dma_buffer_page;
 
     const u8 m_port_index;
     NonnullLockRefPtrVector<ATADevice> m_ata_devices;

+ 5 - 5
Kernel/Storage/NVMe/NVMeController.cpp

@@ -152,7 +152,7 @@ UNMAP_AFTER_INIT u32 NVMeController::get_admin_q_dept()
 UNMAP_AFTER_INIT ErrorOr<void> NVMeController::identify_and_init_namespaces()
 {
 
-    LockRefPtr<Memory::PhysicalPage> prp_dma_buffer;
+    RefPtr<Memory::PhysicalPage> prp_dma_buffer;
     OwnPtr<Memory::Region> prp_dma_region;
     auto namespace_data_struct = TRY(ByteBuffer::create_zeroed(NVMe_IDENTIFY_SIZE));
     u32 active_namespace_list[NVMe_IDENTIFY_SIZE / sizeof(u32)];
@@ -259,9 +259,9 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_admin_queue(Optional<u8> i
 {
     auto qdepth = get_admin_q_dept();
     OwnPtr<Memory::Region> cq_dma_region;
-    NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_pages;
+    NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_pages;
     OwnPtr<Memory::Region> sq_dma_region;
-    NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_pages;
+    NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_pages;
     auto cq_size = round_up_to_power_of_two(CQ_SIZE(qdepth), 4096);
     auto sq_size = round_up_to_power_of_two(SQ_SIZE(qdepth), 4096);
     if (!reset_controller()) {
@@ -300,9 +300,9 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_admin_queue(Optional<u8> i
 UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_io_queue(u8 qid, Optional<u8> irq)
 {
     OwnPtr<Memory::Region> cq_dma_region;
-    NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_pages;
+    NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_pages;
     OwnPtr<Memory::Region> sq_dma_region;
-    NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_pages;
+    NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_pages;
     auto cq_size = round_up_to_power_of_two(CQ_SIZE(IO_QUEUE_SIZE), 4096);
     auto sq_size = round_up_to_power_of_two(SQ_SIZE(IO_QUEUE_SIZE), 4096);
 

+ 1 - 1
Kernel/Storage/NVMe/NVMeInterruptQueue.cpp

@@ -11,7 +11,7 @@
 
 namespace Kernel {
 
-UNMAP_AFTER_INIT NVMeInterruptQueue::NVMeInterruptQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
+UNMAP_AFTER_INIT NVMeInterruptQueue::NVMeInterruptQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
     : NVMeQueue(move(rw_dma_region), rw_dma_page, qid, q_depth, move(cq_dma_region), cq_dma_page, move(sq_dma_region), sq_dma_page, move(db_regs))
     , IRQHandler(irq)
 {

+ 1 - 1
Kernel/Storage/NVMe/NVMeInterruptQueue.h

@@ -13,7 +13,7 @@ namespace Kernel {
 class NVMeInterruptQueue : public NVMeQueue
     , public IRQHandler {
 public:
-    NVMeInterruptQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs);
+    NVMeInterruptQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs);
     void submit_sqe(NVMeSubmission& submission) override;
     virtual ~NVMeInterruptQueue() override {};
 

+ 1 - 1
Kernel/Storage/NVMe/NVMePollQueue.cpp

@@ -10,7 +10,7 @@
 #include "NVMeDefinitions.h"
 
 namespace Kernel {
-UNMAP_AFTER_INIT NVMePollQueue::NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
+UNMAP_AFTER_INIT NVMePollQueue::NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
     : NVMeQueue(move(rw_dma_region), rw_dma_page, qid, q_depth, move(cq_dma_region), cq_dma_page, move(sq_dma_region), sq_dma_page, move(db_regs))
 {
 }

+ 1 - 1
Kernel/Storage/NVMe/NVMePollQueue.h

@@ -12,7 +12,7 @@ namespace Kernel {
 
 class NVMePollQueue : public NVMeQueue {
 public:
-    NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs);
+    NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs);
     void submit_sqe(NVMeSubmission& submission) override;
     virtual ~NVMePollQueue() override {};
 

+ 3 - 3
Kernel/Storage/NVMe/NVMeQueue.cpp

@@ -13,10 +13,10 @@
 #include <Kernel/Storage/NVMe/NVMePollQueue.h>
 
 namespace Kernel {
-ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
+ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
 {
     // Note: Allocate DMA region for RW operation. For now the requests don't exceed more than 4096 bytes (Storage device takes care of it)
-    LockRefPtr<Memory::PhysicalPage> rw_dma_page;
+    RefPtr<Memory::PhysicalPage> rw_dma_page;
     auto rw_dma_region = TRY(MM.allocate_dma_buffer_page("NVMe Queue Read/Write DMA"sv, Memory::Region::Access::ReadWrite, rw_dma_page));
     if (!irq.has_value()) {
         auto queue = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) NVMePollQueue(move(rw_dma_region), *rw_dma_page, qid, q_depth, move(cq_dma_region), cq_dma_page, move(sq_dma_region), sq_dma_page, move(db_regs))));
@@ -26,7 +26,7 @@ ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, Optional<u8
     return queue;
 }
 
-UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
+UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
     : m_current_request(nullptr)
     , m_rw_dma_region(move(rw_dma_region))
     , m_qid(qid)

+ 6 - 5
Kernel/Storage/NVMe/NVMeQueue.h

@@ -7,6 +7,7 @@
 #pragma once
 
 #include <AK/AtomicRefCounted.h>
+#include <AK/NonnullRefPtrVector.h>
 #include <AK/OwnPtr.h>
 #include <AK/Types.h>
 #include <Kernel/Bus/PCI/Device.h>
@@ -29,7 +30,7 @@ struct DoorbellRegister {
 class AsyncBlockDeviceRequest;
 class NVMeQueue : public AtomicRefCounted<NVMeQueue> {
 public:
-    static ErrorOr<NonnullLockRefPtr<NVMeQueue>> try_create(u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
+    static ErrorOr<NonnullLockRefPtr<NVMeQueue>> try_create(u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
     bool is_admin_queue() { return m_admin_queue; };
     u16 submit_sync_sqe(NVMeSubmission&);
     void read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32 count);
@@ -43,7 +44,7 @@ protected:
     {
         m_db_regs->sq_tail = m_sq_tail;
     }
-    NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
+    NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
 
 private:
     bool cqe_available();
@@ -70,12 +71,12 @@ private:
     u32 m_qdepth {};
     Spinlock m_sq_lock { LockRank::Interrupts };
     OwnPtr<Memory::Region> m_cq_dma_region;
-    NonnullLockRefPtrVector<Memory::PhysicalPage> m_cq_dma_page;
+    NonnullRefPtrVector<Memory::PhysicalPage> m_cq_dma_page;
     Span<NVMeSubmission> m_sqe_array;
     OwnPtr<Memory::Region> m_sq_dma_region;
-    NonnullLockRefPtrVector<Memory::PhysicalPage> m_sq_dma_page;
+    NonnullRefPtrVector<Memory::PhysicalPage> m_sq_dma_page;
     Span<NVMeCompletion> m_cqe_array;
     Memory::TypedMapping<DoorbellRegister volatile> m_db_regs;
-    NonnullLockRefPtr<Memory::PhysicalPage> m_rw_dma_page;
+    NonnullRefPtr<Memory::PhysicalPage> m_rw_dma_page;
 };
 }