Kernel: Remove global MM lock in favor of SpinlockProtected
Globally shared MemoryManager state is now kept in a GlobalData struct and wrapped in SpinlockProtected. A small set of members are left outside the GlobalData struct as they are only set during boot initialization, and then remain constant. This allows us to access those members without taking any locks.
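For readers unfamiliar with the pattern: SpinlockProtected<T> couples a value with the lock that guards it, so the value can only be reached through a callback that runs while the lock is held. The kernel's real implementation lives in the SerenityOS sources; the standalone sketch below (with std::mutex standing in for the kernel spinlock) only illustrates the shape of the API used throughout this diff.

#include <mutex>
#include <utility>

template<typename T>
class SpinlockProtected {
public:
    template<typename... Args>
    explicit SpinlockProtected(Args&&... args)
        : m_value(std::forward<Args>(args)...)
    {
    }

    // All access goes through with(): the lock is held for exactly the
    // duration of the callback, and no reference to the protected value
    // can escape the critical section.
    template<typename Callback>
    decltype(auto) with(Callback callback)
    {
        std::lock_guard lock(m_lock);
        return callback(m_value);
    }

private:
    T m_value;
    std::mutex m_lock; // stand-in for the kernel's Spinlock
};

Compared with a bare global lock like s_mm_lock, this makes it impossible to touch the shared state without holding its lock, which is what lets the commit delete the scattered is_locked_by_current_processor() assertions below.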
Commit: a3b2b20782
Parent: 36225c0ae7
Author: https://github.com/awesomekling
Upstream commit: https://github.com/SerenityOS/serenity/commit/a3b2b20782
Notes: sideshowbarker, 2024-07-17 07:44:16 +09:00
6 changed files with 470 additions and 456 deletions
(Diff for one changed file suppressed because it is too large.)
@@ -93,9 +93,6 @@ struct MemoryManagerData {
     u32 m_quickmap_prev_flags;
 };
 
-// NOLINTNEXTLINE(readability-redundant-declaration) FIXME: Why do we declare this here *and* in Thread.h?
-extern RecursiveSpinlock s_mm_lock;
-
 // This class represents a set of committed physical pages.
 // When you ask MemoryManager to commit pages for you, you get one of these in return.
 // You can allocate pages from it via `take_one()`
@@ -192,12 +189,7 @@ public:
         PhysicalSize physical_pages_uncommitted { 0 };
     };
 
-    SystemMemoryInfo get_system_memory_info()
-    {
-        SpinlockLocker lock(s_mm_lock);
-        verify_system_memory_info_consistency();
-        return m_system_memory_info;
-    }
+    SystemMemoryInfo get_system_memory_info();
 
     template<IteratorFunction<VMObject&> Callback>
     static void for_each_vmobject(Callback callback)
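The new out-of-line definition of get_system_memory_info() lands in MemoryManager.cpp, whose diff is suppressed above. Based on the old inline body and the new GlobalData field names, it presumably looks roughly like this sketch — an assumption, since the page does not show it:

// Assumed shape of the relocated definition: the consistency check and the
// copy-out now happen inside a single m_global_data critical section.
MemoryManager::SystemMemoryInfo MemoryManager::get_system_memory_info()
{
    return m_global_data.with([&](auto& global_data) {
        auto physical_pages_unused = global_data.system_memory_info.physical_pages_committed
            + global_data.system_memory_info.physical_pages_uncommitted;
        VERIFY(global_data.system_memory_info.physical_pages
            == (global_data.system_memory_info.physical_pages_used + physical_pages_unused));
        return global_data.system_memory_info;
    });
}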
@@ -230,7 +222,14 @@ public:
 
     PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }
 
-    Vector<UsedMemoryRange> const& used_memory_ranges() { return m_used_memory_ranges; }
+    template<typename Callback>
+    void for_each_used_memory_range(Callback callback)
+    {
+        m_global_data.template with([&](auto& global_data) {
+            for (auto& range : global_data.used_memory_ranges)
+                callback(range);
+        });
+    }
     bool is_allowed_to_read_physical_memory_for_userspace(PhysicalAddress, size_t read_length) const;
 
     PhysicalPageEntry& get_physical_page_entry(PhysicalAddress);
@@ -278,29 +277,34 @@ private:
     };
     void release_pte(PageDirectory&, VirtualAddress, IsLastPTERelease);
 
-    ALWAYS_INLINE void verify_system_memory_info_consistency() const
-    {
-        auto physical_pages_unused = m_system_memory_info.physical_pages_committed + m_system_memory_info.physical_pages_uncommitted;
-        VERIFY(m_system_memory_info.physical_pages == (m_system_memory_info.physical_pages_used + physical_pages_unused));
-    }
-
+    // NOTE: These are outside of GlobalData as they are only assigned on startup,
+    //       and then never change. Atomic ref-counting covers that case without
+    //       the need for additional synchronization.
     LockRefPtr<PageDirectory> m_kernel_page_directory;
-
     RefPtr<PhysicalPage> m_shared_zero_page;
     RefPtr<PhysicalPage> m_lazy_committed_page;
 
-    SystemMemoryInfo m_system_memory_info;
-
-    NonnullOwnPtrVector<PhysicalRegion> m_physical_regions;
-    OwnPtr<PhysicalRegion> m_physical_pages_region;
+    // NOTE: These are outside of GlobalData as they are initialized on startup,
+    //       and then never change.
     PhysicalPageEntry* m_physical_page_entries { nullptr };
     size_t m_physical_page_entries_count { 0 };
 
-    SpinlockProtected<RegionTree> m_region_tree;
-
-    Vector<UsedMemoryRange> m_used_memory_ranges;
-    Vector<PhysicalMemoryRange> m_physical_memory_ranges;
-    Vector<ContiguousReservedMemoryRange> m_reserved_memory_ranges;
+    struct GlobalData {
+        GlobalData();
+
+        SystemMemoryInfo system_memory_info;
+
+        NonnullOwnPtrVector<PhysicalRegion> physical_regions;
+        OwnPtr<PhysicalRegion> physical_pages_region;
+
+        RegionTree region_tree;
+
+        Vector<UsedMemoryRange> used_memory_ranges;
+        Vector<PhysicalMemoryRange> physical_memory_ranges;
+        Vector<ContiguousReservedMemoryRange> reserved_memory_ranges;
+    };
+
+    SpinlockProtected<GlobalData> m_global_data;
 };
 
 inline bool is_user_address(VirtualAddress vaddr)
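To see how call sites change, here is a hypothetical mutation of the accounting fields under the new scheme. The function name is made up for illustration; the real call sites live in the suppressed MemoryManager.cpp diff. Where code previously took s_mm_lock and then touched m_system_memory_info directly, it now scopes the whole critical section through m_global_data:

// Hypothetical helper, for illustration only: move page_count pages from
// the uncommitted pool to the committed pool. One with() block replaces
// the old SpinlockLocker lock(s_mm_lock) plus direct member access.
void MemoryManager::example_commit_physical_pages(size_t page_count)
{
    m_global_data.with([&](auto& global_data) {
        VERIFY(global_data.system_memory_info.physical_pages_uncommitted >= page_count);
        global_data.system_memory_info.physical_pages_uncommitted -= page_count;
        global_data.system_memory_info.physical_pages_committed += page_count;
    });
}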
@@ -465,7 +465,6 @@ PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
 PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
 {
     VERIFY(vmobject().is_inode());
-    VERIFY(!s_mm_lock.is_locked_by_current_processor());
     VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
 
     auto& inode_vmobject = static_cast<InodeVMObject&>(vmobject());
@@ -261,10 +261,6 @@ void Scheduler::yield()
 
 void Scheduler::context_switch(Thread* thread)
 {
-    if (Memory::s_mm_lock.is_locked_by_current_processor()) {
-        PANIC("In context switch while holding Memory::s_mm_lock");
-    }
-
     thread->did_schedule();
 
     auto* from_thread = Thread::current();
@@ -41,7 +41,7 @@ RamdiskController::RamdiskController()
 {
     // Populate ramdisk controllers from Multiboot boot modules, if any.
     size_t count = 0;
-    for (auto& used_memory_range : MM.used_memory_ranges()) {
+    MM.for_each_used_memory_range([&](auto& used_memory_range) {
         if (used_memory_range.type == Memory::UsedMemoryRangeType::BootModule) {
             size_t length = Memory::page_round_up(used_memory_range.end.get()).release_value_but_fixme_should_propagate_errors() - used_memory_range.start.get();
             auto region_or_error = MM.allocate_kernel_region(used_memory_range.start, length, "Ramdisk"sv, Memory::Region::Access::ReadWrite);
@@ -52,7 +52,7 @@ RamdiskController::RamdiskController()
             }
             count++;
         }
-    }
+    });
 }
 
 RamdiskController::~RamdiskController() = default;
@@ -144,7 +144,6 @@ Thread::BlockResult Thread::block_impl(BlockTimeout const& timeout, Blocker& blo
     VERIFY(!Processor::current_in_irq());
     VERIFY(this == Thread::current());
     ScopedCritical critical;
-    VERIFY(!Memory::s_mm_lock.is_locked_by_current_processor());
 
     SpinlockLocker scheduler_lock(g_scheduler_lock);
 
@@ -259,7 +258,6 @@ void Thread::block(Kernel::Mutex& lock, SpinlockLocker<Spinlock>& lock_lock, u32
     VERIFY(!Processor::current_in_irq());
     VERIFY(this == Thread::current());
     ScopedCritical critical;
-    VERIFY(!Memory::s_mm_lock.is_locked_by_current_processor());
 
     SpinlockLocker scheduler_lock(g_scheduler_lock);
     SpinlockLocker block_lock(m_block_lock);