Kernel: Make VMObject construction OOM-aware
This commit moves the allocation of the resources required for VMObject from its constructors to the constructors of its child classes. We're making this change to give the child classes the chance to expose the fallibility of the allocation.
parent 64778f9e69
commit d1f265e851

Notes (sideshowbarker, 2024-07-17 20:50:21 +09:00):
Author: https://github.com/creator1creeper1
Commit: https://github.com/SerenityOS/serenity/commit/d1f265e8517
Pull-request: https://github.com/SerenityOS/serenity/pull/11843
Reviewed-by: https://github.com/IdanHo ✅
Reviewed-by: https://github.com/bgianfo
4 changed files with 30 additions and 13 deletions
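The pattern applied in this commit, in general terms: the base class stops allocating in its own constructor and instead takes ownership of an already-allocated resource, while each child class performs the fallible allocation before delegating to the base (for now through a must_*_but_fixme_should_propagate_errors wrapper, eventually through a fully error-propagating factory). Below is a minimal stand-alone sketch of that shape in plain C++; Base, Child, and try_create_pages() are illustrative stand-ins only (std::optional plays the role of ErrorOr, std::vector the role of FixedArray), not code from this commit.

#include <cstddef>
#include <new>
#include <optional>
#include <utility>
#include <vector>

class Base {
protected:
    // Infallible by construction: the pages were already allocated by the caller.
    explicit Base(std::vector<int>&& pages)
        : m_pages(std::move(pages))
    {
    }

    // Fallible helper the child runs *before* entering any constructor.
    static std::optional<std::vector<int>> try_create_pages(std::size_t count)
    {
        try {
            return std::vector<int>(count);
        } catch (std::bad_alloc const&) {
            return std::nullopt; // OOM is reported to the caller instead of aborting.
        }
    }

    std::vector<int> m_pages;
};

class Child : public Base {
public:
    static std::optional<Child> try_create(std::size_t count)
    {
        auto pages = try_create_pages(count);
        if (!pages.has_value())
            return std::nullopt; // Propagate the allocation failure upward.
        return Child(std::move(*pages));
    }

private:
    explicit Child(std::vector<int>&& pages)
        : Base(std::move(pages))
    {
    }
};

A caller would then write auto child = Child::try_create(1024); and handle the empty optional, rather than having an out-of-memory condition hidden inside a constructor that cannot report failure.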
Kernel/Memory/AnonymousVMObject.cpp

@@ -114,7 +114,7 @@ ErrorOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_for_phys
 }
 
 AnonymousVMObject::AnonymousVMObject(size_t size, AllocationStrategy strategy, Optional<CommittedPhysicalPageSet> committed_pages)
-    : VMObject(size)
+    : VMObject(VMObject::must_create_physical_pages_but_fixme_should_propagate_errors(size))
     , m_unused_committed_pages(move(committed_pages))
 {
     if (strategy == AllocationStrategy::AllocateNow) {
@@ -129,7 +129,7 @@ AnonymousVMObject::AnonymousVMObject(size_t size, AllocationStrategy strategy, O
 }
 
 AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, size_t size)
-    : VMObject(size)
+    : VMObject(VMObject::must_create_physical_pages_but_fixme_should_propagate_errors(size))
 {
     VERIFY(paddr.page_base() == paddr);
     for (size_t i = 0; i < page_count(); ++i)
@@ -137,7 +137,7 @@ AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, size_t size)
 }
 
 AnonymousVMObject::AnonymousVMObject(Span<NonnullRefPtr<PhysicalPage>> physical_pages)
-    : VMObject(physical_pages.size() * PAGE_SIZE)
+    : VMObject(VMObject::must_create_physical_pages_but_fixme_should_propagate_errors(physical_pages.size() * PAGE_SIZE))
 {
     for (size_t i = 0; i < physical_pages.size(); ++i) {
         m_physical_pages[i] = physical_pages[i];
@@ -145,7 +145,7 @@ AnonymousVMObject::AnonymousVMObject(Span<NonnullRefPtr<PhysicalPage>> physical_
 }
 
 AnonymousVMObject::AnonymousVMObject(AnonymousVMObject const& other, NonnullRefPtr<SharedCommittedCowPages> shared_committed_cow_pages)
-    : VMObject(other)
+    : VMObject(other.must_clone_physical_pages_but_fixme_should_propagate_errors())
     , m_shared_committed_cow_pages(move(shared_committed_cow_pages))
     , m_purgeable(other.m_purgeable)
 {
Kernel/Memory/InodeVMObject.cpp

@@ -10,14 +10,14 @@
 namespace Kernel::Memory {
 
 InodeVMObject::InodeVMObject(Inode& inode, size_t size)
-    : VMObject(size)
+    : VMObject(VMObject::must_create_physical_pages_but_fixme_should_propagate_errors(size))
     , m_inode(inode)
     , m_dirty_pages(page_count(), false)
 {
 }
 
 InodeVMObject::InodeVMObject(InodeVMObject const& other)
-    : VMObject(other)
+    : VMObject(other.must_clone_physical_pages_but_fixme_should_propagate_errors())
     , m_inode(other.m_inode)
     , m_dirty_pages(page_count(), false)
 {
Kernel/Memory/VMObject.cpp

@@ -17,14 +17,28 @@ SpinlockProtected<VMObject::AllInstancesList>& VMObject::all_instances()
     return s_all_instances;
 }
 
-VMObject::VMObject(VMObject const& other)
-    : m_physical_pages(other.m_physical_pages.must_clone_but_fixme_should_propagate_errors())
+ErrorOr<FixedArray<RefPtr<PhysicalPage>>> VMObject::try_clone_physical_pages() const
 {
-    all_instances().with([&](auto& list) { list.append(*this); });
+    return m_physical_pages.try_clone();
 }
 
-VMObject::VMObject(size_t size)
-    : m_physical_pages(FixedArray<RefPtr<PhysicalPage>>::must_create_but_fixme_should_propagate_errors(ceil_div(size, static_cast<size_t>(PAGE_SIZE))))
+FixedArray<RefPtr<PhysicalPage>> VMObject::must_clone_physical_pages_but_fixme_should_propagate_errors() const
+{
+    return MUST(try_clone_physical_pages());
+}
+
+ErrorOr<FixedArray<RefPtr<PhysicalPage>>> VMObject::try_create_physical_pages(size_t size)
+{
+    return FixedArray<RefPtr<PhysicalPage>>::try_create(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
+}
+
+FixedArray<RefPtr<PhysicalPage>> VMObject::must_create_physical_pages_but_fixme_should_propagate_errors(size_t size)
+{
+    return MUST(try_create_physical_pages(size));
+}
+
+VMObject::VMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
+    : m_physical_pages(move(new_physical_pages))
 {
     all_instances().with([&](auto& list) { list.append(*this); });
 }
Kernel/Memory/VMObject.h

@@ -54,8 +54,11 @@ public:
     }
 
 protected:
-    explicit VMObject(size_t);
-    explicit VMObject(VMObject const&);
+    static ErrorOr<FixedArray<RefPtr<PhysicalPage>>> try_create_physical_pages(size_t);
+    static FixedArray<RefPtr<PhysicalPage>> must_create_physical_pages_but_fixme_should_propagate_errors(size_t);
+    ErrorOr<FixedArray<RefPtr<PhysicalPage>>> try_clone_physical_pages() const;
+    FixedArray<RefPtr<PhysicalPage>> must_clone_physical_pages_but_fixme_should_propagate_errors() const;
+    explicit VMObject(FixedArray<RefPtr<PhysicalPage>>&&);
 
     template<typename Callback>
     void for_each_region(Callback);
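To show how the new protected interface in VMObject.h is meant to be consumed, here is a hypothetical subclass factory that is not part of this commit: the page array is allocated first via try_create_physical_pages(), so an out-of-memory condition surfaces as an Error before the now-infallible VMObject(FixedArray&&) constructor runs. ExampleVMObject and its try_create() are invented for illustration, and the virtual members a real VMObject subclass must override are omitted.

// Hypothetical subclass, for illustration only (not in this commit).
// Overrides of VMObject's virtual members are omitted for brevity.
class ExampleVMObject final : public VMObject {
public:
    static ErrorOr<NonnullRefPtr<ExampleVMObject>> try_create(size_t size)
    {
        // The fallible step happens up front; TRY() propagates an OOM Error to the caller.
        auto physical_pages = TRY(try_create_physical_pages(size));
        // Only on success is the object itself constructed.
        return adopt_nonnull_ref_or_enomem(new (nothrow) ExampleVMObject(move(physical_pages)));
    }

private:
    explicit ExampleVMObject(FixedArray<RefPtr<PhysicalPage>>&& physical_pages)
        : VMObject(move(physical_pages))
    {
    }
};

This is the direction the must_*_but_fixme_should_propagate_errors wrappers point toward: once every call site allocates through the try_* helpers, the wrappers (and their MUST() asserts) can be removed.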