Mirror of https://github.com/LadybirdBrowser/ladybird.git (synced 2024-11-22 23:50:19 +00:00)
11eee67b85
Until now, our kernel has reimplemented a number of AK classes to
provide automatic internal locking:

- RefPtr
- NonnullRefPtr
- WeakPtr
- Weakable

This patch renames the Kernel classes so that they can coexist with
the original AK classes:

- RefPtr => LockRefPtr
- NonnullRefPtr => NonnullLockRefPtr
- WeakPtr => LockWeakPtr
- Weakable => LockWeakable

The goal here is to eventually get rid of the Lock* classes in favor
of using external locking.
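As a rough sketch of the two styles (illustrative only, not part of this
commit: Example is a made-up struct, and the external-locking variant
assumes the kernel's SpinlockProtected<T> wrapper):

struct Example {
    // Internal locking (what this patch renames): the pointer class
    // itself locks around every access.
    LockRefPtr<Inode> m_inode_with_internal_locking;

    // External locking (the stated goal): a plain AK::RefPtr guarded
    // by an explicit lock, here via SpinlockProtected.
    SpinlockProtected<RefPtr<Inode>> m_inode;

    void use_inode()
    {
        m_inode.with([&](RefPtr<Inode>& inode) {
            // The spinlock is held for the duration of this lambda.
            if (inode)
                (void)inode->size();
        });
    }
};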
63 lines · 2.4 KiB · C++
/*
 * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/FileSystem/Inode.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/SharedInodeVMObject.h>

namespace Kernel::Memory {

ErrorOr<NonnullLockRefPtr<SharedInodeVMObject>> SharedInodeVMObject::try_create_with_inode(Inode& inode)
{
    size_t size = inode.size();
    // Reuse the inode's existing shared VMObject if it has one, so every
    // shared mapping of this inode sees the same physical pages.
    if (auto shared_vmobject = inode.shared_vmobject())
        return shared_vmobject.release_nonnull();
    auto new_physical_pages = TRY(VMObject::try_create_physical_pages(size));
    auto dirty_pages = TRY(Bitmap::try_create(new_physical_pages.size(), false));
    auto vmobject = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) SharedInodeVMObject(inode, move(new_physical_pages), move(dirty_pages))));
    TRY(vmobject->inode().set_shared_vmobject(*vmobject));
    return vmobject;
}

ErrorOr<NonnullLockRefPtr<VMObject>> SharedInodeVMObject::try_clone()
{
    // Clone the physical pages and give the clone a fresh dirty-page bitmap.
    auto new_physical_pages = TRY(this->try_clone_physical_pages());
    auto dirty_pages = TRY(Bitmap::try_create(new_physical_pages.size(), false));
    return adopt_nonnull_lock_ref_or_enomem<VMObject>(new (nothrow) SharedInodeVMObject(*this, move(new_physical_pages), move(dirty_pages)));
}

SharedInodeVMObject::SharedInodeVMObject(Inode& inode, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
    : InodeVMObject(inode, move(new_physical_pages), move(dirty_pages))
{
}

SharedInodeVMObject::SharedInodeVMObject(SharedInodeVMObject const& other, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
    : InodeVMObject(other, move(new_physical_pages), move(dirty_pages))
{
}

ErrorOr<void> SharedInodeVMObject::sync(off_t offset_in_pages, size_t pages)
{
    SpinlockLocker locker(m_lock);

    size_t highest_page_to_flush = min(page_count(), offset_in_pages + pages);

    for (size_t page_index = offset_in_pages; page_index < highest_page_to_flush; ++page_index) {
        auto& physical_page = m_physical_pages[page_index];
        // Pages that were never faulted in have nothing to flush.
        if (!physical_page)
            continue;

        // Copy the physical page into a kernel stack buffer, then write
        // that buffer back to the backing inode.
        u8 page_buffer[PAGE_SIZE];
        MM.copy_physical_page(*physical_page, page_buffer);

        MutexLocker inode_locker(m_inode->m_inode_lock);
        TRY(m_inode->write_bytes(page_index * PAGE_SIZE, PAGE_SIZE, UserOrKernelBuffer::for_kernel_buffer(page_buffer), nullptr));
    }

    return {};
}

}