2020-02-28 19:20:35 +00:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
|
|
|
|
*
|
2021-04-22 08:24:48 +00:00
|
|
|
* SPDX-License-Identifier: BSD-2-Clause
|
2020-02-28 19:20:35 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <Kernel/FileSystem/Inode.h>
|
2021-08-06 08:45:34 +00:00
|
|
|
#include <Kernel/Memory/InodeVMObject.h>
|
2020-02-28 19:20:35 +00:00
|
|
|
|
2021-08-06 11:49:36 +00:00
|
|
|
namespace Kernel::Memory {
|
2020-02-28 19:20:35 +00:00
|
|
|
|
2022-08-24 13:56:26 +00:00
|
|
|
// Constructs a VMObject backed by the given inode.
// new_physical_pages: the (possibly sparse) page slots handed to the VMObject base.
// dirty_pages: a bitmap with one bit per page, tracking which pages must be
//              written back to the inode before they may be discarded.
InodeVMObject::InodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
    : VMObject(move(new_physical_pages))
    , m_inode(inode)
    , m_dirty_pages(move(dirty_pages))
{
}
|
|
|
|
|
2022-08-24 13:56:26 +00:00
|
|
|
// Copy-construction: shares the same backing inode and clones the per-page
// dirty state from `other` into the freshly supplied dirty bitmap.
InodeVMObject::InodeVMObject(InodeVMObject const& other, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
    : VMObject(move(new_physical_pages))
    , m_inode(other.m_inode)
    , m_dirty_pages(move(dirty_pages))
{
    // Mirror the source object's dirty bits page by page.
    for (size_t page_index = 0; page_index < page_count(); ++page_index) {
        m_dirty_pages.set(page_index, other.m_dirty_pages.get(page_index));
    }
}
|
|
|
|
|
2022-03-16 19:15:15 +00:00
|
|
|
// Defaulted out-of-line; presumably kept here so member types only need to be
// complete in this translation unit — confirm against the header.
InodeVMObject::~InodeVMObject() = default;
|
2020-02-28 19:20:35 +00:00
|
|
|
|
|
|
|
// Returns the number of bytes held in resident pages that are NOT dirty,
// i.e. pages that could be dropped without losing data.
size_t InodeVMObject::amount_clean() const
{
    VERIFY(page_count() == m_dirty_pages.size());
    size_t clean_page_count = 0;
    for (size_t page_index = 0; page_index < page_count(); ++page_index) {
        // A page counts as clean only if it's actually resident and its dirty bit is unset.
        bool is_resident = !!m_physical_pages[page_index];
        if (is_resident && !m_dirty_pages.get(page_index))
            ++clean_page_count;
    }
    return clean_page_count * PAGE_SIZE;
}
|
|
|
|
|
|
|
|
// Returns the number of bytes covered by pages whose dirty bit is set.
size_t InodeVMObject::amount_dirty() const
{
    size_t dirty_page_count = 0;
    for (size_t page_index = 0; page_index < m_dirty_pages.size(); ++page_index) {
        if (m_dirty_pages.get(page_index))
            ++dirty_page_count;
    }
    return dirty_page_count * PAGE_SIZE;
}
|
|
|
|
|
|
|
|
int InodeVMObject::release_all_clean_pages()
|
|
|
|
{
|
2021-08-21 23:49:22 +00:00
|
|
|
SpinlockLocker locker(m_lock);
|
2020-02-28 19:20:35 +00:00
|
|
|
|
|
|
|
int count = 0;
|
|
|
|
for (size_t i = 0; i < page_count(); ++i) {
|
|
|
|
if (!m_dirty_pages.get(i) && m_physical_pages[i]) {
|
|
|
|
m_physical_pages[i] = nullptr;
|
|
|
|
++count;
|
|
|
|
}
|
|
|
|
}
|
2021-07-23 00:40:16 +00:00
|
|
|
if (count) {
|
|
|
|
for_each_region([](auto& region) {
|
|
|
|
region.remap();
|
|
|
|
});
|
|
|
|
}
|
2020-02-28 19:20:35 +00:00
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
2022-08-15 03:05:42 +00:00
|
|
|
int InodeVMObject::try_release_clean_pages(int page_amount)
|
|
|
|
{
|
|
|
|
SpinlockLocker locker(m_lock);
|
|
|
|
|
|
|
|
int count = 0;
|
|
|
|
for (size_t i = 0; i < page_count() && count < page_amount; ++i) {
|
|
|
|
if (!m_dirty_pages.get(i) && m_physical_pages[i]) {
|
|
|
|
m_physical_pages[i] = nullptr;
|
|
|
|
++count;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (count) {
|
|
|
|
for_each_region([](auto& region) {
|
|
|
|
region.remap();
|
|
|
|
});
|
|
|
|
}
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
2020-02-28 19:20:35 +00:00
|
|
|
// Counts how many regions currently map this VMObject with write permission.
u32 InodeVMObject::writable_mappings() const
{
    u32 writable_count = 0;
    // for_each_region is non-const; cast constness away for this read-only traversal.
    const_cast<InodeVMObject&>(*this).for_each_region([&writable_count](auto& region) {
        if (region.is_writable())
            ++writable_count;
    });
    return writable_count;
}
|
|
|
|
|
|
|
|
}
|