Kernel: Split InodeVMObject into two subclasses

We now have PrivateInodeVMObject and SharedInodeVMObject, corresponding
to MAP_PRIVATE and MAP_SHARED respectively.

Note that PrivateInodeVMObject is not used yet.
Author: Andreas Kling <kling@serenityos.org>
Date:   2020-02-28 20:20:35 +01:00
parent 07a26aece3
commit 651417a085
9 changed files with 371 additions and 166 deletions
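
For context: writes through a MAP_SHARED file mapping are carried back to the underlying inode and are visible to other mappings of the same file, while a MAP_PRIVATE mapping gives the process a copy-on-write view that the file never sees. Below is a minimal userspace sketch of the two semantics the new subclasses model — an illustration only, not part of this commit; the file path is hypothetical, and the file is assumed to be at least one page long.

// Hypothetical illustration of MAP_SHARED vs MAP_PRIVATE semantics.
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main()
{
    int fd = open("/tmp/example", O_RDWR); // hypothetical test file, >= 4096 bytes
    if (fd < 0)
        return 1;

    // MAP_SHARED: on the kernel side, backed by a SharedInodeVMObject.
    // Stores are written back to the inode.
    void* shared_ptr = mmap(nullptr, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

    // MAP_PRIVATE: the case PrivateInodeVMObject is meant to back once it is
    // wired up (the commit notes it is not used yet). Copy-on-write: stores
    // stay private to this mapping and never reach the file.
    void* private_ptr = mmap(nullptr, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);

    if (shared_ptr == MAP_FAILED || private_ptr == MAP_FAILED)
        return 1;

    auto* shared = static_cast<unsigned char*>(shared_ptr);
    auto* priv = static_cast<unsigned char*>(private_ptr);

    shared[0] = 1; // written through to the file
    priv[0] = 2;   // CoW fault; the file is unchanged

    munmap(shared, 4096);
    munmap(priv, 4096);
    close(fd);
    return 0;
}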

Kernel/Makefile

@@ -101,11 +101,13 @@ OBJS = \
     TTY/VirtualConsole.o \
     Thread.o \
     VM/AnonymousVMObject.o \
+    VM/InodeVMObject.o \
     VM/MemoryManager.o \
     VM/PageDirectory.o \
     VM/PhysicalPage.o \
     VM/PhysicalRegion.o \
     VM/PurgeableVMObject.o \
+    VM/PrivateInodeVMObject.o \
     VM/RangeAllocator.o \
     VM/Region.o \
     VM/SharedInodeVMObject.o \

Kernel/Process.cpp

@@ -663,12 +663,12 @@ int Process::sys$purge(int mode)
         }
     }
     if (mode & PURGE_ALL_CLEAN_INODE) {
-        NonnullRefPtrVector<SharedInodeVMObject> vmobjects;
+        NonnullRefPtrVector<InodeVMObject> vmobjects;
         {
             InterruptDisabler disabler;
             MM.for_each_vmobject([&](auto& vmobject) {
                 if (vmobject.is_inode())
-                    vmobjects.append(static_cast<SharedInodeVMObject&>(vmobject));
+                    vmobjects.append(static_cast<InodeVMObject&>(vmobject));
                 return IterationDecision::Continue;
             });
         }

Kernel/VM/InodeVMObject.cpp Normal file

@@ -0,0 +1,182 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <Kernel/FileSystem/Inode.h>
#include <Kernel/VM/InodeVMObject.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/Region.h>
namespace Kernel {
InodeVMObject::InodeVMObject(Inode& inode, size_t size)
: VMObject(size)
, m_inode(inode)
, m_dirty_pages(page_count(), false)
{
}
InodeVMObject::InodeVMObject(const InodeVMObject& other)
: VMObject(other)
, m_inode(other.m_inode)
{
}
InodeVMObject::~InodeVMObject()
{
}
size_t InodeVMObject::amount_clean() const
{
size_t count = 0;
ASSERT(page_count() == (size_t)m_dirty_pages.size());
for (size_t i = 0; i < page_count(); ++i) {
if (!m_dirty_pages.get(i) && m_physical_pages[i])
++count;
}
return count * PAGE_SIZE;
}
size_t InodeVMObject::amount_dirty() const
{
size_t count = 0;
for (size_t i = 0; i < m_dirty_pages.size(); ++i) {
if (m_dirty_pages.get(i))
++count;
}
return count * PAGE_SIZE;
}
void InodeVMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
{
dbg() << "VMObject::inode_size_changed: {" << m_inode->fsid() << ":" << m_inode->index() << "} " << old_size << " -> " << new_size;
InterruptDisabler disabler;
auto new_page_count = PAGE_ROUND_UP(new_size) / PAGE_SIZE;
m_physical_pages.resize(new_page_count);
m_dirty_pages.grow(new_page_count, false);
// FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
for_each_region([](auto& region) {
region.remap();
});
}
void InodeVMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size, const u8* data)
{
(void)size;
(void)data;
InterruptDisabler disabler;
ASSERT(offset >= 0);
// FIXME: Only invalidate the parts that actually changed.
for (auto& physical_page : m_physical_pages)
physical_page = nullptr;
#if 0
size_t current_offset = offset;
size_t remaining_bytes = size;
const u8* data_ptr = data;
auto to_page_index = [] (size_t offset) -> size_t {
return offset / PAGE_SIZE;
};
if (current_offset & PAGE_MASK) {
size_t page_index = to_page_index(current_offset);
size_t bytes_to_copy = min(size, PAGE_SIZE - (current_offset & PAGE_MASK));
if (m_physical_pages[page_index]) {
auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
memcpy(ptr, data_ptr, bytes_to_copy);
MM.unquickmap_page();
}
current_offset += bytes_to_copy;
data += bytes_to_copy;
remaining_bytes -= bytes_to_copy;
}
for (size_t page_index = to_page_index(current_offset); page_index < m_physical_pages.size(); ++page_index) {
size_t bytes_to_copy = PAGE_SIZE - (current_offset & PAGE_MASK);
if (m_physical_pages[page_index]) {
auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
memcpy(ptr, data_ptr, bytes_to_copy);
MM.unquickmap_page();
}
current_offset += bytes_to_copy;
data += bytes_to_copy;
}
#endif
// FIXME: Consolidate with inode_size_changed() so we only do a single walk.
for_each_region([](auto& region) {
region.remap();
});
}
int InodeVMObject::release_all_clean_pages()
{
LOCKER(m_paging_lock);
return release_all_clean_pages_impl();
}
int InodeVMObject::release_all_clean_pages_impl()
{
int count = 0;
InterruptDisabler disabler;
for (size_t i = 0; i < page_count(); ++i) {
if (!m_dirty_pages.get(i) && m_physical_pages[i]) {
m_physical_pages[i] = nullptr;
++count;
}
}
for_each_region([](auto& region) {
region.remap();
});
return count;
}
u32 InodeVMObject::writable_mappings() const
{
u32 count = 0;
const_cast<InodeVMObject&>(*this).for_each_region([&](auto& region) {
if (region.is_writable())
++count;
});
return count;
}
u32 InodeVMObject::executable_mappings() const
{
u32 count = 0;
const_cast<InodeVMObject&>(*this).for_each_region([&](auto& region) {
if (region.is_executable())
++count;
});
return count;
}
}

Kernel/VM/InodeVMObject.h Normal file

@@ -0,0 +1,69 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include <AK/Bitmap.h>
#include <Kernel/UnixTypes.h>
#include <Kernel/VM/VMObject.h>
namespace Kernel {
class InodeVMObject : public VMObject {
public:
virtual ~InodeVMObject() override;
Inode& inode() { return *m_inode; }
const Inode& inode() const { return *m_inode; }
void inode_contents_changed(Badge<Inode>, off_t, ssize_t, const u8*);
void inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size);
size_t amount_dirty() const;
size_t amount_clean() const;
int release_all_clean_pages();
u32 writable_mappings() const;
u32 executable_mappings() const;
protected:
explicit InodeVMObject(Inode&, size_t);
explicit InodeVMObject(const InodeVMObject&);
InodeVMObject& operator=(const InodeVMObject&) = delete;
InodeVMObject& operator=(InodeVMObject&&) = delete;
InodeVMObject(InodeVMObject&&) = delete;
virtual bool is_inode() const final { return true; }
int release_all_clean_pages_impl();
NonnullRefPtr<Inode> m_inode;
Bitmap m_dirty_pages;
};
}

Kernel/VM/PrivateInodeVMObject.cpp Normal file

@@ -0,0 +1,56 @@
/*
* Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <Kernel/FileSystem/Inode.h>
#include <Kernel/VM/PrivateInodeVMObject.h>
namespace Kernel {
NonnullRefPtr<PrivateInodeVMObject> PrivateInodeVMObject::create_with_inode(Inode& inode)
{
return adopt(*new PrivateInodeVMObject(inode, inode.size()));
}
NonnullRefPtr<VMObject> PrivateInodeVMObject::clone()
{
return adopt(*new PrivateInodeVMObject(*this));
}
PrivateInodeVMObject::PrivateInodeVMObject(Inode& inode, size_t size)
: InodeVMObject(inode, size)
{
}
PrivateInodeVMObject::PrivateInodeVMObject(const PrivateInodeVMObject& other)
: InodeVMObject(other)
{
}
PrivateInodeVMObject::~PrivateInodeVMObject()
{
}
}

Kernel/VM/PrivateInodeVMObject.h Normal file

@@ -0,0 +1,51 @@
/*
* Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include <AK/Bitmap.h>
#include <Kernel/UnixTypes.h>
#include <Kernel/VM/InodeVMObject.h>
namespace Kernel {
class PrivateInodeVMObject final : public InodeVMObject {
AK_MAKE_NONMOVABLE(PrivateInodeVMObject);
public:
virtual ~PrivateInodeVMObject() override;
static NonnullRefPtr<PrivateInodeVMObject> create_with_inode(Inode&);
virtual NonnullRefPtr<VMObject> clone() override;
private:
explicit PrivateInodeVMObject(Inode&, size_t);
explicit PrivateInodeVMObject(const PrivateInodeVMObject&);
PrivateInodeVMObject& operator=(const PrivateInodeVMObject&) = delete;
};
}

Kernel/VM/Region.cpp

@@ -452,7 +452,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
     LOCKER(vmobject().m_paging_lock);
     cli();
-    auto& inode_vmobject = static_cast<SharedInodeVMObject&>(vmobject());
+    auto& inode_vmobject = static_cast<InodeVMObject&>(vmobject());
     auto& vmobject_physical_page_entry = inode_vmobject.physical_pages()[first_page_index() + page_index_in_region];
 #ifdef PAGE_FAULT_DEBUG

Kernel/VM/SharedInodeVMObject.cpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -25,9 +25,9 @@
  */
 #include <Kernel/FileSystem/Inode.h>
-#include <Kernel/VM/SharedInodeVMObject.h>
 #include <Kernel/VM/MemoryManager.h>
 #include <Kernel/VM/Region.h>
+#include <Kernel/VM/SharedInodeVMObject.h>
 
 namespace Kernel {
@@ -47,15 +47,12 @@ NonnullRefPtr<VMObject> SharedInodeVMObject::clone()
 }
 
 SharedInodeVMObject::SharedInodeVMObject(Inode& inode, size_t size)
-    : VMObject(size)
-    , m_inode(inode)
-    , m_dirty_pages(page_count(), false)
+    : InodeVMObject(inode, size)
 {
 }
 
 SharedInodeVMObject::SharedInodeVMObject(const SharedInodeVMObject& other)
-    : VMObject(other)
-    , m_inode(other.m_inode)
+    : InodeVMObject(other)
 {
 }
@@ -64,135 +61,4 @@ SharedInodeVMObject::~SharedInodeVMObject()
     ASSERT(inode().shared_vmobject() == this);
 }
 
-size_t SharedInodeVMObject::amount_clean() const
-{
-    size_t count = 0;
-    ASSERT(page_count() == (size_t)m_dirty_pages.size());
-    for (size_t i = 0; i < page_count(); ++i) {
-        if (!m_dirty_pages.get(i) && m_physical_pages[i])
-            ++count;
-    }
-    return count * PAGE_SIZE;
-}
-
-size_t SharedInodeVMObject::amount_dirty() const
-{
-    size_t count = 0;
-    for (size_t i = 0; i < m_dirty_pages.size(); ++i) {
-        if (m_dirty_pages.get(i))
-            ++count;
-    }
-    return count * PAGE_SIZE;
-}
-
-void SharedInodeVMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
-{
-    dbg() << "VMObject::inode_size_changed: {" << m_inode->fsid() << ":" << m_inode->index() << "} " << old_size << " -> " << new_size;
-    InterruptDisabler disabler;
-    auto new_page_count = PAGE_ROUND_UP(new_size) / PAGE_SIZE;
-    m_physical_pages.resize(new_page_count);
-    m_dirty_pages.grow(new_page_count, false);
-    // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
-    for_each_region([](auto& region) {
-        region.remap();
-    });
-}
-
-void SharedInodeVMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size, const u8* data)
-{
-    (void)size;
-    (void)data;
-    InterruptDisabler disabler;
-    ASSERT(offset >= 0);
-
-    // FIXME: Only invalidate the parts that actually changed.
-    for (auto& physical_page : m_physical_pages)
-        physical_page = nullptr;
-
-#if 0
-    size_t current_offset = offset;
-    size_t remaining_bytes = size;
-    const u8* data_ptr = data;
-
-    auto to_page_index = [] (size_t offset) -> size_t {
-        return offset / PAGE_SIZE;
-    };
-
-    if (current_offset & PAGE_MASK) {
-        size_t page_index = to_page_index(current_offset);
-        size_t bytes_to_copy = min(size, PAGE_SIZE - (current_offset & PAGE_MASK));
-        if (m_physical_pages[page_index]) {
-            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
-            memcpy(ptr, data_ptr, bytes_to_copy);
-            MM.unquickmap_page();
-        }
-        current_offset += bytes_to_copy;
-        data += bytes_to_copy;
-        remaining_bytes -= bytes_to_copy;
-    }
-
-    for (size_t page_index = to_page_index(current_offset); page_index < m_physical_pages.size(); ++page_index) {
-        size_t bytes_to_copy = PAGE_SIZE - (current_offset & PAGE_MASK);
-        if (m_physical_pages[page_index]) {
-            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
-            memcpy(ptr, data_ptr, bytes_to_copy);
-            MM.unquickmap_page();
-        }
-        current_offset += bytes_to_copy;
-        data += bytes_to_copy;
-    }
-#endif
-
-    // FIXME: Consolidate with inode_size_changed() so we only do a single walk.
-    for_each_region([](auto& region) {
-        region.remap();
-    });
-}
-
-int SharedInodeVMObject::release_all_clean_pages()
-{
-    LOCKER(m_paging_lock);
-    return release_all_clean_pages_impl();
-}
-
-int SharedInodeVMObject::release_all_clean_pages_impl()
-{
-    int count = 0;
-    InterruptDisabler disabler;
-    for (size_t i = 0; i < page_count(); ++i) {
-        if (!m_dirty_pages.get(i) && m_physical_pages[i]) {
-            m_physical_pages[i] = nullptr;
-            ++count;
-        }
-    }
-    for_each_region([](auto& region) {
-        region.remap();
-    });
-    return count;
-}
-
-u32 SharedInodeVMObject::writable_mappings() const
-{
-    u32 count = 0;
-    const_cast<SharedInodeVMObject&>(*this).for_each_region([&](auto& region) {
-        if (region.is_writable())
-            ++count;
-    });
-    return count;
-}
-
-u32 SharedInodeVMObject::executable_mappings() const
-{
-    u32 count = 0;
-    const_cast<SharedInodeVMObject&>(*this).for_each_region([&](auto& region) {
-        if (region.is_executable())
-            ++count;
-    });
-    return count;
-}
 
 }

Kernel/VM/SharedInodeVMObject.h

@@ -28,45 +28,24 @@
 #include <AK/Bitmap.h>
 #include <Kernel/UnixTypes.h>
-#include <Kernel/VM/VMObject.h>
+#include <Kernel/VM/InodeVMObject.h>
 
 namespace Kernel {
 
-class SharedInodeVMObject final : public VMObject {
+class SharedInodeVMObject final : public InodeVMObject {
     AK_MAKE_NONMOVABLE(SharedInodeVMObject);
 
 public:
     virtual ~SharedInodeVMObject() override;
 
     static NonnullRefPtr<SharedInodeVMObject> create_with_inode(Inode&);
     virtual NonnullRefPtr<VMObject> clone() override;
 
-    Inode& inode() { return *m_inode; }
-    const Inode& inode() const { return *m_inode; }
-
-    void inode_contents_changed(Badge<Inode>, off_t, ssize_t, const u8*);
-    void inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size);
-
-    size_t amount_dirty() const;
-    size_t amount_clean() const;
-
-    int release_all_clean_pages();
-
-    u32 writable_mappings() const;
-    u32 executable_mappings() const;
-
 private:
     explicit SharedInodeVMObject(Inode&, size_t);
     explicit SharedInodeVMObject(const SharedInodeVMObject&);
 
     SharedInodeVMObject& operator=(const SharedInodeVMObject&) = delete;
-    SharedInodeVMObject& operator=(SharedInodeVMObject&&) = delete;
-    SharedInodeVMObject(SharedInodeVMObject&&) = delete;
-
-    virtual bool is_inode() const override { return true; }
-
-    int release_all_clean_pages_impl();
-
-    NonnullRefPtr<Inode> m_inode;
-    Bitmap m_dirty_pages;
 };
 
 }