Mirror of https://github.com/LadybirdBrowser/ladybird.git
Kernel: Invalidate file-backed VMO's when inodes are written.
The current strategy is simply to nuke all physical pages and force them to be reloaded from disk. This is obviously not optimal and should eventually be optimized; that should be fairly straightforward.
This commit is contained in:
Parent: af21a45b1a
Commit: ca16d9d98e

Notes (sideshowbarker, 2024-07-19 15:51:58 +09:00):
Author: https://github.com/awesomekling
Commit: https://github.com/SerenityOS/serenity/commit/ca16d9d98e7
7 changed files with 117 additions and 11 deletions
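In outline: Ext2FSInode::write_bytes() now notifies its Inode, which forwards to the file-backed VMObject, which throws away its cached physical pages and remaps every region mapping it; the next access to those pages faults and reloads from disk. A minimal sketch of that invalidation step, with simplified access (the real code is in the VMObject hunk below):

    // Sketch only: condensed from the VMObject changes in this commit.
    void invalidate(VMObject& vmo)
    {
        // Drop every cached physical page; unshared pages are freed.
        for (auto& physical_page : vmo.physical_pages())
            physical_page = nullptr;
        // Remap so stale PTEs become not-present and fault on next access.
        vmo.for_each_region([] (Region& region) {
            MM.remap_region(*region.page_directory(), region);
        });
    }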
@@ -484,6 +484,7 @@ ssize_t Ext2FSInode::write_bytes(off_t offset, size_t count, const byte* data, F
     ASSERT(offset >= 0);
 
     const size_t block_size = fs().block_size();
+    size_t old_size = size();
     size_t new_size = max(static_cast<size_t>(offset) + count, size());
 
     unsigned blocks_needed_before = ceil_div(size(), block_size);
@@ -557,6 +558,10 @@ ssize_t Ext2FSInode::write_bytes(off_t offset, size_t count, const byte* data, F
 
     // NOTE: Make sure the cached block list is up to date!
     m_block_list = move(block_list);
+
+    if (old_size != new_size)
+        inode_size_changed(old_size, new_size);
+    inode_contents_changed(offset, count, data);
     return nwritten;
 }
 
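Note that old_size is captured at the top of write_bytes(), before the write can grow the inode, so the old_size != new_size check at the tail compares pre-write and post-write sizes correctly. The notification order is size first, then contents:

    // Condensed tail of write_bytes() as changed above (comments added).
    if (old_size != new_size)
        inode_size_changed(old_size, new_size);   // grow/shrink the VMO's page list, remap
    inode_contents_changed(offset, count, data);  // drop cached pages, remap
    return nwritten;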
@@ -109,6 +109,18 @@ void Inode::will_be_destroyed()
     flush_metadata();
 }
 
+void Inode::inode_contents_changed(off_t offset, size_t size, const byte* data)
+{
+    if (m_vmo)
+        m_vmo->inode_contents_changed(Badge<Inode>(), offset, size, data);
+}
+
+void Inode::inode_size_changed(size_t old_size, size_t new_size)
+{
+    if (m_vmo)
+        m_vmo->inode_size_changed(Badge<Inode>(), old_size, new_size);
+}
+
 int Inode::set_atime(time_t)
 {
     return -ENOTIMPL;
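The Badge<Inode> argument on these VMObject calls is a capability-style restriction: a Badge<T> can only be constructed by T, so even though the VMObject methods are public, only Inode can actually invoke them. A minimal sketch of the idiom (AK's real Badge may differ in detail):

    // Sketch of the Badge idiom as used here.
    template<typename T>
    class Badge {
        friend T;    // only T can construct a Badge<T>...
        Badge() { }  // ...because the constructor is private.
    };

    // So m_vmo->inode_contents_changed(Badge<Inode>(), ...) compiles inside
    // Inode, and nowhere else.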
@@ -111,6 +111,8 @@ public:
 protected:
     Inode(FS& fs, unsigned index);
     void set_metadata_dirty(bool b) { m_metadata_dirty = b; }
+    void inode_contents_changed(off_t, size_t, const byte*);
+    void inode_size_changed(size_t old_size, size_t new_size);
 
     mutable Lock m_lock;
 
@@ -492,10 +492,11 @@ void MemoryManager::remap_region_page(Region& region, unsigned page_index_in_reg
 #endif
 }
 
-void MemoryManager::remap_region(Process& process, Region& region)
+void MemoryManager::remap_region(PageDirectory& page_directory, Region& region)
 {
     InterruptDisabler disabler;
-    map_region_at_address(process.page_directory(), region, region.laddr(), true);
+    ASSERT(region.page_directory() == &page_directory);
+    map_region_at_address(page_directory, region, region.laddr(), true);
 }
 
 void MemoryManager::map_region_at_address(PageDirectory& page_directory, Region& region, LinearAddress laddr, bool user_allowed)
@@ -587,7 +588,7 @@ RetainPtr<Region> Region::clone()
     // Set up a COW region. The parent (this) region becomes COW as well!
     for (size_t i = 0; i < page_count(); ++i)
         m_cow_map.set(i, true);
-    MM.remap_region(*current, *this);
+    MM.remap_region(current->page_directory(), *this);
     return adopt(*new Region(laddr(), size(), m_vmo->clone(), m_offset_in_vmo, String(m_name), m_readable, m_writable, true));
 }
 
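remap_region() used to take a Process and fetch its page directory; VMObject, though, only knows about regions and their page directories, not processes. The parameter therefore becomes the PageDirectory itself, with a new assertion that the caller passes the directory the region is actually mapped in. Both call shapes appear in this commit:

    MM.remap_region(current->page_directory(), *this);  // Region::clone(), above
    MM.remap_region(*region.page_directory(), region);  // VMObject's region walk, below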
@@ -734,6 +735,91 @@ VMObject::~VMObject()
     MM.unregister_vmo(*this);
 }
 
+template<typename Callback>
+void VMObject::for_each_region(Callback callback)
+{
+    // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
+    // Perhaps VMObject could have a Vector<Region*> with all of its mappers?
+    for (auto* region : MM.m_regions) {
+        if (&region->vmo() == this)
+            callback(*region);
+    }
+}
+
+void VMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
+{
+    InterruptDisabler disabler;
+
+    size_t old_page_count = page_count();
+    m_size = new_size;
+
+    if (page_count() > old_page_count) {
+        // Add null pages and let the fault handler page these in when that day comes.
+        for (size_t i = old_page_count; i < page_count(); ++i)
+            m_physical_pages.append(nullptr);
+    } else {
+        // Prune the no-longer valid pages. I'm not sure this is actually correct behavior.
+        for (size_t i = page_count(); i < old_page_count; ++i)
+            m_physical_pages.take_last();
+    }
+
+    // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
+    for_each_region([] (Region& region) {
+        ASSERT(region.page_directory());
+        MM.remap_region(*region.page_directory(), region);
+    });
+}
+
+void VMObject::inode_contents_changed(Badge<Inode>, off_t offset, size_t size, const byte* data)
+{
+    InterruptDisabler disabler;
+    ASSERT(offset >= 0);
+
+    // FIXME: Only invalidate the parts that actually changed.
+    for (auto& physical_page : m_physical_pages)
+        physical_page = nullptr;
+
+#if 0
+    size_t current_offset = offset;
+    size_t remaining_bytes = size;
+    const byte* data_ptr = data;
+
+    auto to_page_index = [] (size_t offset) -> size_t {
+        return offset / PAGE_SIZE;
+    };
+
+    if (current_offset & PAGE_MASK) {
+        size_t page_index = to_page_index(current_offset);
+        size_t bytes_to_copy = min(size, PAGE_SIZE - (current_offset & PAGE_MASK));
+        if (m_physical_pages[page_index]) {
+            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
+            memcpy(ptr, data_ptr, bytes_to_copy);
+            MM.unquickmap_page();
+        }
+        current_offset += bytes_to_copy;
+        data += bytes_to_copy;
+        remaining_bytes -= bytes_to_copy;
+    }
+
+    for (size_t page_index = to_page_index(current_offset); page_index < m_physical_pages.size(); ++page_index) {
+        size_t bytes_to_copy = PAGE_SIZE - (current_offset & PAGE_MASK);
+        if (m_physical_pages[page_index]) {
+            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
+            memcpy(ptr, data_ptr, bytes_to_copy);
+            MM.unquickmap_page();
+        }
+        current_offset += bytes_to_copy;
+        data += bytes_to_copy;
+    }
+#endif
+
+    // FIXME: Consolidate with inode_size_changed() so we only do a single walk.
+    for_each_region([] (Region& region) {
+        ASSERT(region.page_directory());
+        MM.remap_region(*region.page_directory(), region);
+    });
+}
+
 int Region::commit()
 {
     InterruptDisabler disabler;
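The #if 0 block preserves a started-but-disabled alternative: instead of invalidating, copy the newly written bytes straight into any cached physical pages via quickmap_page()/unquickmap_page(). As written it appears to advance data rather than data_ptr between copies, which may be one reason it was left disabled. The FIXME in for_each_region() flags the bigger cost: every inode change walks every region in the system (MM.m_regions). One possible shape for the suggested Vector<Region*> of mappers, purely illustrative and not part of this commit (add_mapper()/remove_mapper()/m_mappers are invented names):

    // Illustrative sketch only: let each Region register with its VMObject so
    // invalidation walks just the mappers, not every region in the system.
    void VMObject::add_mapper(Region& region) { m_mappers.append(&region); }

    void VMObject::remove_mapper(Region& region)
    {
        for (size_t i = 0; i < m_mappers.size(); ++i) {
            if (m_mappers[i] == &region) {
                m_mappers.remove(i);
                return;
            }
        }
    }

    template<typename Callback>
    void VMObject::for_each_region(Callback callback)
    {
        for (auto* region : m_mappers) // O(mappers) instead of O(all regions)
            callback(*region);
    }

    // VMObject member: Vector<Region*> m_mappers;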
@@ -9,6 +9,7 @@
 #include <AK/Vector.h>
 #include <AK/HashTable.h>
 #include <AK/AKString.h>
+#include <AK/Badge.h>
 #include <Kernel/VirtualFileSystem.h>
 
 #define PAGE_ROUND_UP(x) ((((dword)(x)) + PAGE_SIZE-1) & (~(PAGE_SIZE-1)))
@@ -98,11 +99,17 @@ public:
     const Vector<RetainPtr<PhysicalPage>>& physical_pages() const { return m_physical_pages; }
     Vector<RetainPtr<PhysicalPage>>& physical_pages() { return m_physical_pages; }
 
+    void inode_contents_changed(Badge<Inode>, off_t, size_t, const byte*);
+    void inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size);
+
 private:
     VMObject(RetainPtr<Inode>&&);
     explicit VMObject(VMObject&);
     explicit VMObject(size_t);
     VMObject(PhysicalAddress, size_t);
+
+    template<typename Callback> void for_each_region(Callback);
 
     String m_name;
     bool m_anonymous { false };
     off_t m_inode_offset { 0 };
@@ -225,7 +232,7 @@ public:
     RetainPtr<PhysicalPage> allocate_physical_page(ShouldZeroFill);
     RetainPtr<PhysicalPage> allocate_supervisor_physical_page();
 
-    void remap_region(Process&, Region&);
+    void remap_region(PageDirectory&, Region&);
 
     size_t ram_size() const { return m_ram_size; }
 
@@ -945,6 +945,7 @@ void Process::push_value_on_stack(dword value)
 void Process::crash()
 {
     ASSERT_INTERRUPTS_DISABLED();
+    ASSERT(is_ring3());
     ASSERT(state() != Dead);
     m_termination_signal = SIGSEGV;
     dump_regions();
@@ -278,12 +278,6 @@ void exception_14_handler(RegisterDumpWithExceptionCode& regs)
         }
     };
 
-    if (current->is_ring0()) {
-        dump_registers_and_code();
-        current->dump_regions();
-        HANG;
-    }
-
 #ifdef PAGE_FAULT_DEBUG
     dump_registers_and_code();
 #endif
@@ -296,7 +290,6 @@ void exception_14_handler(RegisterDumpWithExceptionCode& regs)
         current->pid(),
         regs.exception_code & 2 ? "write" : "read",
         faultAddress);
-
     dump_registers_and_code();
     current->crash();
     } else if (response == PageFaultResponse::Continue) {