diff --git a/Kernel/Ext2FileSystem.cpp b/Kernel/Ext2FileSystem.cpp
index bbca1842a33..0a3dbcce1b7 100644
--- a/Kernel/Ext2FileSystem.cpp
+++ b/Kernel/Ext2FileSystem.cpp
@@ -484,6 +484,7 @@ ssize_t Ext2FSInode::write_bytes(off_t offset, size_t count, const byte* data, F
     ASSERT(offset >= 0);
 
     const size_t block_size = fs().block_size();
+    size_t old_size = size();
     size_t new_size = max(static_cast<size_t>(offset) + count, size());
 
     unsigned blocks_needed_before = ceil_div(size(), block_size);
@@ -557,6 +558,10 @@ ssize_t Ext2FSInode::write_bytes(off_t offset, size_t count, const byte* data, F
 
     // NOTE: Make sure the cached block list is up to date!
     m_block_list = move(block_list);
+
+    if (old_size != new_size)
+        inode_size_changed(old_size, new_size);
+    inode_contents_changed(offset, count, data);
 
     return nwritten;
 }
diff --git a/Kernel/FileSystem.cpp b/Kernel/FileSystem.cpp
index 6d7cd7f8952..a02828230cb 100644
--- a/Kernel/FileSystem.cpp
+++ b/Kernel/FileSystem.cpp
@@ -109,6 +109,18 @@ void Inode::will_be_destroyed()
     flush_metadata();
 }
 
+void Inode::inode_contents_changed(off_t offset, size_t size, const byte* data)
+{
+    if (m_vmo)
+        m_vmo->inode_contents_changed(Badge<Inode>(), offset, size, data);
+}
+
+void Inode::inode_size_changed(size_t old_size, size_t new_size)
+{
+    if (m_vmo)
+        m_vmo->inode_size_changed(Badge<Inode>(), old_size, new_size);
+}
+
 int Inode::set_atime(time_t)
 {
     return -ENOTIMPL;
diff --git a/Kernel/FileSystem.h b/Kernel/FileSystem.h
index b06e34bfd88..8a4f90465ed 100644
--- a/Kernel/FileSystem.h
+++ b/Kernel/FileSystem.h
@@ -111,6 +111,8 @@ public:
 protected:
     Inode(FS& fs, unsigned index);
     void set_metadata_dirty(bool b) { m_metadata_dirty = b; }
+    void inode_contents_changed(off_t, size_t, const byte*);
+    void inode_size_changed(size_t old_size, size_t new_size);
 
     mutable Lock m_lock;
diff --git a/Kernel/MemoryManager.cpp b/Kernel/MemoryManager.cpp
index ad087917a17..755f33c0cd2 100644
--- a/Kernel/MemoryManager.cpp
+++ b/Kernel/MemoryManager.cpp
@@ -492,10 +492,11 @@ void MemoryManager::remap_region_page(Region& region, unsigned page_index_in_reg
 #endif
 }
 
-void MemoryManager::remap_region(Process& process, Region& region)
+void MemoryManager::remap_region(PageDirectory& page_directory, Region& region)
 {
     InterruptDisabler disabler;
-    map_region_at_address(process.page_directory(), region, region.laddr(), true);
+    ASSERT(region.page_directory() == &page_directory);
+    map_region_at_address(page_directory, region, region.laddr(), true);
 }
 
 void MemoryManager::map_region_at_address(PageDirectory& page_directory, Region& region, LinearAddress laddr, bool user_allowed)
@@ -587,7 +588,7 @@ RetainPtr<Region> Region::clone()
     // Set up a COW region. The parent (this) region becomes COW as well!
     for (size_t i = 0; i < page_count(); ++i)
         m_cow_map.set(i, true);
-    MM.remap_region(*current, *this);
+    MM.remap_region(current->page_directory(), *this);
     return adopt(*new Region(laddr(), size(), m_vmo->clone(), m_offset_in_vmo, String(m_name), m_readable, m_writable, true));
 }
 
@@ -734,6 +735,91 @@ VMObject::~VMObject()
     MM.unregister_vmo(*this);
 }
 
+template<typename Callback>
+void VMObject::for_each_region(Callback callback)
+{
+    // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
+    //        Perhaps VMObject could have a Vector<Region*> with all of its mappers?
+    for (auto* region : MM.m_regions) {
+        if (&region->vmo() == this)
+            callback(*region);
+    }
+}
+
+void VMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
+{
+    InterruptDisabler disabler;
+
+    size_t old_page_count = page_count();
+    m_size = new_size;
+
+    if (page_count() > old_page_count) {
+        // Add null pages and let the fault handler page these in when that day comes.
+        for (size_t i = old_page_count; i < page_count(); ++i)
+            m_physical_pages.append(nullptr);
+    } else {
+        // Prune the no-longer valid pages. I'm not sure this is actually correct behavior.
+        for (size_t i = page_count(); i < old_page_count; ++i)
+            m_physical_pages.take_last();
+    }
+
+    // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
+    for_each_region([] (Region& region) {
+        ASSERT(region.page_directory());
+        MM.remap_region(*region.page_directory(), region);
+    });
+}
+
+void VMObject::inode_contents_changed(Badge<Inode>, off_t offset, size_t size, const byte* data)
+{
+    InterruptDisabler disabler;
+    ASSERT(offset >= 0);
+
+    // FIXME: Only invalidate the parts that actually changed.
+    for (auto& physical_page : m_physical_pages)
+        physical_page = nullptr;
+
+#if 0
+    size_t current_offset = offset;
+    size_t remaining_bytes = size;
+    const byte* data_ptr = data;
+
+    auto to_page_index = [] (size_t offset) -> size_t {
+        return offset / PAGE_SIZE;
+    };
+
+    if (current_offset & PAGE_MASK) {
+        size_t page_index = to_page_index(current_offset);
+        size_t bytes_to_copy = min(size, PAGE_SIZE - (current_offset & PAGE_MASK));
+        if (m_physical_pages[page_index]) {
+            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
+            memcpy(ptr, data_ptr, bytes_to_copy);
+            MM.unquickmap_page();
+        }
+        current_offset += bytes_to_copy;
+        data_ptr += bytes_to_copy;
+        remaining_bytes -= bytes_to_copy;
+    }
+
+    for (size_t page_index = to_page_index(current_offset); page_index < m_physical_pages.size() && remaining_bytes > 0; ++page_index) {
+        size_t bytes_to_copy = min(remaining_bytes, (size_t)PAGE_SIZE);
+        if (m_physical_pages[page_index]) {
+            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
+            memcpy(ptr, data_ptr, bytes_to_copy);
+            MM.unquickmap_page();
+        }
+        data_ptr += bytes_to_copy;
+        remaining_bytes -= bytes_to_copy;
+    }
+#endif
+
+    // FIXME: Consolidate with inode_size_changed() so we only do a single walk.
+    for_each_region([] (Region& region) {
+        ASSERT(region.page_directory());
+        MM.remap_region(*region.page_directory(), region);
+    });
+}
+
 int Region::commit()
 {
     InterruptDisabler disabler;
diff --git a/Kernel/MemoryManager.h b/Kernel/MemoryManager.h
index 098e27e9380..00965382434 100644
--- a/Kernel/MemoryManager.h
+++ b/Kernel/MemoryManager.h
@@ -9,6 +9,7 @@
 #include
 #include
 #include
+#include <AK/Badge.h>
 #include
 
 #define PAGE_ROUND_UP(x) ((((dword)(x)) + PAGE_SIZE-1) & (~(PAGE_SIZE-1)))
@@ -98,11 +99,17 @@ public:
     const Vector<RetainPtr<PhysicalPage>>& physical_pages() const { return m_physical_pages; }
     Vector<RetainPtr<PhysicalPage>>& physical_pages() { return m_physical_pages; }
 
+    void inode_contents_changed(Badge<Inode>, off_t, size_t, const byte*);
+    void inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size);
+
 private:
     VMObject(RetainPtr<Inode>&&);
     explicit VMObject(VMObject&);
     explicit VMObject(size_t);
     VMObject(PhysicalAddress, size_t);
+
+    template<typename Callback> void for_each_region(Callback);
+
     String m_name;
     bool m_anonymous { false };
     off_t m_inode_offset { 0 };
@@ -225,7 +232,7 @@ public:
     RetainPtr<PhysicalPage> allocate_physical_page(ShouldZeroFill);
     RetainPtr<PhysicalPage> allocate_supervisor_physical_page();
 
-    void remap_region(Process&, Region&);
+    void remap_region(PageDirectory&, Region&);
 
     size_t ram_size() const { return m_ram_size; }
diff --git a/Kernel/Process.cpp b/Kernel/Process.cpp
index f7672988c70..b79df0b6966 100644
--- a/Kernel/Process.cpp
+++ b/Kernel/Process.cpp
@@ -945,6 +945,7 @@ void Process::push_value_on_stack(dword value)
 void Process::crash()
 {
     ASSERT_INTERRUPTS_DISABLED();
+    ASSERT(is_ring3());
     ASSERT(state() != Dead);
     m_termination_signal = SIGSEGV;
     dump_regions();
diff --git a/Kernel/i386.cpp b/Kernel/i386.cpp
index beb68fac53c..dcee88e3037 100644
--- a/Kernel/i386.cpp
+++ b/Kernel/i386.cpp
@@ -278,12 +278,6 @@ void exception_14_handler(RegisterDumpWithExceptionCode& regs)
         }
     };
 
-    if (current->is_ring0()) {
-        dump_registers_and_code();
-        current->dump_regions();
-        HANG;
-    }
-
 #ifdef PAGE_FAULT_DEBUG
     dump_registers_and_code();
 #endif
@@ -296,7 +290,6 @@ void exception_14_handler(RegisterDumpWithExceptionCode& regs)
         current->pid(),
         regs.exception_code & 2 ? "write" : "read",
         faultAddress);
-        dump_registers_and_code();
         current->crash();
     } else if (response == PageFaultResponse::Continue) {
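
A note on the for_each_region() FIXME above: one possible follow-up is for each
VMObject to keep its own Vector<Region*> of mappers, maintained as Regions are
mapped and unmapped, so that an inode change only touches the regions that
actually map it instead of walking MM.m_regions. Below is a rough sketch under
that assumption (m_mappers, add_mapper(), and remove_mapper() are hypothetical
names, not part of this patch):

    #include <AK/Vector.h>

    class Region;

    class VMObject {
    public:
        // Hypothetical hooks: Region's map/unmap paths would call these.
        void add_mapper(Region& region) { m_mappers.append(&region); }
        void remove_mapper(Region& region)
        {
            // Linear scan; a VMObject rarely has more than a handful of mappers.
            for (size_t i = 0; i < m_mappers.size(); ++i) {
                if (m_mappers[i] == &region) {
                    m_mappers.remove(i);
                    return;
                }
            }
        }

        template<typename Callback>
        void for_each_region(Callback callback)
        {
            // No global region walk: visit only this object's own mappers.
            for (auto* region : m_mappers)
                callback(*region);
        }

    private:
        Vector<Region*> m_mappers;
    };

This shape would also make it easy to consolidate inode_size_changed() and
inode_contents_changed() into a single walk, as the other FIXME suggests, by
remapping each mapper exactly once per notification.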