Kernel: Add Region helpers for accessing underlying physical pages

Since a Region is basically a view into a potentially larger VMObject,
it was always necessary to apply the Region's starting offset when
accessing its underlying physical pages.

Until now, every caller had to do that manually. This patch adds
Region::physical_page() for read-only access, and physical_page_slot()
for when you want a mutable reference to the RefPtr<PhysicalPage> itself.

A lot of code is simplified by making use of this.
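
To illustrate (a minimal sketch, not code from this commit), given a
Region `region` and a page index `i` relative to the start of the Region:

    // Before: callers offset into the VMObject's page list by hand.
    auto& slot = region.vmobject().physical_pages()[region.first_page_index() + i];

    // After: the helpers fold in first_page_index() and assert that the
    // index stays within the Region's page_count().
    const PhysicalPage* page = region.physical_page(i);              // read-only
    RefPtr<PhysicalPage>& page_slot = region.physical_page_slot(i);  // mutable slot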
Andreas Kling · 5 years ago · commit 9c856811b2

+ 1 - 1
Kernel/Devices/SB16.cpp

@@ -179,7 +179,7 @@ ssize_t SB16::read(FileDescription&, size_t, u8*, ssize_t)
 
 void SB16::dma_start(uint32_t length)
 {
-    const auto addr = m_dma_region->vmobject().physical_pages()[0]->paddr().get();
+    const auto addr = m_dma_region->physical_page(0)->paddr().get();
     const u8 channel = 5; // 16-bit samples use DMA channel 5 (on the master DMA controller)
     const u8 mode = 0;
 

+ 3 - 4
Kernel/FileSystem/ProcFS.cpp

@@ -319,11 +319,10 @@ Optional<KBuffer> procfs$pid_vm(InodeIdentifier identifier)
 
         StringBuilder pagemap_builder;
         for (size_t i = 0; i < region.page_count(); ++i) {
-            auto page_index = region.first_page_index() + i;
-            auto& physical_page_slot = region.vmobject().physical_pages()[page_index];
-            if (!physical_page_slot)
+            auto* page = region.physical_page(i);
+            if (!page)
                 pagemap_builder.append('N');
-            else if (physical_page_slot == MM.shared_zero_page())
+            else if (page->is_shared_zero_page())
                 pagemap_builder.append('Z');
             else
                 pagemap_builder.append('P');

+ 12 - 8
Kernel/Net/E1000NetworkAdapter.cpp

@@ -279,12 +279,14 @@ void E1000NetworkAdapter::initialize_rx_descriptors()
     auto* rx_descriptors = (e1000_tx_desc*)m_rx_descriptors_region->vaddr().as_ptr();
     for (int i = 0; i < number_of_rx_descriptors; ++i) {
         auto& descriptor = rx_descriptors[i];
-        m_rx_buffers_regions.append(MM.allocate_contiguous_kernel_region(PAGE_ROUND_UP(8192), "E1000 RX buffer", Region::Access::Read | Region::Access::Write));
-        descriptor.addr = m_rx_buffers_regions[i]->vmobject().physical_pages()[0]->paddr().get();
+        auto region = MM.allocate_contiguous_kernel_region(8192, "E1000 RX buffer", Region::Access::Read | Region::Access::Write);
+        ASSERT(region);
+        m_rx_buffers_regions.append(region.release_nonnull());
+        descriptor.addr = m_rx_buffers_regions[i].physical_page(0)->paddr().get();
         descriptor.status = 0;
     }
 
-    out32(REG_RXDESCLO, m_rx_descriptors_region->vmobject().physical_pages()[0]->paddr().get());
+    out32(REG_RXDESCLO, m_rx_descriptors_region->physical_page(0)->paddr().get());
     out32(REG_RXDESCHI, 0);
     out32(REG_RXDESCLEN, number_of_rx_descriptors * sizeof(e1000_rx_desc));
     out32(REG_RXDESCHEAD, 0);
@@ -298,12 +300,14 @@ void E1000NetworkAdapter::initialize_tx_descriptors()
     auto* tx_descriptors = (e1000_tx_desc*)m_tx_descriptors_region->vaddr().as_ptr();
     for (int i = 0; i < number_of_tx_descriptors; ++i) {
         auto& descriptor = tx_descriptors[i];
-        m_tx_buffers_regions.append(MM.allocate_contiguous_kernel_region(PAGE_ROUND_UP(8192), "E1000 TX buffer", Region::Access::Read | Region::Access::Write));
-        descriptor.addr = m_tx_buffers_regions[i]->vmobject().physical_pages()[0]->paddr().get();
+        auto region = MM.allocate_contiguous_kernel_region(8192, "E1000 TX buffer", Region::Access::Read | Region::Access::Write);
+        ASSERT(region);
+        m_tx_buffers_regions.append(region.release_nonnull());
+        descriptor.addr = m_tx_buffers_regions[i].physical_page(0)->paddr().get();
         descriptor.cmd = 0;
     }
 
-    out32(REG_TXDESCLO, m_tx_descriptors_region->vmobject().physical_pages()[0]->paddr().get());
+    out32(REG_TXDESCLO, m_tx_descriptors_region->physical_page(0)->paddr().get());
     out32(REG_TXDESCHI, 0);
     out32(REG_TXDESCLEN, number_of_tx_descriptors * sizeof(e1000_tx_desc));
     out32(REG_TXDESCHEAD, 0);
@@ -392,7 +396,7 @@ void E1000NetworkAdapter::send_raw(const u8* data, size_t length)
     auto* tx_descriptors = (e1000_tx_desc*)m_tx_descriptors_region->vaddr().as_ptr();
     auto& descriptor = tx_descriptors[tx_current];
     ASSERT(length <= 8192);
-    auto* vptr = (void*)m_tx_buffers_regions[tx_current]->vaddr().as_ptr();
+    auto* vptr = (void*)m_tx_buffers_regions[tx_current].vaddr().as_ptr();
     memcpy(vptr, data, length);
     descriptor.length = length;
     descriptor.status = 0;
@@ -427,7 +431,7 @@ void E1000NetworkAdapter::receive()
         rx_current = (rx_current + 1) % number_of_rx_descriptors;
         if (!(rx_descriptors[rx_current].status & 1))
             break;
-        auto* buffer = m_rx_buffers_regions[rx_current]->vaddr().as_ptr();
+        auto* buffer = m_rx_buffers_regions[rx_current].vaddr().as_ptr();
         u16 length = rx_descriptors[rx_current].length;
 #ifdef E1000_DEBUG
         klog() << "E1000: Received 1 packet @ " << buffer << " (" << length << ") bytes!";

+ 3 - 2
Kernel/Net/E1000NetworkAdapter.h

@@ -26,6 +26,7 @@
 
 #pragma once
 
+#include <AK/NonnullOwnPtrVector.h>
 #include <AK/OwnPtr.h>
 #include <Kernel/Interrupts/IRQHandler.h>
 #include <Kernel/Net/NetworkAdapter.h>
@@ -96,8 +97,8 @@ private:
     VirtualAddress m_mmio_base;
     OwnPtr<Region> m_rx_descriptors_region;
     OwnPtr<Region> m_tx_descriptors_region;
-    Vector<OwnPtr<Region>> m_rx_buffers_regions;
-    Vector<OwnPtr<Region>> m_tx_buffers_regions;
+    NonnullOwnPtrVector<Region> m_rx_buffers_regions;
+    NonnullOwnPtrVector<Region> m_tx_buffers_regions;
     OwnPtr<Region> m_mmio_region;
     u8 m_interrupt_line { 0 };
     bool m_has_eeprom { false };

+ 4 - 4
Kernel/Net/RTL8139NetworkAdapter.cpp

@@ -158,11 +158,11 @@ RTL8139NetworkAdapter::RTL8139NetworkAdapter(PCI::Address address, u8 irq)
     // we add space to account for overhang from the last packet - the rtl8139
     // can optionally guarantee that packets will be contiguous by
     // purposefully overrunning the rx buffer
-    klog() << "RTL8139: RX buffer: " << m_rx_buffer->vmobject().physical_pages()[0]->paddr();
+    klog() << "RTL8139: RX buffer: " << m_rx_buffer->physical_page(0)->paddr();
 
     for (int i = 0; i < RTL8139_TX_BUFFER_COUNT; i++) {
         m_tx_buffers.append(MM.allocate_contiguous_kernel_region(PAGE_ROUND_UP(TX_BUFFER_SIZE), "RTL8139 TX", Region::Access::Write | Region::Access::Read));
-        klog() << "RTL8139: TX buffer " << i << ": " << m_tx_buffers[i]->vmobject().physical_pages()[0]->paddr();
+        klog() << "RTL8139: TX buffer " << i << ": " << m_tx_buffers[i]->physical_page(0)->paddr();
     }
 
     reset();
@@ -250,7 +250,7 @@ void RTL8139NetworkAdapter::reset()
     // device might be in sleep mode, this will take it out
     out8(REG_CONFIG1, 0);
     // set up rx buffer
-    out32(REG_RXBUF, m_rx_buffer->vmobject().physical_pages()[0]->paddr().get());
+    out32(REG_RXBUF, m_rx_buffer->physical_page(0)->paddr().get());
     // reset missed packet counter
     out8(REG_MPC, 0);
     // "basic mode control register" options - 100mbit, full duplex, auto
@@ -268,7 +268,7 @@ void RTL8139NetworkAdapter::reset()
     out32(REG_TXCFG, TXCFG_TXRR_ZERO | TXCFG_MAX_DMA_1K | TXCFG_IFG11);
     // tell the chip where we want it to DMA from for outgoing packets.
     for (int i = 0; i < 4; i++)
-        out32(REG_TXADDR0 + (i * 4), m_tx_buffers[i]->vmobject().physical_pages()[0]->paddr().get());
+        out32(REG_TXADDR0 + (i * 4), m_tx_buffers[i]->physical_page(0)->paddr().get());
     // re-lock config registers
     out8(REG_CFG9346, CFG9346_NONE);
     // enable rx/tx again in case they got turned off (apparently some cards

+ 1 - 1
Kernel/PCI/MMIOAccess.cpp

@@ -141,7 +141,7 @@ void MMIOAccess::map_device(Address address)
     dbg() << "PCI: Mapping device @ pci (" << String::format("%w", address.seg()) << ":" << String::format("%b", address.bus()) << ":" << String::format("%b", address.slot()) << "." << String::format("%b", address.function()) << ")"
           << " V 0x" << String::format("%x", m_mmio_window_region->vaddr().get()) << " P 0x" << String::format("%x", device_physical_mmio_space.get());
 #endif
-    m_mmio_window_region->vmobject().physical_pages()[0] = PhysicalPage::create(device_physical_mmio_space, false, false);
+    m_mmio_window_region->physical_page_slot(0) = PhysicalPage::create(device_physical_mmio_space, false, false);
     m_mmio_window_region->remap();
     m_mapped_address = address;
 }

+ 28 - 28
Kernel/VM/Region.cpp

@@ -131,7 +131,7 @@ bool Region::commit(size_t page_index)
 {
     ASSERT(vmobject().is_anonymous() || vmobject().is_purgeable());
     InterruptDisabler disabler;
-    auto& vmobject_physical_page_entry = vmobject().physical_pages()[first_page_index() + page_index];
+    auto& vmobject_physical_page_entry = physical_page_slot(page_index);
     if (!vmobject_physical_page_entry.is_null() && !vmobject_physical_page_entry->is_shared_zero_page())
         return true;
     auto physical_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
@@ -165,8 +165,8 @@ size_t Region::amount_resident() const
 {
     size_t bytes = 0;
     for (size_t i = 0; i < page_count(); ++i) {
-        auto& physical_page = m_vmobject->physical_pages()[first_page_index() + i];
-        if (physical_page && !physical_page->is_shared_zero_page())
+        auto* page = physical_page(i);
+        if (page && !page->is_shared_zero_page())
             bytes += PAGE_SIZE;
     }
     return bytes;
@@ -176,8 +176,8 @@ size_t Region::amount_shared() const
 {
     size_t bytes = 0;
     for (size_t i = 0; i < page_count(); ++i) {
-        auto& physical_page = m_vmobject->physical_pages()[first_page_index() + i];
-        if (physical_page && physical_page->ref_count() > 1 && !physical_page->is_shared_zero_page())
+        auto* page = physical_page(i);
+        if (page && page->ref_count() > 1 && !page->is_shared_zero_page())
             bytes += PAGE_SIZE;
     }
     return bytes;
@@ -199,8 +199,8 @@ NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, NonnullRefP
 
 bool Region::should_cow(size_t page_index) const
 {
-    auto& slot = vmobject().physical_pages()[page_index];
-    if (slot && slot->is_shared_zero_page())
+    auto* page = physical_page(page_index);
+    if (page && page->is_shared_zero_page())
         return true;
     if (m_shared)
         return false;
@@ -224,12 +224,12 @@ void Region::map_individual_page_impl(size_t page_index)
 {
     auto page_vaddr = vaddr().offset(page_index * PAGE_SIZE);
     auto& pte = MM.ensure_pte(*m_page_directory, page_vaddr);
-    auto& physical_page = vmobject().physical_pages()[first_page_index() + page_index];
-    if (!physical_page || (!is_readable() && !is_writable())) {
+    auto* page = physical_page(page_index);
+    if (!page || (!is_readable() && !is_writable())) {
         pte.clear();
     } else {
         pte.set_cache_disabled(!m_cacheable);
-        pte.set_physical_page_base(physical_page->paddr().get());
+        pte.set_physical_page_base(page->paddr().get());
         pte.set_present(true);
         if (should_cow(page_index))
             pte.set_writable(false);
@@ -239,7 +239,7 @@ void Region::map_individual_page_impl(size_t page_index)
             pte.set_execute_disabled(!is_executable());
         pte.set_user_allowed(is_user_accessible());
 #ifdef MM_DEBUG
-        dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << ", PTE=" << (void*)pte.raw() << "{" << &pte << "}) " << name() << " " << page_vaddr << " => " << physical_page->paddr() << " (@" << physical_page.ptr() << ")";
+        dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << ", PTE=" << (void*)pte.raw() << "{" << &pte << "}) " << name() << " " << page_vaddr << " => " << page->paddr() << " (@" << page << ")";
 #endif
     }
     MM.flush_tlb(page_vaddr);
@@ -249,7 +249,7 @@ void Region::remap_page(size_t page_index)
 {
     ASSERT(m_page_directory);
     InterruptDisabler disabler;
-    ASSERT(vmobject().physical_pages()[first_page_index() + page_index]);
+    ASSERT(physical_page(page_index));
     map_individual_page_impl(page_index);
 }
 
@@ -263,8 +263,8 @@ void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
         pte.clear();
         MM.flush_tlb(vaddr);
 #ifdef MM_DEBUG
-        auto& physical_page = vmobject().physical_pages()[first_page_index() + i];
-        dbg() << "MM: >> Unmapped " << vaddr << " => P" << String::format("%p", physical_page ? physical_page->paddr().get() : 0) << " <<";
+        auto* page = physical_page(i);
+        dbg() << "MM: >> Unmapped " << vaddr << " => P" << String::format("%p", page ? page->paddr().get() : 0) << " <<";
 #endif
     }
     if (deallocate_range == ShouldDeallocateVirtualMemoryRange::Yes)
@@ -315,7 +315,7 @@ PageFaultResponse Region::handle_fault(const PageFault& fault)
         }
 #ifdef MAP_SHARED_ZERO_PAGE_LAZILY
         if (fault.is_read()) {
-            vmobject().physical_pages()[first_page_index() + page_index_in_region] = MM.shared_zero_page();
+            physical_page_slot(page_index_in_region) = MM.shared_zero_page();
             remap_page(page_index_in_region);
             return PageFaultResponse::Continue;
         }
@@ -330,7 +330,7 @@ PageFaultResponse Region::handle_fault(const PageFault& fault)
 #ifdef PAGE_FAULT_DEBUG
         dbg() << "PV(cow) fault in Region{" << this << "}[" << page_index_in_region << "]";
 #endif
-        if (vmobject().physical_pages()[first_page_index() + page_index_in_region]->is_shared_zero_page()) {
+        if (physical_page(page_index_in_region)->is_shared_zero_page()) {
 #ifdef PAGE_FAULT_DEBUG
             dbg() << "NP(zero) fault in Region{" << this << "}[" << page_index_in_region << "]";
 #endif
@@ -351,9 +351,9 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
     LOCKER(vmobject().m_paging_lock);
     cli();
 
-    auto& vmobject_physical_page_entry = vmobject().physical_pages()[first_page_index() + page_index_in_region];
+    auto& page_slot = physical_page_slot(page_index_in_region);
 
-    if (!vmobject_physical_page_entry.is_null() && !vmobject_physical_page_entry->is_shared_zero_page()) {
+    if (!page_slot.is_null() && !page_slot->is_shared_zero_page()) {
 #ifdef PAGE_FAULT_DEBUG
         dbg() << "MM: zero_page() but page already present. Fine with me!";
 #endif
@@ -364,8 +364,8 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
     if (Thread::current)
         Thread::current->did_zero_fault();
 
-    auto physical_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
-    if (physical_page.is_null()) {
+    auto page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
+    if (page.is_null()) {
         klog() << "MM: handle_zero_fault was unable to allocate a physical page";
         return PageFaultResponse::ShouldCrash;
     }
@@ -373,7 +373,7 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
 #ifdef PAGE_FAULT_DEBUG
     dbg() << "      >> ZERO " << physical_page->paddr();
 #endif
-    vmobject_physical_page_entry = move(physical_page);
+    page_slot = move(page);
     remap_page(page_index_in_region);
     return PageFaultResponse::Continue;
 }
@@ -381,8 +381,8 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
 PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
 {
     ASSERT_INTERRUPTS_DISABLED();
-    auto& vmobject_physical_page_entry = vmobject().physical_pages()[first_page_index() + page_index_in_region];
-    if (vmobject_physical_page_entry->ref_count() == 1) {
+    auto& page_slot = physical_page_slot(page_index_in_region);
+    if (page_slot->ref_count() == 1) {
 #ifdef PAGE_FAULT_DEBUG
         dbg() << "    >> It's a COW page but nobody is sharing it anymore. Remap r/w";
 #endif
@@ -397,19 +397,19 @@ PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
 #ifdef PAGE_FAULT_DEBUG
     dbg() << "    >> It's a COW page and it's time to COW!";
 #endif
-    auto physical_page_to_copy = move(vmobject_physical_page_entry);
-    auto physical_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
-    if (physical_page.is_null()) {
+    auto physical_page_to_copy = move(page_slot);
+    auto page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
+    if (page.is_null()) {
         klog() << "MM: handle_cow_fault was unable to allocate a physical page";
         return PageFaultResponse::ShouldCrash;
     }
-    u8* dest_ptr = MM.quickmap_page(*physical_page);
+    u8* dest_ptr = MM.quickmap_page(*page);
     const u8* src_ptr = vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
 #ifdef PAGE_FAULT_DEBUG
     dbg() << "      >> COW " << physical_page->paddr() << " <- " << physical_page_to_copy->paddr();
 #endif
     copy_from_user(dest_ptr, src_ptr, PAGE_SIZE);
-    vmobject_physical_page_entry = move(physical_page);
+    page_slot = move(page);
     MM.unquickmap_page();
     set_should_cow(page_index_in_region, false);
     remap_page(page_index_in_region);

+ 12 - 0
Kernel/VM/Region.h

@@ -128,6 +128,18 @@ public:
         return size() / PAGE_SIZE;
     }
 
+    const PhysicalPage* physical_page(size_t index) const
+    {
+        ASSERT(index < page_count());
+        return vmobject().physical_pages()[first_page_index() + index];
+    }
+
+    RefPtr<PhysicalPage>& physical_page_slot(size_t index)
+    {
+        ASSERT(index < page_count());
+        return vmobject().physical_pages()[first_page_index() + index];
+    }
+
     size_t offset_in_vmobject() const
     {
         return m_offset_in_vmobject;
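
Usage sketch (hypothetical caller, mirroring the MMIOAccess and
handle_fault changes above): because physical_page_slot() returns the
RefPtr<PhysicalPage> slot by reference, assigning to it swaps out the
physical page backing that part of the Region, after which the mapping
can be refreshed:

    // Point the first page of `region` at the shared zero page, then
    // rebuild the page table entries to match.
    region->physical_page_slot(0) = MM.shared_zero_page();
    region->remap();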