@@ -1,10 +1,9 @@
-#include "MemoryManager.h"
+#include <Kernel/VM/MemoryManager.h>
 #include <AK/Assertions.h>
 #include <AK/kstdio.h>
 #include "i386.h"
 #include "StdLib.h"
 #include "Process.h"
-#include <LibC/errno_numbers.h>
 #include "CMOS.h"
 
 //#define MM_DEBUG
@@ -43,16 +42,6 @@ MemoryManager::~MemoryManager()
 {
 }
 
-PageDirectory::PageDirectory(PhysicalAddress paddr)
-{
-    m_directory_page = PhysicalPage::create_eternal(paddr, true);
-}
-
-PageDirectory::PageDirectory()
-{
-    MM.populate_page_directory(*this);
-}
-
 void MemoryManager::populate_page_directory(PageDirectory& page_directory)
 {
     page_directory.m_directory_page = allocate_supervisor_physical_page();
@@ -296,25 +285,6 @@ bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
     return true;
 }
 
-bool Region::page_in()
-{
-    ASSERT(m_page_directory);
-    ASSERT(!vmo().is_anonymous());
-    ASSERT(vmo().inode());
-#ifdef MM_DEBUG
-    dbgprintf("MM: page_in %u pages\n", page_count());
-#endif
-    for (size_t i = 0; i < page_count(); ++i) {
-        auto& vmo_page = vmo().physical_pages()[first_page_index() + i];
-        if (vmo_page.is_null()) {
-            bool success = MM.page_in_from_inode(*this, i);
-            if (!success)
-                return false;
-        }
-        MM.remap_region_page(*this, i, true);
-    }
-    return true;
-}
 
 bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_region)
 {
@@ -625,305 +595,6 @@ bool MemoryManager::validate_user_write(const Process& process, LinearAddress laddr)
     return region && region->is_writable();
 }
 
-Retained<Region> Region::clone()
-{
-    ASSERT(current);
-    if (m_shared || (m_readable && !m_writable)) {
-#ifdef MM_DEBUG
-        dbgprintf("%s<%u> Region::clone(): sharing %s (L%x)\n",
-                  current->process().name().characters(),
-                  current->pid(),
-                  m_name.characters(),
-                  laddr().get());
-#endif
-        // Create a new region backed by the same VMObject.
-        return adopt(*new Region(laddr(), size(), m_vmo.copy_ref(), m_offset_in_vmo, String(m_name), m_readable, m_writable));
-    }
-
-#ifdef MM_DEBUG
-    dbgprintf("%s<%u> Region::clone(): cowing %s (L%x)\n",
-              current->process().name().characters(),
-              current->pid(),
-              m_name.characters(),
-              laddr().get());
-#endif
-    // Set up a COW region. The parent (this) region becomes COW as well!
-    for (size_t i = 0; i < page_count(); ++i)
-        m_cow_map.set(i, true);
-    MM.remap_region(current->process().page_directory(), *this);
-    return adopt(*new Region(laddr(), size(), m_vmo->clone(), m_offset_in_vmo, String(m_name), m_readable, m_writable, true));
-}
-
-Region::Region(LinearAddress a, size_t s, String&& n, bool r, bool w, bool cow)
-    : m_laddr(a)
-    , m_size(s)
-    , m_vmo(VMObject::create_anonymous(s))
-    , m_name(move(n))
-    , m_readable(r)
-    , m_writable(w)
-    , m_cow_map(Bitmap::create(m_vmo->page_count(), cow))
-{
-    m_vmo->set_name(m_name);
-    MM.register_region(*this);
-}
-
-Region::Region(LinearAddress a, size_t s, RetainPtr<Inode>&& inode, String&& n, bool r, bool w)
-    : m_laddr(a)
-    , m_size(s)
-    , m_vmo(VMObject::create_file_backed(move(inode)))
-    , m_name(move(n))
-    , m_readable(r)
-    , m_writable(w)
-    , m_cow_map(Bitmap::create(m_vmo->page_count()))
-{
-    MM.register_region(*this);
-}
-
-Region::Region(LinearAddress a, size_t s, Retained<VMObject>&& vmo, size_t offset_in_vmo, String&& n, bool r, bool w, bool cow)
-    : m_laddr(a)
-    , m_size(s)
-    , m_offset_in_vmo(offset_in_vmo)
-    , m_vmo(move(vmo))
-    , m_name(move(n))
-    , m_readable(r)
-    , m_writable(w)
-    , m_cow_map(Bitmap::create(m_vmo->page_count(), cow))
-{
-    MM.register_region(*this);
-}
-
-Region::~Region()
-{
-    if (m_page_directory) {
-        MM.unmap_region(*this);
-        ASSERT(!m_page_directory);
-    }
-    MM.unregister_region(*this);
-}
-
-Retained<PhysicalPage> PhysicalPage::create_eternal(PhysicalAddress paddr, bool supervisor)
-{
-    void* slot = kmalloc_eternal(sizeof(PhysicalPage));
-    new (slot) PhysicalPage(paddr, supervisor);
-    return adopt(*(PhysicalPage*)slot);
-}
-
-Retained<PhysicalPage> PhysicalPage::create(PhysicalAddress paddr, bool supervisor)
-{
-    void* slot = kmalloc(sizeof(PhysicalPage));
-    new (slot) PhysicalPage(paddr, supervisor, false);
-    return adopt(*(PhysicalPage*)slot);
-}
-
-PhysicalPage::PhysicalPage(PhysicalAddress paddr, bool supervisor, bool may_return_to_freelist)
-    : m_may_return_to_freelist(may_return_to_freelist)
-    , m_supervisor(supervisor)
-    , m_paddr(paddr)
-{
-    if (supervisor)
-        ++MemoryManager::s_super_physical_pages_in_existence;
-    else
-        ++MemoryManager::s_user_physical_pages_in_existence;
-}
-
-void PhysicalPage::return_to_freelist()
-{
-    ASSERT((paddr().get() & ~PAGE_MASK) == 0);
-    InterruptDisabler disabler;
-    m_retain_count = 1;
-    if (m_supervisor)
-        MM.m_free_supervisor_physical_pages.append(adopt(*this));
-    else
-        MM.m_free_physical_pages.append(adopt(*this));
-#ifdef MM_DEBUG
-    dbgprintf("MM: P%x released to freelist\n", m_paddr.get());
-#endif
-}
-
-Retained<VMObject> VMObject::create_file_backed(RetainPtr<Inode>&& inode)
-{
-    InterruptDisabler disabler;
-    if (inode->vmo())
-        return *inode->vmo();
-    auto vmo = adopt(*new VMObject(move(inode)));
-    vmo->inode()->set_vmo(*vmo);
-    return vmo;
-}
-
-Retained<VMObject> VMObject::create_anonymous(size_t size)
-{
-    size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
-    return adopt(*new VMObject(size));
-}
-
-Retained<VMObject> VMObject::create_for_physical_range(PhysicalAddress paddr, size_t size)
-{
-    size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
-    auto vmo = adopt(*new VMObject(paddr, size));
-    vmo->m_allow_cpu_caching = false;
-    return vmo;
-}
-
-Retained<VMObject> VMObject::clone()
-{
-    return adopt(*new VMObject(*this));
-}
-
-VMObject::VMObject(VMObject& other)
-    : m_name(other.m_name)
-    , m_anonymous(other.m_anonymous)
-    , m_inode_offset(other.m_inode_offset)
-    , m_size(other.m_size)
-    , m_inode(other.m_inode)
-    , m_physical_pages(other.m_physical_pages)
-{
-    MM.register_vmo(*this);
-}
-
-VMObject::VMObject(size_t size)
-    : m_anonymous(true)
-    , m_size(size)
-{
-    MM.register_vmo(*this);
-    m_physical_pages.resize(page_count());
-}
-
-VMObject::VMObject(PhysicalAddress paddr, size_t size)
-    : m_anonymous(true)
-    , m_size(size)
-{
-    MM.register_vmo(*this);
-    for (size_t i = 0; i < size; i += PAGE_SIZE) {
-        m_physical_pages.append(PhysicalPage::create(paddr.offset(i), false));
-    }
-    ASSERT(m_physical_pages.size() == page_count());
-}
-
-
-VMObject::VMObject(RetainPtr<Inode>&& inode)
-    : m_inode(move(inode))
-{
-    ASSERT(m_inode);
-    m_size = ceil_div(m_inode->size(), PAGE_SIZE) * PAGE_SIZE;
-    m_physical_pages.resize(page_count());
-    MM.register_vmo(*this);
-}
-
-VMObject::~VMObject()
-{
-    if (m_inode)
-        ASSERT(m_inode->vmo() == this);
-    MM.unregister_vmo(*this);
-}
-
-template<typename Callback>
-void VMObject::for_each_region(Callback callback)
-{
-    // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
-    // Perhaps VMObject could have a Vector<Region*> with all of his mappers?
-    for (auto* region : MM.m_regions) {
-        if (&region->vmo() == this)
-            callback(*region);
-    }
-}
-
-void VMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
-{
-    (void)old_size;
-    InterruptDisabler disabler;
-
-    size_t old_page_count = page_count();
-    m_size = new_size;
-
-    if (page_count() > old_page_count) {
-        // Add null pages and let the fault handler page these in when that day comes.
-        for (size_t i = old_page_count; i < page_count(); ++i)
-            m_physical_pages.append(nullptr);
-    } else {
-        // Prune the no-longer valid pages. I'm not sure this is actually correct behavior.
-        for (size_t i = page_count(); i < old_page_count; ++i)
-            m_physical_pages.take_last();
-    }
-
-    // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
-    for_each_region([] (Region& region) {
-        ASSERT(region.page_directory());
-        MM.remap_region(*region.page_directory(), region);
-    });
-}
-
-void VMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size, const byte* data)
-{
-    (void)size;
-    (void)data;
-    InterruptDisabler disabler;
-    ASSERT(offset >= 0);
-
-    // FIXME: Only invalidate the parts that actually changed.
-    for (auto& physical_page : m_physical_pages)
-        physical_page = nullptr;
-
-#if 0
-    size_t current_offset = offset;
-    size_t remaining_bytes = size;
-    const byte* data_ptr = data;
-
-    auto to_page_index = [] (size_t offset) -> size_t {
-        return offset / PAGE_SIZE;
-    };
-
-    if (current_offset & PAGE_MASK) {
-        size_t page_index = to_page_index(current_offset);
-        size_t bytes_to_copy = min(size, PAGE_SIZE - (current_offset & PAGE_MASK));
-        if (m_physical_pages[page_index]) {
-            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
-            memcpy(ptr, data_ptr, bytes_to_copy);
-            MM.unquickmap_page();
-        }
-        current_offset += bytes_to_copy;
-        data += bytes_to_copy;
-        remaining_bytes -= bytes_to_copy;
-    }
-
-    for (size_t page_index = to_page_index(current_offset); page_index < m_physical_pages.size(); ++page_index) {
-        size_t bytes_to_copy = PAGE_SIZE - (current_offset & PAGE_MASK);
-        if (m_physical_pages[page_index]) {
-            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
-            memcpy(ptr, data_ptr, bytes_to_copy);
-            MM.unquickmap_page();
-        }
-        current_offset += bytes_to_copy;
-        data += bytes_to_copy;
-    }
-#endif
-
-    // FIXME: Consolidate with inode_size_changed() so we only do a single walk.
-    for_each_region([] (Region& region) {
-        ASSERT(region.page_directory());
-        MM.remap_region(*region.page_directory(), region);
-    });
-}
-
-int Region::commit()
-{
-    InterruptDisabler disabler;
-#ifdef MM_DEBUG
-    dbgprintf("MM: commit %u pages in Region %p (VMO=%p) at L%x\n", vmo().page_count(), this, &vmo(), laddr().get());
-#endif
-    for (size_t i = first_page_index(); i <= last_page_index(); ++i) {
-        if (!vmo().physical_pages()[i].is_null())
-            continue;
-        auto physical_page = MM.allocate_physical_page(MemoryManager::ShouldZeroFill::Yes);
-        if (!physical_page) {
-            kprintf("MM: commit was unable to allocate a physical page\n");
-            return -ENOMEM;
-        }
-        vmo().physical_pages()[i] = move(physical_page);
-        MM.remap_region_page(*this, i, true);
-    }
-    return 0;
-}
-
 void MemoryManager::register_vmo(VMObject& vmo)
 {
     InterruptDisabler disabler;
@@ -948,45 +619,6 @@ void MemoryManager::unregister_region(Region& region)
     m_regions.remove(&region);
 }
 
-size_t Region::amount_resident() const
-{
-    size_t bytes = 0;
-    for (size_t i = 0; i < page_count(); ++i) {
-        if (m_vmo->physical_pages()[first_page_index() + i])
-            bytes += PAGE_SIZE;
-    }
-    return bytes;
-}
-
-size_t Region::amount_shared() const
-{
-    size_t bytes = 0;
-    for (size_t i = 0; i < page_count(); ++i) {
-        auto& physical_page = m_vmo->physical_pages()[first_page_index() + i];
-        if (physical_page && physical_page->retain_count() > 1)
-            bytes += PAGE_SIZE;
-    }
-    return bytes;
-}
-
-PageDirectory::~PageDirectory()
-{
-#ifdef MM_DEBUG
-    dbgprintf("MM: ~PageDirectory K%x\n", this);
-#endif
-}
-
-void PageDirectory::flush(LinearAddress laddr)
-{
-#ifdef MM_DEBUG
-    dbgprintf("MM: Flush page L%x\n", laddr.get());
-#endif
-    if (!current)
-        return;
-    if (&current->process().page_directory() == this)
-        MM.flush_tlb(laddr);
-}
-
 ProcessPagingScope::ProcessPagingScope(Process& process)
 {
     ASSERT(current);