Region.cpp 5.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152
  1. #include <Kernel/FileSystem/Inode.h>
  2. #include <Kernel/Process.h>
  3. #include <Kernel/Thread.h>
  4. #include <Kernel/VM/AnonymousVMObject.h>
  5. #include <Kernel/VM/InodeVMObject.h>
  6. #include <Kernel/VM/MemoryManager.h>
  7. #include <Kernel/VM/Region.h>
// Construct an anonymous-memory region covering `range`, backed by a freshly
// created AnonymousVMObject of the same size. `access` is the R/W/X permission
// byte; `cow` pre-marks every page as copy-on-write in the CoW bitmap.
Region::Region(const Range& range, const String& name, u8 access, bool cow)
    : m_range(range)
    , m_vmobject(AnonymousVMObject::create_with_size(size()))
    , m_name(name)
    , m_access(access)
    // NOTE: this initializer reads m_vmobject; members are initialized in
    // declaration order, so m_vmobject must be declared before m_cow_map
    // (and m_range before m_vmobject, since size() derives from m_range) —
    // presumably the header declares them in that order; verify before reordering.
    , m_cow_map(Bitmap::create(m_vmobject->page_count(), cow))
{
    // Make the region visible to the memory manager's region bookkeeping.
    MM.register_region(*this);
}
// Construct a file-backed region covering `range`, backed by an InodeVMObject
// wrapping `inode`. `cow` pre-marks every page as copy-on-write.
// NOTE(review): `*inode` asserts/crashes if the RefPtr is null — callers are
// presumably required to pass a non-null inode; confirm at call sites.
Region::Region(const Range& range, RefPtr<Inode>&& inode, const String& name, u8 access, bool cow)
    : m_range(range)
    , m_vmobject(InodeVMObject::create_with_inode(*inode))
    , m_name(name)
    , m_access(access)
    // Reads m_vmobject, so m_vmobject must be declared (and thus initialized)
    // before m_cow_map.
    , m_cow_map(Bitmap::create(m_vmobject->page_count(), cow))
{
    // Make the region visible to the memory manager's region bookkeeping.
    MM.register_region(*this);
}
// Construct a region that maps an existing VMObject starting at
// `offset_in_vmo` bytes into it — used e.g. when cloning/sharing regions.
// `cow` pre-marks every page as copy-on-write.
Region::Region(const Range& range, NonnullRefPtr<VMObject> vmo, size_t offset_in_vmo, const String& name, u8 access, bool cow)
    : m_range(range)
    , m_offset_in_vmo(offset_in_vmo)
    , m_vmobject(move(vmo))
    , m_name(name)
    , m_access(access)
    // Reads m_vmobject; relies on m_vmobject being declared before m_cow_map.
    // NOTE(review): the bitmap is sized to the *whole* VMObject's page count,
    // not just the slice starting at offset_in_vmo — consistent with the other
    // constructors, but worth confirming that is intentional.
    , m_cow_map(Bitmap::create(m_vmobject->page_count(), cow))
{
    // Make the region visible to the memory manager's region bookkeeping.
    MM.register_region(*this);
}
// Tear down the region: unmap it from its page directory (if mapped) and
// remove it from the memory manager's registry — atomically with respect to
// interrupts.
Region::~Region()
{
    // Make sure we disable interrupts so we don't get interrupted between unmapping and unregistering.
    // Unmapping the region will give the VM back to the RangeAllocator, so an interrupt handler would
    // find the address<->region mappings in an invalid state there.
    InterruptDisabler disabler;
    if (m_page_directory) {
        MM.unmap_region(*this);
        // unmap_region() is expected to clear m_page_directory as a side effect.
        ASSERT(!m_page_directory);
    }
    MM.unregister_region(*this);
}
// Clone this region for the current process (used on fork()):
//  - shared or read-only regions: the clone maps the *same* VMObject;
//  - private writable regions: both parent and clone become copy-on-write,
//    each backed by its own (cloned) VMObject, and the parent is remapped so
//    subsequent writes fault and trigger the copy.
// Must be called from a thread context (`current` must be non-null).
NonnullOwnPtr<Region> Region::clone()
{
    ASSERT(current);
    // NOTE: Kernel-only regions should never be cloned.
    ASSERT(is_user_accessible());
    if (m_shared || (is_readable() && !is_writable())) {
#ifdef MM_DEBUG
        dbgprintf("%s<%u> Region::clone(): sharing %s (V%p)\n",
            current->process().name().characters(),
            current->pid(),
            m_name.characters(),
            vaddr().get());
#endif
        // Create a new region backed by the same VMObject.
        return Region::create_user_accessible(m_range, m_vmobject, m_offset_in_vmo, m_name, m_access);
    }
#ifdef MM_DEBUG
    dbgprintf("%s<%u> Region::clone(): cowing %s (V%p)\n",
        current->process().name().characters(),
        current->pid(),
        m_name.characters(),
        vaddr().get());
#endif
    // Set up a COW region. The parent (this) region becomes COW as well!
    m_cow_map.fill(true);
    // Remap the parent so its now-CoW pages are write-protected in its
    // page tables — otherwise parent writes would bypass the CoW fault path.
    MM.remap_region(current->process().page_directory(), *this);
    return Region::create_user_accessible(m_range, m_vmobject->clone(), m_offset_in_vmo, m_name, m_access, true);
}
// Eagerly back every page of this region with a zero-filled physical page,
// skipping pages that already have one (lazily committed regions start with
// null physical page slots). Returns 0 on success, -ENOMEM if a physical
// page could not be allocated (pages committed so far are left in place).
int Region::commit()
{
    // Page allocation and remapping must not race with fault handlers.
    InterruptDisabler disabler;
#ifdef MM_DEBUG
    dbgprintf("MM: commit %u pages in Region %p (VMO=%p) at V%p\n", vmobject().page_count(), this, &vmobject(), vaddr().get());
#endif
    // Indices are into the VMObject's page array; this region may cover only
    // a sub-range of the VMObject (first/last_page_index account for that).
    for (size_t i = first_page_index(); i <= last_page_index(); ++i) {
        if (!vmobject().physical_pages()[i].is_null())
            continue; // Already backed; nothing to do for this page.
        auto physical_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
        if (!physical_page) {
            kprintf("MM: commit was unable to allocate a physical page\n");
            return -ENOMEM;
        }
        vmobject().physical_pages()[i] = move(physical_page);
        // Update the page tables so the new page is immediately visible.
        MM.remap_region_page(*this, i);
    }
    return 0;
}
  95. size_t Region::amount_resident() const
  96. {
  97. size_t bytes = 0;
  98. for (size_t i = 0; i < page_count(); ++i) {
  99. if (m_vmobject->physical_pages()[first_page_index() + i])
  100. bytes += PAGE_SIZE;
  101. }
  102. return bytes;
  103. }
  104. size_t Region::amount_shared() const
  105. {
  106. size_t bytes = 0;
  107. for (size_t i = 0; i < page_count(); ++i) {
  108. auto& physical_page = m_vmobject->physical_pages()[first_page_index() + i];
  109. if (physical_page && physical_page->ref_count() > 1)
  110. bytes += PAGE_SIZE;
  111. }
  112. return bytes;
  113. }
  114. NonnullOwnPtr<Region> Region::create_user_accessible(const Range& range, const StringView& name, u8 access, bool cow)
  115. {
  116. auto region = make<Region>(range, name, access, cow);
  117. region->m_user_accessible = true;
  118. return region;
  119. }
  120. NonnullOwnPtr<Region> Region::create_user_accessible(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access, bool cow)
  121. {
  122. auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access, cow);
  123. region->m_user_accessible = true;
  124. return region;
  125. }
  126. NonnullOwnPtr<Region> Region::create_user_accessible(const Range& range, NonnullRefPtr<Inode> inode, const StringView& name, u8 access, bool cow)
  127. {
  128. auto region = make<Region>(range, move(inode), name, access, cow);
  129. region->m_user_accessible = true;
  130. return region;
  131. }
  132. NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, const StringView& name, u8 access, bool cow)
  133. {
  134. auto region = make<Region>(range, name, access, cow);
  135. region->m_user_accessible = false;
  136. return region;
  137. }