VMObject.cpp

#include <Kernel/FileSystem/FileSystem.h>
#include <Kernel/FileSystem/Inode.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/VMObject.h>
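
// Returns the VMObject backing the given inode, creating one and registering it
// with the inode if the inode doesn't have one yet.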
NonnullRefPtr<VMObject> VMObject::create_file_backed(RefPtr<Inode>&& inode)
{
    InterruptDisabler disabler;
    if (inode->vmo())
        return *inode->vmo();
    auto vmo = adopt(*new VMObject(move(inode)));
    vmo->inode()->set_vmo(*vmo);
    return vmo;
}
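
// Creates an anonymous (not inode-backed) VMObject; the size is rounded up to a whole number of pages.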
NonnullRefPtr<VMObject> VMObject::create_anonymous(size_t size)
{
    size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
    return adopt(*new VMObject(size));
}
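
// Creates a VMObject wrapping an existing physical address range; CPU caching is disabled for its pages.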
NonnullRefPtr<VMObject> VMObject::create_for_physical_range(PhysicalAddress paddr, size_t size)
{
    size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
    auto vmo = adopt(*new VMObject(paddr, size));
    vmo->m_allow_cpu_caching = false;
    return vmo;
}
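
// Makes a copy of this VMObject that shares the same physical pages.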
NonnullRefPtr<VMObject> VMObject::clone()
{
    return adopt(*new VMObject(*this));
}
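
// Copy constructor used by clone(): takes references to the same physical pages rather than copying their contents.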
VMObject::VMObject(VMObject& other)
    : m_name(other.m_name)
    , m_inode_offset(other.m_inode_offset)
    , m_size(other.m_size)
    , m_inode(other.m_inode)
    , m_physical_pages(other.m_physical_pages)
{
    MM.register_vmo(*this);
}
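
// Anonymous VMObject: starts out with page_count() null page slots.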
VMObject::VMObject(size_t size)
    : m_size(size)
{
    MM.register_vmo(*this);
    m_physical_pages.resize(page_count());
}
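
// Physical-range VMObject: creates a PhysicalPage wrapper for each page in the given range.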
VMObject::VMObject(PhysicalAddress paddr, size_t size)
    : m_size(size)
{
    MM.register_vmo(*this);
    for (size_t i = 0; i < size; i += PAGE_SIZE) {
        m_physical_pages.append(PhysicalPage::create(paddr.offset(i), false, false));
    }
    ASSERT(m_physical_pages.size() == page_count());
}
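
// Inode-backed VMObject: sized to the inode's size (rounded up to whole pages), with null page slots to be filled in on page fault.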
VMObject::VMObject(RefPtr<Inode>&& inode)
    : m_inode(move(inode))
{
    ASSERT(m_inode);
    m_size = ceil_div(m_inode->size(), PAGE_SIZE) * PAGE_SIZE;
    m_physical_pages.resize(page_count());
    MM.register_vmo(*this);
}
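
// On destruction, verify we're still the VMObject registered with our inode (if any) and unregister from the MemoryManager.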
VMObject::~VMObject()
{
    if (m_inode)
        ASSERT(m_inode->vmo() == this);
    MM.unregister_vmo(*this);
}
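
// Invokes the callback for every user and kernel Region that currently maps this VMObject.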
template<typename Callback>
void VMObject::for_each_region(Callback callback)
{
    // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
    //        Perhaps VMObject could have a Vector<Region*> with all of its mappers?
    for (auto* region : MM.m_user_regions) {
        if (&region->vmo() == this)
            callback(*region);
    }
    for (auto* region : MM.m_kernel_regions) {
        if (&region->vmo() == this)
            callback(*region);
    }
}
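
// Called by the backing Inode when its size changes: grow the page list with null
// entries (to be paged in on fault) or prune trailing pages, then remap every
// region that maps this VMObject.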
void VMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
{
    dbgprintf("VMObject::inode_size_changed: {%u:%u} %u -> %u\n",
        m_inode->fsid(), m_inode->index(),
        old_size, new_size);
    InterruptDisabler disabler;
    auto old_page_count = page_count();
    m_size = new_size;
    if (page_count() > old_page_count) {
        // Add null pages and let the fault handler page these in when that day comes.
        for (auto i = old_page_count; i < page_count(); ++i)
            m_physical_pages.append(nullptr);
    } else {
        // Prune the no-longer-valid pages. I'm not sure this is actually correct behavior.
        for (auto i = page_count(); i < old_page_count; ++i)
            m_physical_pages.take_last();
    }
    // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
    for_each_region([](Region& region) {
        ASSERT(region.page_directory());
        MM.remap_region(*region.page_directory(), region);
    });
}
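
// Called by the backing Inode when its contents change: invalidate all cached
// physical pages and remap every region that maps this VMObject, so fresh data is
// paged back in on the next access.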
void VMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size, const byte* data)
{
    (void)size;
    (void)data;
    InterruptDisabler disabler;
    ASSERT(offset >= 0);
    // FIXME: Only invalidate the parts that actually changed.
    for (auto& physical_page : m_physical_pages)
        physical_page = nullptr;

#if 0
    size_t current_offset = offset;
    size_t remaining_bytes = size;
    const byte* data_ptr = data;

    auto to_page_index = [](size_t offset) -> size_t {
        return offset / PAGE_SIZE;
    };

    if (current_offset & PAGE_MASK) {
        size_t page_index = to_page_index(current_offset);
        size_t bytes_to_copy = min(size, PAGE_SIZE - (current_offset & PAGE_MASK));
        if (m_physical_pages[page_index]) {
            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
            memcpy(ptr, data_ptr, bytes_to_copy);
            MM.unquickmap_page();
        }
        current_offset += bytes_to_copy;
        data_ptr += bytes_to_copy;
        remaining_bytes -= bytes_to_copy;
    }

    for (size_t page_index = to_page_index(current_offset); page_index < m_physical_pages.size(); ++page_index) {
        size_t bytes_to_copy = PAGE_SIZE - (current_offset & PAGE_MASK);
        if (m_physical_pages[page_index]) {
            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
            memcpy(ptr, data_ptr, bytes_to_copy);
            MM.unquickmap_page();
        }
        current_offset += bytes_to_copy;
        data_ptr += bytes_to_copy;
    }
#endif

    // FIXME: Consolidate with inode_size_changed() so we only do a single walk.
    for_each_region([](Region& region) {
        ASSERT(region.page_directory());
        MM.remap_region(*region.page_directory(), region);
    });
}