VMObject.cpp

#include <Kernel/VM/VMObject.h>
#include <Kernel/VM/MemoryManager.h>
#include <FileSystem/FileSystem.h>
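
// Return the VMObject backing this inode, creating one and attaching it to the
// inode if none exists yet. Interrupts are disabled so the check-and-create
// can't be interleaved with another creation for the same inode.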
Retained<VMObject> VMObject::create_file_backed(RetainPtr<Inode>&& inode)
{
    InterruptDisabler disabler;
    if (inode->vmo())
        return *inode->vmo();
    auto vmo = adopt(*new VMObject(move(inode)));
    vmo->inode()->set_vmo(*vmo);
    return vmo;
}
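
// Create an anonymous (not file-backed) VMObject; the requested size is
// rounded up to a whole number of pages.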
Retained<VMObject> VMObject::create_anonymous(size_t size)
{
    size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
    return adopt(*new VMObject(size));
}
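
// Wrap an existing physical address range in a VMObject. CPU caching is
// disabled for these pages (they likely map device memory rather than RAM).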
Retained<VMObject> VMObject::create_for_physical_range(PhysicalAddress paddr, size_t size)
{
    size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
    auto vmo = adopt(*new VMObject(paddr, size));
    vmo->m_allow_cpu_caching = false;
    return vmo;
}
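
// Make a copy of this VMObject via the copy constructor below.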
Retained<VMObject> VMObject::clone()
{
    return adopt(*new VMObject(*this));
}
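
// Copying a VMObject shares the underlying pages: the page vector is copied by
// value, so both objects end up referring to the same PhysicalPage instances.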
VMObject::VMObject(VMObject& other)
    : m_name(other.m_name)
    , m_inode_offset(other.m_inode_offset)
    , m_size(other.m_size)
    , m_inode(other.m_inode)
    , m_physical_pages(other.m_physical_pages)
{
    MM.register_vmo(*this);
}
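
// Anonymous VMObject: the size has already been page-aligned by
// create_anonymous(); the page slots start out null and are presumably
// populated by the page fault handler on first access.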
VMObject::VMObject(size_t size)
    : m_size(size)
{
    MM.register_vmo(*this);
    m_physical_pages.resize(page_count());
}
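
// Physically-backed VMObject: wrap each page of the supplied range in a
// PhysicalPage object, one per page of the (page-aligned) size.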
VMObject::VMObject(PhysicalAddress paddr, size_t size)
    : m_size(size)
{
    MM.register_vmo(*this);
    for (size_t i = 0; i < size; i += PAGE_SIZE) {
        m_physical_pages.append(PhysicalPage::create(paddr.offset(i), false));
    }
    ASSERT(m_physical_pages.size() == page_count());
}
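
// File-backed VMObject: the size is the inode's size rounded up to a page
// multiple, and the page slots start out null until paged in.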
VMObject::VMObject(RetainPtr<Inode>&& inode)
    : m_inode(move(inode))
{
    ASSERT(m_inode);
    m_size = ceil_div(m_inode->size(), PAGE_SIZE) * PAGE_SIZE;
    m_physical_pages.resize(page_count());
    MM.register_vmo(*this);
}
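
// Sanity-check that a file-backed VMObject is still the one its inode points
// at, then drop out of the MemoryManager's registry.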
VMObject::~VMObject()
{
    if (m_inode)
        ASSERT(m_inode->vmo() == this);
    MM.unregister_vmo(*this);
}
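
// Invoke callback on every Region (user and kernel) that maps this VMObject.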
template<typename Callback>
void VMObject::for_each_region(Callback callback)
{
    // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
    //        Perhaps VMObject could have a Vector<Region*> with all of its mappers?
    for (auto* region : MM.m_user_regions) {
        if (&region->vmo() == this)
            callback(*region);
    }
    for (auto* region : MM.m_kernel_regions) {
        if (&region->vmo() == this)
            callback(*region);
    }
}
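
// Called by the inode when its size changes: grow or shrink the physical page
// vector to match, then remap every region that maps this VMObject.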
void VMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
{
    dbgprintf("VMObject::inode_size_changed: {%u:%u} %u -> %u\n",
        m_inode->fsid(), m_inode->index(),
        old_size, new_size);

    InterruptDisabler disabler;

    size_t old_page_count = page_count();
    m_size = new_size;

    if (page_count() > old_page_count) {
        // Add null pages and let the fault handler page these in when that day comes.
        for (size_t i = old_page_count; i < page_count(); ++i)
            m_physical_pages.append(nullptr);
    } else {
        // Prune the no-longer valid pages. I'm not sure this is actually correct behavior.
        for (size_t i = page_count(); i < old_page_count; ++i)
            m_physical_pages.take_last();
    }

    // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
    for_each_region([] (Region& region) {
        ASSERT(region.page_directory());
        MM.remap_region(*region.page_directory(), region);
    });
}
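
// Called by the inode when its contents change. For now the whole page cache
// for this VMObject is discarded and every mapping region is remapped; the
// disabled block below sketches a partial update that would instead copy the
// new data into any already-present pages.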
void VMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size, const byte* data)
{
    (void)size;
    (void)data;
    InterruptDisabler disabler;
    ASSERT(offset >= 0);

    // FIXME: Only invalidate the parts that actually changed.
    for (auto& physical_page : m_physical_pages)
        physical_page = nullptr;

#if 0
    size_t current_offset = offset;
    size_t remaining_bytes = size;
    const byte* data_ptr = data;

    auto to_page_index = [] (size_t offset) -> size_t {
        return offset / PAGE_SIZE;
    };

    if (current_offset & PAGE_MASK) {
        size_t page_index = to_page_index(current_offset);
        size_t bytes_to_copy = min(size, PAGE_SIZE - (current_offset & PAGE_MASK));
        if (m_physical_pages[page_index]) {
            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
            memcpy(ptr, data_ptr, bytes_to_copy);
            MM.unquickmap_page();
        }
        current_offset += bytes_to_copy;
        // Advance the copy source (data_ptr), not the original 'data' pointer.
        data_ptr += bytes_to_copy;
        remaining_bytes -= bytes_to_copy;
    }

    for (size_t page_index = to_page_index(current_offset); page_index < m_physical_pages.size(); ++page_index) {
        size_t bytes_to_copy = PAGE_SIZE - (current_offset & PAGE_MASK);
        if (m_physical_pages[page_index]) {
            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
            memcpy(ptr, data_ptr, bytes_to_copy);
            MM.unquickmap_page();
        }
        current_offset += bytes_to_copy;
        data_ptr += bytes_to_copy;
    }
#endif

    // FIXME: Consolidate with inode_size_changed() so we only do a single walk.
    for_each_region([] (Region& region) {
        ASSERT(region.page_directory());
        MM.remap_region(*region.page_directory(), region);
    });
}