InodeVMObject.cpp

#include <Kernel/FileSystem/Inode.h>
#include <Kernel/VM/InodeVMObject.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/Region.h>
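
// Return the InodeVMObject backing this inode, creating it on first use.
// There is at most one InodeVMObject per Inode, so all memory mappings of
// the same file share the same set of physical pages.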
NonnullRefPtr<InodeVMObject> InodeVMObject::create_with_inode(Inode& inode)
{
    InterruptDisabler disabler;
    if (inode.vmobject())
        return *inode.vmobject();
    auto vmobject = adopt(*new InodeVMObject(inode));
    vmobject->inode().set_vmobject(*vmobject);
    return vmobject;
}
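
// Clone this VMObject. The copy refers to the same inode, and the underlying
// physical pages are shared (they are refcounted via the base class copy).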
NonnullRefPtr<VMObject> InodeVMObject::clone()
{
    return adopt(*new InodeVMObject(*this));
}
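
// Size the object to the current inode size. Physical pages are populated
// lazily on page fault, so the page list starts out full of null entries,
// and every page begins life clean.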
InodeVMObject::InodeVMObject(Inode& inode)
    : VMObject(inode.size())
    , m_inode(inode)
    , m_dirty_pages(page_count(), false)
{
}
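
// Copy constructor used by clone(). The dirty-page bitmap must be carried
// over as well, since the physical pages (and their dirty state) are shared.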
InodeVMObject::InodeVMObject(const InodeVMObject& other)
    : VMObject(other)
    , m_inode(other.m_inode)
    , m_dirty_pages(other.m_dirty_pages.size(), false)
{
    // Without this, the clone's bitmap would be empty and the size
    // assertion in amount_clean() would fail.
    for (size_t i = 0; i < (size_t)m_dirty_pages.size(); ++i)
        m_dirty_pages.set(i, other.m_dirty_pages.get(i));
}
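
// Sanity check: the inode should still point back at this object when it
// is destroyed.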
InodeVMObject::~InodeVMObject()
{
    ASSERT(inode().vmobject() == this);
}
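
// Number of bytes held in resident pages that are not dirty, i.e. memory
// that release_all_clean_pages() could safely give back.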
size_t InodeVMObject::amount_clean() const
{
    size_t count = 0;
    ASSERT(page_count() == (size_t)m_dirty_pages.size());
    for (size_t i = 0; i < page_count(); ++i) {
        if (!m_dirty_pages.get(i) && m_physical_pages[i])
            ++count;
    }
    return count * PAGE_SIZE;
}
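
// Number of bytes in pages that have been written to through a mapping and
// not yet flushed back to the inode.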
size_t InodeVMObject::amount_dirty() const
{
    size_t count = 0;
    for (size_t i = 0; i < (size_t)m_dirty_pages.size(); ++i) {
        if (m_dirty_pages.get(i))
            ++count;
    }
    return count * PAGE_SIZE;
}
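
// Called by the Inode when the file is truncated or extended. Resize the
// page bookkeeping to match and remap every region that maps this object.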
void InodeVMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
{
    dbgprintf("InodeVMObject::inode_size_changed: {%u:%u} %u -> %u\n",
        m_inode->fsid(), m_inode->index(),
        old_size, new_size);

    InterruptDisabler disabler;

    auto new_page_count = PAGE_ROUND_UP(new_size) / PAGE_SIZE;
    m_physical_pages.resize(new_page_count);
    m_dirty_pages.grow(new_page_count, false);

    // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
    for_each_region([](auto& region) {
        region.remap();
    });
}
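
// Called by the Inode when its contents change (e.g. a write() through a
// file descriptor). For now we simply drop all cached physical pages and
// remap, forcing fresh reads from the inode on the next page fault.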
void InodeVMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size, const u8* data)
{
    (void)size;
    (void)data;
    InterruptDisabler disabler;
    ASSERT(offset >= 0);

    // FIXME: Only invalidate the parts that actually changed.
    for (auto& physical_page : m_physical_pages)
        physical_page = nullptr;
#if 0
    // Disabled sketch of a partial-update path: copy the changed bytes into
    // any physical pages we already have, instead of throwing them all away.
    size_t current_offset = offset;
    size_t remaining_bytes = size;
    const u8* data_ptr = data;

    auto to_page_index = [](size_t offset) -> size_t {
        return offset / PAGE_SIZE;
    };

    // Handle a leading partial page if the change doesn't start on a page boundary.
    if (current_offset & (PAGE_SIZE - 1)) {
        size_t page_index = to_page_index(current_offset);
        size_t bytes_to_copy = min(remaining_bytes, PAGE_SIZE - (current_offset & (PAGE_SIZE - 1)));
        if (m_physical_pages[page_index]) {
            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
            memcpy(ptr + (current_offset & (PAGE_SIZE - 1)), data_ptr, bytes_to_copy);
            MM.unquickmap_page();
        }
        current_offset += bytes_to_copy;
        data_ptr += bytes_to_copy;
        remaining_bytes -= bytes_to_copy;
    }

    // Copy the rest one page at a time, stopping when the data runs out.
    for (size_t page_index = to_page_index(current_offset); page_index < m_physical_pages.size() && remaining_bytes > 0; ++page_index) {
        size_t bytes_to_copy = min(remaining_bytes, (size_t)PAGE_SIZE);
        if (m_physical_pages[page_index]) {
            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
            memcpy(ptr, data_ptr, bytes_to_copy);
            MM.unquickmap_page();
        }
        current_offset += bytes_to_copy;
        data_ptr += bytes_to_copy;
        remaining_bytes -= bytes_to_copy;
    }
#endif
    // FIXME: Consolidate with inode_size_changed() so we only do a single walk.
    for_each_region([](auto& region) {
        region.remap();
    });
}
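
// Release every clean (non-dirty) physical page and return how many pages
// were freed. Takes the paging lock to avoid racing with page fault handling.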
int InodeVMObject::release_all_clean_pages()
{
    LOCKER(m_paging_lock);
    return release_all_clean_pages_impl();
}
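
// Worker for release_all_clean_pages(); assumes the caller already holds
// m_paging_lock.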
int InodeVMObject::release_all_clean_pages_impl()
{
    int count = 0;
    InterruptDisabler disabler;
    for (size_t i = 0; i < page_count(); ++i) {
        if (!m_dirty_pages.get(i) && m_physical_pages[i]) {
            m_physical_pages[i] = nullptr;
            ++count;
        }
    }
    for_each_region([](auto& region) {
        region.remap();
    });
    return count;
}