InodeVMObject.cpp

#include <Kernel/FileSystem/Inode.h>
#include <Kernel/VM/InodeVMObject.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/Region.h>
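
// Returns the inode's VMObject, creating and registering a fresh one on
// first use. Every mapping of the same inode shares this single object,
// which is what keeps file-backed mappings coherent across processes.
// Interrupts are disabled so the check-then-create sequence cannot race.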
NonnullRefPtr<InodeVMObject> InodeVMObject::create_with_inode(Inode& inode)
{
    size_t size = inode.size();
    InterruptDisabler disabler;
    if (inode.vmobject())
        return *inode.vmobject();
    auto vmobject = adopt(*new InodeVMObject(inode, size));
    vmobject->inode().set_vmobject(*vmobject);
    return vmobject;
}
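
// Clones this VMObject via the copy constructor below; the clone shares
// the same underlying inode.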
NonnullRefPtr<VMObject> InodeVMObject::clone()
{
    return adopt(*new InodeVMObject(*this));
}
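
// The dirty-page bitmap tracks, per page, whether a cached page has been
// modified relative to the on-disk inode contents. Pages start out clean.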
InodeVMObject::InodeVMObject(Inode& inode, size_t size)
    : VMObject(size)
    , m_inode(inode)
    , m_dirty_pages(page_count(), false)
{
}

InodeVMObject::InodeVMObject(const InodeVMObject& other)
    : VMObject(other)
    , m_inode(other.m_inode)
    , m_dirty_pages(other.page_count(), false)
{
    // NOTE: The clone starts with an all-clean dirty bitmap sized to match
    // its page count; without this, amount_clean()'s size assertion would
    // fail on a cloned object. A fully faithful clone would copy the bits.
}
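
// Sanity check: the inode must still consider this its VMObject when we
// are destroyed.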
InodeVMObject::~InodeVMObject()
{
    ASSERT(inode().vmobject() == this);
}
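
// A page counts as clean if it is resident in the cache and its dirty bit
// is unset; the result is reported in bytes.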
size_t InodeVMObject::amount_clean() const
{
    size_t count = 0;
    ASSERT(page_count() == (size_t)m_dirty_pages.size());
    for (size_t i = 0; i < page_count(); ++i) {
        if (!m_dirty_pages.get(i) && m_physical_pages[i])
            ++count;
    }
    return count * PAGE_SIZE;
}
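
// Dirty accounting counts set bitmap bits only, whether or not the
// corresponding physical page is still resident.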
size_t InodeVMObject::amount_dirty() const
{
    size_t count = 0;
    for (size_t i = 0; i < (size_t)m_dirty_pages.size(); ++i) {
        if (m_dirty_pages.get(i))
            ++count;
    }
    return count * PAGE_SIZE;
}
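
// Called by the inode when it is truncated or extended. The physical page
// list and dirty bitmap are resized to match, and every region mapping this
// VMObject is remapped so stale translations are flushed.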
void InodeVMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
{
    dbgprintf("InodeVMObject::inode_size_changed: {%u:%u} %u -> %u\n",
        m_inode->fsid(), m_inode->index(),
        old_size, new_size);

    InterruptDisabler disabler;

    auto new_page_count = PAGE_ROUND_UP(new_size) / PAGE_SIZE;
    m_physical_pages.resize(new_page_count);
    m_dirty_pages.grow(new_page_count, false);

    // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
    for_each_region([](auto& region) {
        region.remap();
    });
}
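
// Called by the inode when its contents change on disk. For now the whole
// page cache for this inode is invalidated; the #if 0 block below is a
// disabled sketch of copying only the affected byte range into pages that
// are already cached.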
void InodeVMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size, const u8* data)
{
    (void)size;
    (void)data;
    InterruptDisabler disabler;
    ASSERT(offset >= 0);

    // FIXME: Only invalidate the parts that actually changed.
    for (auto& physical_page : m_physical_pages)
        physical_page = nullptr;

#if 0
    size_t current_offset = offset;
    size_t remaining_bytes = size;
    const u8* data_ptr = data;

    auto to_page_index = [](size_t offset) -> size_t {
        return offset / PAGE_SIZE;
    };

    // Copy the leading partial page, if the change doesn't start page-aligned.
    if (current_offset & PAGE_MASK) {
        size_t page_index = to_page_index(current_offset);
        size_t bytes_to_copy = min(size, PAGE_SIZE - (current_offset & PAGE_MASK));
        if (m_physical_pages[page_index]) {
            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
            memcpy(ptr, data_ptr, bytes_to_copy);
            MM.unquickmap_page();
        }
        current_offset += bytes_to_copy;
        data_ptr += bytes_to_copy;
        remaining_bytes -= bytes_to_copy;
    }

    // Copy the remaining whole pages.
    for (size_t page_index = to_page_index(current_offset); page_index < m_physical_pages.size(); ++page_index) {
        size_t bytes_to_copy = PAGE_SIZE - (current_offset & PAGE_MASK);
        if (m_physical_pages[page_index]) {
            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
            memcpy(ptr, data_ptr, bytes_to_copy);
            MM.unquickmap_page();
        }
        current_offset += bytes_to_copy;
        data_ptr += bytes_to_copy;
    }
#endif

    // FIXME: Consolidate with inode_size_changed() so we only do a single walk.
    for_each_region([](auto& region) {
        region.remap();
    });
}
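
// Evicts every clean page from the cache (dirty pages are kept, as they
// hold data not yet written back) and returns the number of pages released.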
int InodeVMObject::release_all_clean_pages()
{
    LOCKER(m_paging_lock);
    return release_all_clean_pages_impl();
}

int InodeVMObject::release_all_clean_pages_impl()
{
    int count = 0;
    InterruptDisabler disabler;
    for (size_t i = 0; i < page_count(); ++i) {
        if (!m_dirty_pages.get(i) && m_physical_pages[i]) {
            m_physical_pages[i] = nullptr;
            ++count;
        }
    }
    for_each_region([](auto& region) {
        region.remap();
    });
    return count;
}
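
// A minimal usage sketch (hypothetical caller, not part of this file): a
// memory-pressure path could reclaim an inode's clean page-cache pages
// like this:
//
//     if (auto* vmobject = inode.vmobject()) {
//         int released = vmobject->release_all_clean_pages();
//         dbgprintf("Reclaimed %d clean pages\n", released);
//     }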