// Kernel/VM/InodeVMObject.cpp
  1. #include <Kernel/FileSystem/Inode.h>
  2. #include <Kernel/VM/InodeVMObject.h>
  3. #include <Kernel/VM/MemoryManager.h>
  4. #include <Kernel/VM/Region.h>
  5. NonnullRefPtr<InodeVMObject> InodeVMObject::create_with_inode(Inode& inode)
  6. {
  7. InterruptDisabler disabler;
  8. if (inode.vmo())
  9. return *inode.vmo();
  10. auto vmo = adopt(*new InodeVMObject(inode));
  11. vmo->inode().set_vmo(*vmo);
  12. return vmo;
  13. }
  14. NonnullRefPtr<VMObject> InodeVMObject::clone()
  15. {
  16. return adopt(*new InodeVMObject(*this));
  17. }
  18. InodeVMObject::InodeVMObject(Inode& inode)
  19. : VMObject(inode.size())
  20. , m_inode(inode)
  21. {
  22. }
  23. InodeVMObject::InodeVMObject(const InodeVMObject& other)
  24. : VMObject(other)
  25. , m_inode(other.m_inode)
  26. {
  27. }
  28. InodeVMObject::~InodeVMObject()
  29. {
  30. ASSERT(inode().vmo() == this);
  31. }
  32. void InodeVMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
  33. {
  34. dbgprintf("VMObject::inode_size_changed: {%u:%u} %u -> %u\n",
  35. m_inode->fsid(), m_inode->index(),
  36. old_size, new_size);
  37. InterruptDisabler disabler;
  38. auto new_page_count = PAGE_ROUND_UP(new_size) / PAGE_SIZE;
  39. m_physical_pages.resize(new_page_count);
  40. // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
  41. for_each_region([](Region& region) {
  42. ASSERT(region.page_directory());
  43. MM.remap_region(*region.page_directory(), region);
  44. });
  45. }
  46. void InodeVMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size, const u8* data)
  47. {
  48. (void)size;
  49. (void)data;
  50. InterruptDisabler disabler;
  51. ASSERT(offset >= 0);
  52. // FIXME: Only invalidate the parts that actually changed.
  53. for (auto& physical_page : m_physical_pages)
  54. physical_page = nullptr;
  55. #if 0
  56. size_t current_offset = offset;
  57. size_t remaining_bytes = size;
  58. const u8* data_ptr = data;
  59. auto to_page_index = [] (size_t offset) -> size_t {
  60. return offset / PAGE_SIZE;
  61. };
  62. if (current_offset & PAGE_MASK) {
  63. size_t page_index = to_page_index(current_offset);
  64. size_t bytes_to_copy = min(size, PAGE_SIZE - (current_offset & PAGE_MASK));
  65. if (m_physical_pages[page_index]) {
  66. auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
  67. memcpy(ptr, data_ptr, bytes_to_copy);
  68. MM.unquickmap_page();
  69. }
  70. current_offset += bytes_to_copy;
  71. data += bytes_to_copy;
  72. remaining_bytes -= bytes_to_copy;
  73. }
  74. for (size_t page_index = to_page_index(current_offset); page_index < m_physical_pages.size(); ++page_index) {
  75. size_t bytes_to_copy = PAGE_SIZE - (current_offset & PAGE_MASK);
  76. if (m_physical_pages[page_index]) {
  77. auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
  78. memcpy(ptr, data_ptr, bytes_to_copy);
  79. MM.unquickmap_page();
  80. }
  81. current_offset += bytes_to_copy;
  82. data += bytes_to_copy;
  83. }
  84. #endif
  85. // FIXME: Consolidate with inode_size_changed() so we only do a single walk.
  86. for_each_region([](Region& region) {
  87. ASSERT(region.page_directory());
  88. MM.remap_region(*region.page_directory(), region);
  89. });
  90. }
  91. template<typename Callback>
  92. void VMObject::for_each_region(Callback callback)
  93. {
  94. // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
  95. // Perhaps VMObject could have a Vector<Region*> with all of his mappers?
  96. for (auto& region : MM.m_user_regions) {
  97. if (&region.vmo() == this)
  98. callback(region);
  99. }
  100. for (auto& region : MM.m_kernel_regions) {
  101. if (&region.vmo() == this)
  102. callback(region);
  103. }
  104. }