#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/FileSystem/DiskBackedFileSystem.h>
#include <Kernel/KBuffer.h>
#include <Kernel/Process.h>

//#define DBFS_DEBUG

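// A single cached disk block: the raw block data plus the bookkeeping needed
// for eviction (timestamp) and write-back (is_dirty).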
struct CacheEntry {
    u32 timestamp { 0 };
    u32 block_index { 0 };
    u8* data { nullptr };
    bool has_data { false };
    bool is_dirty { false };
};

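// Fixed-size write-back cache of disk blocks. Lookups and evictions both go
// through get(); dirty entries only reach the device via DiskBackedFS::flush_writes().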
class DiskCache {
public:
    explicit DiskCache(DiskBackedFS& fs)
        : m_fs(fs)
        , m_cached_block_data(KBuffer::create_with_size(m_entry_count * m_fs.block_size()))
        , m_entries(KBuffer::create_with_size(m_entry_count * sizeof(CacheEntry)))
    {
        // Hand each cache entry its slice of the shared block-data buffer.
        for (size_t i = 0; i < m_entry_count; ++i) {
            entries()[i].data = m_cached_block_data.data() + i * m_fs.block_size();
        }
    }

    ~DiskCache() {}

    bool is_dirty() const { return m_dirty; }
    void set_dirty(bool b) { m_dirty = b; }

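    // Returns the cache entry for 'block_index', evicting the least recently
    // used clean entry on a miss. If no clean entry exists, all dirty blocks
    // are flushed to disk and the lookup is retried.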
    CacheEntry& get(u32 block_index) const
    {
        auto now = kgettimeofday().tv_sec;

        CacheEntry* oldest_clean_entry = nullptr;
        for (size_t i = 0; i < m_entry_count; ++i) {
            auto& entry = const_cast<CacheEntry&>(entries()[i]);
            if (entry.block_index == block_index) {
                entry.timestamp = now;
                return entry;
            }
            if (!entry.is_dirty) {
                if (!oldest_clean_entry)
                    oldest_clean_entry = &entry;
                else if (entry.timestamp < oldest_clean_entry->timestamp)
                    oldest_clean_entry = &entry;
            }
        }
        if (!oldest_clean_entry) {
            // Not a single clean entry! Flush writes and try again.
            m_fs.flush_writes();
            return get(block_index);
        }

        // Replace the oldest clean entry.
        auto& new_entry = *oldest_clean_entry;
        new_entry.timestamp = now;
        new_entry.block_index = block_index;
        new_entry.has_data = false;
        new_entry.is_dirty = false;
        return new_entry;
    }

    const CacheEntry* entries() const { return (const CacheEntry*)m_entries.data(); }
    CacheEntry* entries() { return (CacheEntry*)m_entries.data(); }

    template<typename Callback>
    void for_each_entry(Callback callback)
    {
        for (size_t i = 0; i < m_entry_count; ++i)
            callback(entries()[i]);
    }

private:
    DiskBackedFS& m_fs;
    size_t m_entry_count { 10000 };
    KBuffer m_cached_block_data;
    KBuffer m_entries;
    bool m_dirty { false };
};

DiskBackedFS::DiskBackedFS(NonnullRefPtr<DiskDevice>&& device)
    : m_device(move(device))
{
}

DiskBackedFS::~DiskBackedFS()
{
}

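// Writes land in the cache only; the entry is marked dirty and the block
// reaches the disk on the next flush_writes().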
bool DiskBackedFS::write_block(unsigned index, const u8* data)
{
#ifdef DBFS_DEBUG
    kprintf("DiskBackedFileSystem::write_block %u\n", index);
#endif
    auto& entry = cache().get(index);
    memcpy(entry.data, data, block_size());
    entry.is_dirty = true;
    entry.has_data = true;
    cache().set_dirty(true);
    return true;
}

bool DiskBackedFS::write_blocks(unsigned index, unsigned count, const u8* data)
{
#ifdef DBFS_DEBUG
    kprintf("DiskBackedFileSystem::write_blocks %u x%u\n", index, count);
#endif
    for (unsigned i = 0; i < count; ++i)
        write_block(index + i, data + i * block_size());
    return true;
}

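// Reads are served from the cache; a miss fills the entry from disk first.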
bool DiskBackedFS::read_block(unsigned index, u8* buffer) const
{
#ifdef DBFS_DEBUG
    kprintf("DiskBackedFileSystem::read_block %u\n", index);
#endif
    auto& entry = cache().get(index);
    if (!entry.has_data) {
        DiskOffset base_offset = static_cast<DiskOffset>(index) * static_cast<DiskOffset>(block_size());
        bool success = device().read(base_offset, block_size(), entry.data);
        entry.has_data = true;
        ASSERT(success);
    }
    memcpy(buffer, entry.data, block_size());
    return true;
}

bool DiskBackedFS::read_blocks(unsigned index, unsigned count, u8* buffer) const
{
    if (!count)
        return false;
    if (count == 1)
        return read_block(index, buffer);
    u8* out = buffer;
    for (unsigned i = 0; i < count; ++i) {
        if (!read_block(index + i, out))
            return false;
        out += block_size();
    }
    return true;
}

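// Write-back: every dirty cache entry is written to the device under the
// filesystem lock, then the cache is marked clean.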
void DiskBackedFS::flush_writes()
{
    LOCKER(m_lock);
    if (!cache().is_dirty())
        return;
    u32 count = 0;
    cache().for_each_entry([&](CacheEntry& entry) {
        if (!entry.is_dirty)
            return;
        DiskOffset base_offset = static_cast<DiskOffset>(entry.block_index) * static_cast<DiskOffset>(block_size());
        device().write(base_offset, block_size(), entry.data);
        ++count;
        entry.is_dirty = false;
    });
    cache().set_dirty(false);
    dbg() << class_name() << ": Flushed " << count << " blocks to disk";
}

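// The block cache is created lazily on first use.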
DiskCache& DiskBackedFS::cache() const
{
    if (!m_cache)
        m_cache = make<DiskCache>(const_cast<DiskBackedFS&>(*this));
    return *m_cache;
}