BlockBasedFileSystem.cpp 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324
  1. /*
  2. * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
  3. * All rights reserved.
  4. *
  5. * Redistribution and use in source and binary forms, with or without
  6. * modification, are permitted provided that the following conditions are met:
  7. *
  8. * 1. Redistributions of source code must retain the above copyright notice, this
  9. * list of conditions and the following disclaimer.
  10. *
  11. * 2. Redistributions in binary form must reproduce the above copyright notice,
  12. * this list of conditions and the following disclaimer in the documentation
  13. * and/or other materials provided with the distribution.
  14. *
  15. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  16. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  17. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  18. * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
  19. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  20. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  21. * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  22. * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  23. * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  24. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  25. */
  26. #include <Kernel/FileSystem/BlockBasedFileSystem.h>
  27. #include <Kernel/Process.h>
  28. //#define BBFS_DEBUG
  29. namespace Kernel {
// One slot of the DiskCache: metadata plus a pointer into the shared
// block-data buffer owned by DiskCache (this struct does NOT own `data`).
struct CacheEntry {
    time_t timestamp { 0 };   // Last-access time; used for LRU-style eviction.
    u32 block_index { 0 };    // Which on-disk block this slot caches (only meaningful when is_used).
    u8* data { nullptr };     // Points at block_size() bytes inside DiskCache's m_cached_block_data.
    bool has_data { false };  // True once the block's contents have been read from disk.
    bool is_dirty { false };  // True when the cached copy is newer than the on-disk copy.
    bool is_used { false };   // True once this slot has been assigned to a block.
};
  38. class DiskCache {
  39. public:
  40. explicit DiskCache(BlockBasedFS& fs)
  41. : m_fs(fs)
  42. , m_cached_block_data(KBuffer::create_with_size(m_entry_count * m_fs.block_size()))
  43. , m_entries(KBuffer::create_with_size(m_entry_count * sizeof(CacheEntry)))
  44. {
  45. for (size_t i = 0; i < m_entry_count; ++i) {
  46. entries()[i].data = m_cached_block_data.data() + i * m_fs.block_size();
  47. }
  48. }
  49. ~DiskCache() { }
  50. bool is_dirty() const { return m_dirty; }
  51. void set_dirty(bool b) { m_dirty = b; }
  52. CacheEntry& get(u32 block_index) const
  53. {
  54. auto now = kgettimeofday().tv_sec;
  55. if (auto it = m_map.find(block_index); it != m_map.end()) {
  56. auto& entry = const_cast<CacheEntry&>(entries()[it->value]);
  57. ASSERT(entry.block_index == block_index);
  58. entry.timestamp = now;
  59. return entry;
  60. }
  61. Optional<size_t> oldest_clean_index;
  62. for (size_t i = 0; i < m_entry_count; ++i) {
  63. auto& entry = const_cast<CacheEntry&>(entries()[i]);
  64. ASSERT(!entry.is_used || entry.block_index != block_index);
  65. if (!entry.is_dirty) {
  66. if (!oldest_clean_index.has_value())
  67. oldest_clean_index = i;
  68. else if (entry.timestamp < entries()[oldest_clean_index.value()].timestamp)
  69. oldest_clean_index = i;
  70. }
  71. }
  72. if (!oldest_clean_index.has_value()) {
  73. // Not a single clean entry! Flush writes and try again.
  74. // NOTE: We want to make sure we only call FileBackedFS flush here,
  75. // not some FileBackedFS subclass flush!
  76. m_fs.flush_writes_impl();
  77. return get(block_index);
  78. }
  79. // Replace the oldest clean entry.
  80. auto& new_entry = const_cast<CacheEntry&>(entries()[oldest_clean_index.value()]);
  81. // If this entry was occupied, remove the previous mapping from the fast lookup table.
  82. if (new_entry.block_index)
  83. m_map.remove(new_entry.block_index);
  84. // Create a fast lookup mapping from the block index to this entry.
  85. m_map.set(block_index, oldest_clean_index.value());
  86. new_entry.timestamp = now;
  87. new_entry.block_index = block_index;
  88. new_entry.has_data = false;
  89. new_entry.is_dirty = false;
  90. new_entry.is_used = true;
  91. return new_entry;
  92. }
  93. const CacheEntry* entries() const { return (const CacheEntry*)m_entries.data(); }
  94. CacheEntry* entries() { return (CacheEntry*)m_entries.data(); }
  95. template<typename Callback>
  96. void for_each_entry(Callback callback)
  97. {
  98. for (size_t i = 0; i < m_entry_count; ++i)
  99. callback(entries()[i]);
  100. }
  101. private:
  102. BlockBasedFS& m_fs;
  103. mutable HashMap<u32, u32> m_map;
  104. size_t m_entry_count { 10000 };
  105. KBuffer m_cached_block_data;
  106. KBuffer m_entries;
  107. bool m_dirty { false };
  108. };
// A block-based file system is backed by a seekable file (typically a block
// device); random access via seek() is required for block I/O below.
BlockBasedFS::BlockBasedFS(FileDescription& file_description)
    : FileBackedFS(file_description)
{
    ASSERT(file_description.file().is_seekable());
}
// NOTE: Does not flush dirty cache entries; callers are expected to have
// flushed via flush_writes() before destruction.
BlockBasedFS::~BlockBasedFS()
{
}
  117. int BlockBasedFS::write_block(unsigned index, const UserOrKernelBuffer& data, size_t count, size_t offset, bool allow_cache)
  118. {
  119. ASSERT(m_logical_block_size);
  120. ASSERT(offset + count <= block_size());
  121. #ifdef BBFS_DEBUG
  122. klog() << "BlockBasedFileSystem::write_block " << index << ", size=" << count;
  123. #endif
  124. if (!allow_cache) {
  125. flush_specific_block_if_needed(index);
  126. u32 base_offset = static_cast<u32>(index) * static_cast<u32>(block_size()) + offset;
  127. file_description().seek(base_offset, SEEK_SET);
  128. auto nwritten = file_description().write(data, count);
  129. if (nwritten.is_error())
  130. return -EIO; // TODO: Return error code as-is, could be -EFAULT!
  131. ASSERT(nwritten.value() == count);
  132. return 0;
  133. }
  134. auto& entry = cache().get(index);
  135. if (count < block_size()) {
  136. // Fill the cache first.
  137. read_block(index, nullptr, block_size());
  138. }
  139. if (!data.read(entry.data + offset, count))
  140. return -EFAULT;
  141. entry.is_dirty = true;
  142. entry.has_data = true;
  143. cache().set_dirty(true);
  144. return 0;
  145. }
  146. bool BlockBasedFS::raw_read(unsigned index, UserOrKernelBuffer& buffer)
  147. {
  148. u32 base_offset = static_cast<u32>(index) * static_cast<u32>(m_logical_block_size);
  149. file_description().seek(base_offset, SEEK_SET);
  150. auto nread = file_description().read(buffer, m_logical_block_size);
  151. ASSERT(!nread.is_error());
  152. ASSERT(nread.value() == m_logical_block_size);
  153. return true;
  154. }
  155. bool BlockBasedFS::raw_write(unsigned index, const UserOrKernelBuffer& buffer)
  156. {
  157. u32 base_offset = static_cast<u32>(index) * static_cast<u32>(m_logical_block_size);
  158. file_description().seek(base_offset, SEEK_SET);
  159. auto nwritten = file_description().write(buffer, m_logical_block_size);
  160. ASSERT(!nwritten.is_error());
  161. ASSERT(nwritten.value() == m_logical_block_size);
  162. return true;
  163. }
  164. bool BlockBasedFS::raw_read_blocks(unsigned index, size_t count, UserOrKernelBuffer& buffer)
  165. {
  166. auto current = buffer;
  167. for (unsigned block = index; block < (index + count); block++) {
  168. if (!raw_read(block, current))
  169. return false;
  170. current = current.offset(logical_block_size());
  171. }
  172. return true;
  173. }
  174. bool BlockBasedFS::raw_write_blocks(unsigned index, size_t count, const UserOrKernelBuffer& buffer)
  175. {
  176. auto current = buffer;
  177. for (unsigned block = index; block < (index + count); block++) {
  178. if (!raw_write(block, current))
  179. return false;
  180. current = current.offset(logical_block_size());
  181. }
  182. return true;
  183. }
  184. int BlockBasedFS::write_blocks(unsigned index, unsigned count, const UserOrKernelBuffer& data, bool allow_cache)
  185. {
  186. ASSERT(m_logical_block_size);
  187. #ifdef BBFS_DEBUG
  188. klog() << "BlockBasedFileSystem::write_blocks " << index << " x" << count;
  189. #endif
  190. for (unsigned i = 0; i < count; ++i)
  191. write_block(index + i, data.offset(i * block_size()), block_size(), 0, allow_cache);
  192. return 0;
  193. }
  194. int BlockBasedFS::read_block(unsigned index, UserOrKernelBuffer* buffer, size_t count, size_t offset, bool allow_cache) const
  195. {
  196. ASSERT(m_logical_block_size);
  197. ASSERT(offset + count <= block_size());
  198. #ifdef BBFS_DEBUG
  199. klog() << "BlockBasedFileSystem::read_block " << index;
  200. #endif
  201. if (!allow_cache) {
  202. const_cast<BlockBasedFS*>(this)->flush_specific_block_if_needed(index);
  203. u32 base_offset = static_cast<u32>(index) * static_cast<u32>(block_size()) + static_cast<u32>(offset);
  204. file_description().seek(base_offset, SEEK_SET);
  205. auto nread = file_description().read(*buffer, count);
  206. if (nread.is_error())
  207. return -EIO;
  208. ASSERT(nread.value() == count);
  209. return 0;
  210. }
  211. auto& entry = cache().get(index);
  212. if (!entry.has_data) {
  213. u32 base_offset = static_cast<u32>(index) * static_cast<u32>(block_size());
  214. file_description().seek(base_offset, SEEK_SET);
  215. auto entry_data_buffer = UserOrKernelBuffer::for_kernel_buffer(entry.data);
  216. auto nread = file_description().read(entry_data_buffer, block_size());
  217. if (nread.is_error())
  218. return -EIO;
  219. ASSERT(nread.value() == block_size());
  220. entry.has_data = true;
  221. }
  222. if (buffer && !buffer->write(entry.data + offset, count))
  223. return -EFAULT;
  224. return 0;
  225. }
  226. int BlockBasedFS::read_blocks(unsigned index, unsigned count, UserOrKernelBuffer& buffer, bool allow_cache) const
  227. {
  228. ASSERT(m_logical_block_size);
  229. if (!count)
  230. return false;
  231. if (count == 1)
  232. return read_block(index, &buffer, block_size(), 0, allow_cache);
  233. auto out = buffer;
  234. for (unsigned i = 0; i < count; ++i) {
  235. auto err = read_block(index + i, &out, block_size(), 0, allow_cache);
  236. if (err < 0)
  237. return err;
  238. out = out.offset(block_size());
  239. }
  240. return 0;
  241. }
  242. void BlockBasedFS::flush_specific_block_if_needed(unsigned index)
  243. {
  244. LOCKER(m_lock);
  245. if (!cache().is_dirty())
  246. return;
  247. cache().for_each_entry([&](CacheEntry& entry) {
  248. if (entry.is_dirty && entry.block_index == index) {
  249. u32 base_offset = static_cast<u32>(entry.block_index) * static_cast<u32>(block_size());
  250. file_description().seek(base_offset, SEEK_SET);
  251. // FIXME: Should this error path be surfaced somehow?
  252. auto entry_data_buffer = UserOrKernelBuffer::for_kernel_buffer(entry.data);
  253. (void)file_description().write(entry_data_buffer, block_size());
  254. entry.is_dirty = false;
  255. }
  256. });
  257. }
  258. void BlockBasedFS::flush_writes_impl()
  259. {
  260. LOCKER(m_lock);
  261. if (!cache().is_dirty())
  262. return;
  263. u32 count = 0;
  264. cache().for_each_entry([&](CacheEntry& entry) {
  265. if (!entry.is_dirty)
  266. return;
  267. u32 base_offset = static_cast<u32>(entry.block_index) * static_cast<u32>(block_size());
  268. file_description().seek(base_offset, SEEK_SET);
  269. // FIXME: Should this error path be surfaced somehow?
  270. auto entry_data_buffer = UserOrKernelBuffer::for_kernel_buffer(entry.data);
  271. (void)file_description().write(entry_data_buffer, block_size());
  272. ++count;
  273. entry.is_dirty = false;
  274. });
  275. cache().set_dirty(false);
  276. dbg() << class_name() << ": Flushed " << count << " blocks to disk";
  277. }
// Public flush entry point; simply forwards to the non-virtual
// implementation (see the NOTE in DiskCache::get() about why the impl
// must be callable without virtual dispatch).
void BlockBasedFS::flush_writes()
{
    flush_writes_impl();
}
// Lazily constructs the disk cache on first use. Declared const so that
// read paths can use it; m_cache is presumably mutable (declared in the
// header) and the const_cast hands DiskCache a mutable reference to us.
DiskCache& BlockBasedFS::cache() const
{
    if (!m_cache)
        m_cache = make<DiskCache>(const_cast<BlockBasedFS&>(*this));
    return *m_cache;
}
  288. }