FileSystem.cpp

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * Copyright (c) 2021, sin-ack <sin-ack@protonmail.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/Debug.h>
#include <Kernel/FileSystem/Ext2FS/FileSystem.h>
#include <Kernel/FileSystem/Ext2FS/Inode.h>
#include <Kernel/Process.h>
#include <Kernel/UnixTypes.h>

namespace Kernel {

ErrorOr<NonnullRefPtr<FileSystem>> Ext2FS::try_create(OpenFileDescription& file_description)
{
    return TRY(adopt_nonnull_ref_or_enomem(new (nothrow) Ext2FS(file_description)));
}

Ext2FS::Ext2FS(OpenFileDescription& file_description)
    : BlockBasedFileSystem(file_description)
{
}

Ext2FS::~Ext2FS() = default;

ErrorOr<void> Ext2FS::flush_super_block()
{
    MutexLocker locker(m_lock);
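    // The ext2 superblock always lives at byte offset 1024 on disk; the raw block index 2
    // used below assumes a 512-byte logical block size (1024 / 512).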
    VERIFY((sizeof(ext2_super_block) % logical_block_size()) == 0);
    auto super_block_buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)&m_super_block);
    return raw_write_blocks(2, (sizeof(ext2_super_block) / logical_block_size()), super_block_buffer);
}

ext2_group_desc const& Ext2FS::group_descriptor(GroupIndex group_index) const
{
    // FIXME: Should this fail gracefully somehow?
    VERIFY(group_index <= m_block_group_count);
    VERIFY(group_index > 0);
    return block_group_descriptors()[group_index.value() - 1];
}

bool Ext2FS::is_initialized_while_locked()
{
    VERIFY(m_lock.is_locked());
    return !m_root_inode.is_null();
}

ErrorOr<void> Ext2FS::initialize_while_locked()
{
    VERIFY(m_lock.is_locked());
    VERIFY(!is_initialized_while_locked());

    VERIFY((sizeof(ext2_super_block) % logical_block_size()) == 0);
    auto super_block_buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)&m_super_block);
    TRY(raw_read_blocks(2, (sizeof(ext2_super_block) / logical_block_size()), super_block_buffer));

    auto const& super_block = this->super_block();
    if constexpr (EXT2_DEBUG) {
        dmesgln("Ext2FS: super block magic: {:04x} (super block size: {})", super_block.s_magic, sizeof(ext2_super_block));
    }
    if (super_block.s_magic != EXT2_SUPER_MAGIC) {
        dmesgln("Ext2FS: Bad super block magic");
        return EINVAL;
    }

    if constexpr (EXT2_DEBUG) {
        dmesgln("Ext2FS: {} inodes, {} blocks", super_block.s_inodes_count, super_block.s_blocks_count);
        dmesgln("Ext2FS: Block size: {}", EXT2_BLOCK_SIZE(&super_block));
        dmesgln("Ext2FS: First data block: {}", super_block.s_first_data_block);
        dmesgln("Ext2FS: Inodes per block: {}", inodes_per_block());
        dmesgln("Ext2FS: Inodes per group: {}", inodes_per_group());
        dmesgln("Ext2FS: Free inodes: {}", super_block.s_free_inodes_count);
        dmesgln("Ext2FS: Descriptors per block: {}", EXT2_DESC_PER_BLOCK(&super_block));
        dmesgln("Ext2FS: Descriptor size: {}", EXT2_DESC_SIZE(&super_block));
    }

    set_block_size(EXT2_BLOCK_SIZE(&super_block));
    set_fragment_size(EXT2_FRAG_SIZE(&super_block));

    // Note: This depends on the block size being available.
    TRY(BlockBasedFileSystem::initialize_while_locked());

    VERIFY(block_size() <= (int)max_block_size);

    m_block_group_count = ceil_div(super_block.s_blocks_count, super_block.s_blocks_per_group);

    if (m_block_group_count == 0) {
        dmesgln("Ext2FS: no block groups :(");
        return EINVAL;
    }

    auto blocks_to_read = ceil_div(m_block_group_count * sizeof(ext2_group_desc), block_size());
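    // The block group descriptor table (BGDT) starts in the block immediately following the
    // superblock: with a 1 KiB block size the superblock occupies block 1, so the BGDT begins
    // at block 2; with larger block sizes it begins at block 1.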
    BlockIndex first_block_of_bgdt = block_size() == 1024 ? 2 : 1;
    m_cached_group_descriptor_table = TRY(KBuffer::try_create_with_size("Ext2FS: Block group descriptors"sv, block_size() * blocks_to_read, Memory::Region::Access::ReadWrite));
    auto buffer = UserOrKernelBuffer::for_kernel_buffer(m_cached_group_descriptor_table->data());
    TRY(read_blocks(first_block_of_bgdt, blocks_to_read, buffer));

    if constexpr (EXT2_DEBUG) {
        for (unsigned i = 1; i <= m_block_group_count; ++i) {
            auto const& group = group_descriptor(i);
            dbgln("Ext2FS: group[{}] ( block_bitmap: {}, inode_bitmap: {}, inode_table: {} )", i, group.bg_block_bitmap, group.bg_inode_bitmap, group.bg_inode_table);
        }
    }

    m_root_inode = TRY(build_root_inode());
    return {};
}

Inode& Ext2FS::root_inode()
{
    return *m_root_inode;
}

bool Ext2FS::find_block_containing_inode(InodeIndex inode, BlockIndex& block_index, unsigned& offset) const
{
    auto const& super_block = this->super_block();

    if (inode != EXT2_ROOT_INO && inode < EXT2_FIRST_INO(&super_block))
        return false;

    if (inode > super_block.s_inodes_count)
        return false;

    auto const& bgd = group_descriptor(group_index_from_inode(inode));
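    // Inode numbers are 1-based. Compute this inode's byte offset within its group's inode
    // table, then split that into the containing block and the offset inside that block.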
    u64 full_offset = ((inode.value() - 1) % inodes_per_group()) * inode_size();
    block_index = bgd.bg_inode_table + (full_offset >> EXT2_BLOCK_SIZE_BITS(&super_block));
    offset = full_offset & (block_size() - 1);

    return true;
}

Ext2FS::BlockListShape Ext2FS::compute_block_list_shape(unsigned blocks) const
{
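    // An ext2 inode addresses its first EXT2_NDIR_BLOCKS blocks directly, then one singly
    // indirect block, one doubly indirect block, and finally one triply indirect block.
    // meta_blocks counts the indirection blocks themselves, on top of the data blocks.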
    BlockListShape shape;
    unsigned const entries_per_block = EXT2_ADDR_PER_BLOCK(&super_block());
    unsigned blocks_remaining = blocks;

    shape.direct_blocks = min((unsigned)EXT2_NDIR_BLOCKS, blocks_remaining);
    blocks_remaining -= shape.direct_blocks;
    if (!blocks_remaining)
        return shape;

    shape.indirect_blocks = min(blocks_remaining, entries_per_block);
    shape.meta_blocks += 1;
    blocks_remaining -= shape.indirect_blocks;
    if (!blocks_remaining)
        return shape;

    shape.doubly_indirect_blocks = min(blocks_remaining, entries_per_block * entries_per_block);
    shape.meta_blocks += 1;
    shape.meta_blocks += ceil_div(shape.doubly_indirect_blocks, entries_per_block);
    blocks_remaining -= shape.doubly_indirect_blocks;
    if (!blocks_remaining)
        return shape;

    shape.triply_indirect_blocks = min(blocks_remaining, entries_per_block * entries_per_block * entries_per_block);
    shape.meta_blocks += 1;
    shape.meta_blocks += ceil_div(shape.triply_indirect_blocks, entries_per_block * entries_per_block);
    shape.meta_blocks += ceil_div(shape.triply_indirect_blocks, entries_per_block);
    blocks_remaining -= shape.triply_indirect_blocks;
    VERIFY(blocks_remaining == 0);
    return shape;
}

u8 Ext2FS::internal_file_type_to_directory_entry_type(DirectoryEntryView const& entry) const
{
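    // Map the ext2 on-disk directory entry file type codes to the POSIX DT_* values
    // reported by readdir().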
    switch (entry.file_type) {
    case EXT2_FT_REG_FILE:
        return DT_REG;
    case EXT2_FT_DIR:
        return DT_DIR;
    case EXT2_FT_CHRDEV:
        return DT_CHR;
    case EXT2_FT_BLKDEV:
        return DT_BLK;
    case EXT2_FT_FIFO:
        return DT_FIFO;
    case EXT2_FT_SOCK:
        return DT_SOCK;
    case EXT2_FT_SYMLINK:
        return DT_LNK;
    default:
        return DT_UNKNOWN;
    }
}

Ext2FS::FeaturesReadOnly Ext2FS::get_features_readonly() const
{
    if (m_super_block.s_rev_level > 0)
        return static_cast<Ext2FS::FeaturesReadOnly>(m_super_block.s_feature_ro_compat);
    return Ext2FS::FeaturesReadOnly::None;
}

u64 Ext2FS::inodes_per_block() const
{
    return EXT2_INODES_PER_BLOCK(&super_block());
}

u64 Ext2FS::inodes_per_group() const
{
    return EXT2_INODES_PER_GROUP(&super_block());
}

u64 Ext2FS::inode_size() const
{
    return EXT2_INODE_SIZE(&super_block());
}

u64 Ext2FS::blocks_per_group() const
{
    return EXT2_BLOCKS_PER_GROUP(&super_block());
}

ErrorOr<void> Ext2FS::write_ext2_inode(InodeIndex inode, ext2_inode const& e2inode)
{
    BlockIndex block_index;
    unsigned offset;
    if (!find_block_containing_inode(inode, block_index, offset))
        return EINVAL;
    auto buffer = UserOrKernelBuffer::for_kernel_buffer(const_cast<u8*>((u8 const*)&e2inode));
    return write_block(block_index, buffer, inode_size(), offset);
}

auto Ext2FS::allocate_blocks(GroupIndex preferred_group_index, size_t count) -> ErrorOr<Vector<BlockIndex>>
{
    dbgln_if(EXT2_DEBUG, "Ext2FS: allocate_blocks(preferred group: {}, count {})", preferred_group_index, count);
    if (count == 0)
        return Vector<BlockIndex> {};

    Vector<BlockIndex> blocks;
    TRY(blocks.try_ensure_capacity(count));

    MutexLocker locker(m_lock);
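    // Allocation strategy: start in the caller's preferred block group and fall back to
    // scanning all groups for one with free blocks. Within a group we grab the longest run
    // of unset bits in its block bitmap, repeating until 'count' blocks have been claimed.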
    auto group_index = preferred_group_index;
    if (!group_descriptor(preferred_group_index).bg_free_blocks_count) {
        group_index = 1;
    }

    while (blocks.size() < count) {
        bool found_a_group = false;
        if (group_descriptor(group_index).bg_free_blocks_count) {
            found_a_group = true;
        } else {
            if (group_index == preferred_group_index)
                group_index = 1;
            for (; group_index <= m_block_group_count; group_index = GroupIndex { group_index.value() + 1 }) {
                if (group_descriptor(group_index).bg_free_blocks_count) {
                    found_a_group = true;
                    break;
                }
            }
        }

        VERIFY(found_a_group);
        auto const& bgd = group_descriptor(group_index);

        auto* cached_bitmap = TRY(get_bitmap_block(bgd.bg_block_bitmap));
        int blocks_in_group = min(blocks_per_group(), super_block().s_blocks_count);
        auto block_bitmap = cached_bitmap->bitmap(blocks_in_group);

        BlockIndex first_block_in_group = (group_index.value() - 1) * blocks_per_group() + first_block_index().value();
        size_t free_region_size = 0;
        auto first_unset_bit_index = block_bitmap.find_longest_range_of_unset_bits(count - blocks.size(), free_region_size);
        VERIFY(first_unset_bit_index.has_value());
        dbgln_if(EXT2_DEBUG, "Ext2FS: allocating free region of size: {} [{}]", free_region_size, group_index);
        for (size_t i = 0; i < free_region_size; ++i) {
            BlockIndex block_index = (first_unset_bit_index.value() + i) + first_block_in_group.value();
            TRY(set_block_allocation_state(block_index, true));
            blocks.unchecked_append(block_index);
            dbgln_if(EXT2_DEBUG, " allocated > {}", block_index);
        }
    }

    VERIFY(blocks.size() == count);
    return blocks;
}

ErrorOr<InodeIndex> Ext2FS::allocate_inode(GroupIndex preferred_group)
{
    dbgln_if(EXT2_DEBUG, "Ext2FS: allocate_inode(preferred_group: {})", preferred_group);
    MutexLocker locker(m_lock);

    // FIXME: We shouldn't refuse to allocate an inode if there is no group that can house the whole thing.
    //        In those cases we should just spread it across multiple groups.
    auto is_suitable_group = [this](auto group_index) {
        auto& bgd = group_descriptor(group_index);
        return bgd.bg_free_inodes_count && bgd.bg_free_blocks_count >= 1;
    };

    GroupIndex group_index;
    if (preferred_group.value() && is_suitable_group(preferred_group)) {
        group_index = preferred_group;
    } else {
        for (unsigned i = 1; i <= m_block_group_count; ++i) {
            if (is_suitable_group(i)) {
                group_index = i;
                break;
            }
        }
    }

    if (!group_index) {
        dmesgln("Ext2FS: allocate_inode: no suitable group found for new inode");
        return ENOSPC;
    }

    dbgln_if(EXT2_DEBUG, "Ext2FS: allocate_inode: found suitable group [{}] for new inode :^)", group_index);
    auto const& bgd = group_descriptor(group_index);
    unsigned inodes_in_group = min(inodes_per_group(), super_block().s_inodes_count);
    InodeIndex first_inode_in_group = (group_index.value() - 1) * inodes_per_group() + 1;

    auto* cached_bitmap = TRY(get_bitmap_block(bgd.bg_inode_bitmap));
    auto inode_bitmap = cached_bitmap->bitmap(inodes_in_group);
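    // Scan the group's inode bitmap for the first free inode, claim it, and update the
    // free-inode counters in both the superblock and the group descriptor.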
    for (size_t i = 0; i < inode_bitmap.size(); ++i) {
        if (inode_bitmap.get(i))
            continue;
        inode_bitmap.set(i, true);

        auto inode_index = InodeIndex(first_inode_in_group.value() + i);

        cached_bitmap->dirty = true;
        m_super_block.s_free_inodes_count--;
        m_super_block_dirty = true;
        const_cast<ext2_group_desc&>(bgd).bg_free_inodes_count--;
        m_block_group_descriptors_dirty = true;

        // In case the inode cache had this cached as "non-existent", uncache that info.
        m_inode_cache.remove(inode_index.value());

        return inode_index;
    }

    dmesgln("Ext2FS: allocate_inode found no available inode, despite bgd claiming there are inodes :(");
    return EIO;
}

Ext2FS::GroupIndex Ext2FS::group_index_from_block_index(BlockIndex block_index) const
{
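    // Group indices and inode numbers are 1-based; 0 is treated as an invalid/sentinel
    // value, so a zero block or inode index maps to group 0 here.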
    if (!block_index)
        return 0;
    return (block_index.value() - 1) / blocks_per_group() + 1;
}

auto Ext2FS::group_index_from_inode(InodeIndex inode) const -> GroupIndex
{
    if (!inode)
        return 0;
    return (inode.value() - 1) / inodes_per_group() + 1;
}

ErrorOr<bool> Ext2FS::get_inode_allocation_state(InodeIndex index) const
{
    MutexLocker locker(m_lock);
    if (index == 0)
        return EINVAL;
    auto group_index = group_index_from_inode(index);
    auto const& bgd = group_descriptor(group_index);
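    // Inode numbers are 1-based, so subtract 1 when turning the index within this group
    // into a bit position in the group's inode bitmap.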
    unsigned index_in_group = index.value() - ((group_index.value() - 1) * inodes_per_group());
    unsigned bit_index = (index_in_group - 1) % inodes_per_group();

    auto* cached_bitmap = TRY(const_cast<Ext2FS&>(*this).get_bitmap_block(bgd.bg_inode_bitmap));
    return cached_bitmap->bitmap(inodes_per_group()).get(bit_index);
}

ErrorOr<void> Ext2FS::update_bitmap_block(BlockIndex bitmap_block, size_t bit_index, bool new_state, u32& super_block_counter, u16& group_descriptor_counter)
{
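    // Flip a single allocation bit and keep the free counters in the superblock and the
    // owning group descriptor consistent with the change.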
    auto* cached_bitmap = TRY(get_bitmap_block(bitmap_block));
    bool current_state = cached_bitmap->bitmap(blocks_per_group()).get(bit_index);
    if (current_state == new_state) {
        dbgln("Ext2FS: Bit {} in bitmap block {} had unexpected state {}", bit_index, bitmap_block, current_state);
        return EIO;
    }
    cached_bitmap->bitmap(blocks_per_group()).set(bit_index, new_state);
    cached_bitmap->dirty = true;

    if (new_state) {
        --super_block_counter;
        --group_descriptor_counter;
    } else {
        ++super_block_counter;
        ++group_descriptor_counter;
    }

    m_super_block_dirty = true;
    m_block_group_descriptors_dirty = true;
    return {};
}

ErrorOr<void> Ext2FS::set_inode_allocation_state(InodeIndex inode_index, bool new_state)
{
    MutexLocker locker(m_lock);
    auto group_index = group_index_from_inode(inode_index);
    unsigned index_in_group = inode_index.value() - ((group_index.value() - 1) * inodes_per_group());
    unsigned bit_index = (index_in_group - 1) % inodes_per_group();

    dbgln_if(EXT2_DEBUG, "Ext2FS: set_inode_allocation_state: Inode {} -> {}", inode_index, new_state);
    auto& bgd = const_cast<ext2_group_desc&>(group_descriptor(group_index));
    return update_bitmap_block(bgd.bg_inode_bitmap, bit_index, new_state, m_super_block.s_free_inodes_count, bgd.bg_free_inodes_count);
}

Ext2FS::BlockIndex Ext2FS::first_block_index() const
{
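    // Mirrors the superblock's s_first_data_block: with a 1 KiB block size, block 0 holds
    // only the 1024-byte boot area and the superblock starts in block 1, so the first data
    // block is 1; with larger block sizes it is 0.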
    return block_size() == 1024 ? 1 : 0;
}

ErrorOr<Ext2FS::CachedBitmap*> Ext2FS::get_bitmap_block(BlockIndex bitmap_block_index)
{
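    // Bitmap blocks are read from disk once and then kept in an in-memory cache; dirty
    // bitmaps are written back by flush_writes().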
    for (auto& cached_bitmap : m_cached_bitmaps) {
        if (cached_bitmap->bitmap_block_index == bitmap_block_index)
            return cached_bitmap.ptr();
    }

    auto block = TRY(KBuffer::try_create_with_size("Ext2FS: Cached bitmap block"sv, block_size(), Memory::Region::Access::ReadWrite));
    auto buffer = UserOrKernelBuffer::for_kernel_buffer(block->data());
    TRY(read_block(bitmap_block_index, &buffer, block_size()));
    auto new_bitmap = TRY(adopt_nonnull_own_or_enomem(new (nothrow) CachedBitmap(bitmap_block_index, move(block))));
    TRY(m_cached_bitmaps.try_append(move(new_bitmap)));
    return m_cached_bitmaps.last().ptr();
}

ErrorOr<void> Ext2FS::set_block_allocation_state(BlockIndex block_index, bool new_state)
{
    VERIFY(block_index != 0);
    MutexLocker locker(m_lock);

    auto group_index = group_index_from_block_index(block_index);
    unsigned index_in_group = (block_index.value() - first_block_index().value()) - ((group_index.value() - 1) * blocks_per_group());
    unsigned bit_index = index_in_group % blocks_per_group();
    auto& bgd = const_cast<ext2_group_desc&>(group_descriptor(group_index));
    dbgln_if(EXT2_DEBUG, "Ext2FS: Block {} state -> {} (in bitmap block {})", block_index, new_state, bgd.bg_block_bitmap);
    return update_bitmap_block(bgd.bg_block_bitmap, bit_index, new_state, m_super_block.s_free_blocks_count, bgd.bg_free_blocks_count);
}

ErrorOr<NonnullRefPtr<Inode>> Ext2FS::create_directory(Ext2FSInode& parent_inode, StringView name, mode_t mode, UserID uid, GroupID gid)
{
    MutexLocker locker(m_lock);
    VERIFY(is_directory(mode));

    auto inode = TRY(create_inode(parent_inode, name, mode, 0, uid, gid));
    dbgln_if(EXT2_DEBUG, "Ext2FS: create_directory: created new directory named '{}' with inode {}", name, inode->index());
    Vector<Ext2FSDirectoryEntry> entries;
    auto current_directory_name = TRY(KString::try_create("."sv));
    TRY(entries.try_empend(move(current_directory_name), inode->index(), static_cast<u8>(EXT2_FT_DIR)));
    auto parent_directory_name = TRY(KString::try_create(".."sv));
    TRY(entries.try_empend(move(parent_directory_name), parent_inode.index(), static_cast<u8>(EXT2_FT_DIR)));

    TRY(static_cast<Ext2FSInode&>(*inode).write_directory(entries));
    TRY(parent_inode.increment_link_count());

    auto& bgd = const_cast<ext2_group_desc&>(group_descriptor(group_index_from_inode(inode->identifier().index())));
    ++bgd.bg_used_dirs_count;
    m_block_group_descriptors_dirty = true;

    return inode;
}

ErrorOr<NonnullRefPtr<Inode>> Ext2FS::create_inode(Ext2FSInode& parent_inode, StringView name, mode_t mode, dev_t dev, UserID uid, GroupID gid)
{
    if (name.length() > EXT2_NAME_LEN)
        return ENAMETOOLONG;

    if (parent_inode.m_raw_inode.i_links_count == 0)
        return ENOENT;

    ext2_inode e2inode {};
    auto now = kgettimeofday().truncated_seconds_since_epoch();
    e2inode.i_mode = mode;
    e2inode.i_uid = uid.value();
    e2inode.i_gid = gid.value();
    e2inode.i_size = 0;
    e2inode.i_atime = now;
    e2inode.i_ctime = now;
    e2inode.i_mtime = now;
    e2inode.i_dtime = 0;
    e2inode.i_flags = 0;

    // For directories, add +1 link count for the "." entry in self.
    e2inode.i_links_count = is_directory(mode);
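    // Device special files have no data blocks; ext2 stores their device number directly
    // in the i_block array instead.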
    if (is_character_device(mode))
        e2inode.i_block[0] = dev;
    else if (is_block_device(mode))
        e2inode.i_block[1] = dev;

    auto inode_id = TRY(allocate_inode());

    dbgln_if(EXT2_DEBUG, "Ext2FS: writing initial metadata for inode {}", inode_id.value());
    TRY(write_ext2_inode(inode_id, e2inode));

    auto new_inode = TRY(get_inode({ fsid(), inode_id }));

    dbgln_if(EXT2_DEBUG, "Ext2FS: Adding inode '{}' (mode {:o}) to parent directory {}", name, mode, parent_inode.index());
    TRY(parent_inode.add_child(*new_inode, name, mode));
    return new_inode;
}

void Ext2FS::uncache_inode(InodeIndex index)
{
    MutexLocker locker(m_lock);
    m_inode_cache.remove(index);
}

unsigned Ext2FS::total_block_count() const
{
    MutexLocker locker(m_lock);
    return super_block().s_blocks_count;
}

unsigned Ext2FS::free_block_count() const
{
    MutexLocker locker(m_lock);
    return super_block().s_free_blocks_count;
}

unsigned Ext2FS::total_inode_count() const
{
    MutexLocker locker(m_lock);
    return super_block().s_inodes_count;
}

unsigned Ext2FS::free_inode_count() const
{
    MutexLocker locker(m_lock);
    return super_block().s_free_inodes_count;
}

ErrorOr<void> Ext2FS::prepare_to_clear_last_mount()
{
    MutexLocker locker(m_lock);
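    // Refuse to tear down the filesystem while any cached inode is still referenced by
    // someone other than the inode cache itself.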
    for (auto& it : m_inode_cache) {
        if (it.value->ref_count() > 1)
            return EBUSY;
    }

    BlockBasedFileSystem::remove_disk_cache_before_last_unmount();
    m_inode_cache.clear();
    m_root_inode = nullptr;
    return {};
}

ErrorOr<void> Ext2FS::free_inode(Ext2FSInode& inode)
{
    MutexLocker locker(m_lock);
    VERIFY(inode.m_raw_inode.i_links_count == 0);
    dbgln_if(EXT2_DEBUG, "Ext2FS[{}]::free_inode(): Inode {} has no more links, time to delete!", fsid(), inode.index());

    // Mark all blocks used by this inode as free.
    {
        auto blocks = TRY(inode.compute_block_list_with_meta_blocks());
        for (auto block_index : blocks) {
            VERIFY(block_index <= super_block().s_blocks_count);
            if (block_index.value())
                TRY(set_block_allocation_state(block_index, false));
        }
    }

    // If the inode being freed is a directory, update block group directory counter.
    if (inode.is_directory()) {
        auto& bgd = const_cast<ext2_group_desc&>(group_descriptor(group_index_from_inode(inode.index())));
        --bgd.bg_used_dirs_count;
        dbgln_if(EXT2_DEBUG, "Ext2FS[{}]::free_inode(): Decremented bg_used_dirs_count to {} for inode {}", fsid(), bgd.bg_used_dirs_count, inode.index());
        m_block_group_descriptors_dirty = true;
    }

    // NOTE: After this point, the inode metadata is wiped.
    memset(&inode.m_raw_inode, 0, sizeof(ext2_inode));
    inode.m_raw_inode.i_dtime = kgettimeofday().truncated_seconds_since_epoch();
    TRY(write_ext2_inode(inode.index(), inode.m_raw_inode));

    // Mark the inode as free.
    TRY(set_inode_allocation_state(inode.index(), false));
    return {};
}

void Ext2FS::flush_block_group_descriptor_table()
{
    MutexLocker locker(m_lock);
    auto blocks_to_write = ceil_div(m_block_group_count * sizeof(ext2_group_desc), block_size());
    auto first_block_of_bgdt = block_size() == 1024 ? 2 : 1;
    auto buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)block_group_descriptors());
    if (auto result = write_blocks(first_block_of_bgdt, blocks_to_write, buffer); result.is_error())
        dbgln("Ext2FS[{}]::flush_block_group_descriptor_table(): Failed to write blocks: {}", fsid(), result.error());
}

void Ext2FS::flush_writes()
{
    {
        MutexLocker locker(m_lock);
        if (m_super_block_dirty) {
            auto result = flush_super_block();
            if (result.is_error()) {
                dbgln("Ext2FS[{}]::flush_writes(): Failed to write superblock: {}", fsid(), result.error());
                // FIXME: We should handle this error.
                VERIFY_NOT_REACHED();
            }
            m_super_block_dirty = false;
        }
        if (m_block_group_descriptors_dirty) {
            flush_block_group_descriptor_table();
            m_block_group_descriptors_dirty = false;
        }
        for (auto& cached_bitmap : m_cached_bitmaps) {
            if (cached_bitmap->dirty) {
                auto buffer = UserOrKernelBuffer::for_kernel_buffer(cached_bitmap->buffer->data());
                if (auto result = write_block(cached_bitmap->bitmap_block_index, buffer, block_size()); result.is_error()) {
                    dbgln("Ext2FS[{}]::flush_writes(): Failed to write blocks: {}", fsid(), result.error());
                }
                cached_bitmap->dirty = false;
                dbgln_if(EXT2_DEBUG, "Ext2FS[{}]::flush_writes(): Flushed bitmap block {}", fsid(), cached_bitmap->bitmap_block_index);
            }
        }

        // Uncache Inodes that are only kept alive by the index-to-inode lookup cache.
        // We don't uncache Inodes that are being watched by at least one InodeWatcher.
        // FIXME: It would be better to keep a capped number of Inodes around.
        //        The problem is that they are quite heavy objects, and use a lot of heap memory
        //        for their (child name lookup) and (block list) caches.
        m_inode_cache.remove_all_matching([](InodeIndex, RefPtr<Ext2FSInode> const& cached_inode) {
            // NOTE: If we're asked to look up an inode by number (via get_inode) and it turns out
            //       to not exist, we remember the fact that it doesn't exist by caching a nullptr.
            //       This seems like a reasonable time to uncache ideas about unknown inodes, so do that.
            if (cached_inode == nullptr)
                return true;
            return cached_inode->ref_count() == 1 && !cached_inode->has_watchers();
        });
    }

    BlockBasedFileSystem::flush_writes();
}

ErrorOr<NonnullRefPtr<Ext2FSInode>> Ext2FS::build_root_inode() const
{
    MutexLocker locker(m_lock);
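    // The root directory always has inode number EXT2_ROOT_INO (2); read its raw inode
    // straight out of the inode table.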
    BlockIndex block_index;
    unsigned offset;
    if (!find_block_containing_inode(EXT2_ROOT_INO, block_index, offset))
        return EINVAL;

    auto inode = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) Ext2FSInode(const_cast<Ext2FS&>(*this), EXT2_ROOT_INO)));

    auto buffer = UserOrKernelBuffer::for_kernel_buffer(reinterpret_cast<u8*>(&inode->m_raw_inode));
    TRY(read_block(block_index, &buffer, sizeof(ext2_inode), offset));
    return inode;
}

ErrorOr<NonnullRefPtr<Inode>> Ext2FS::get_inode(InodeIdentifier inode) const
{
    MutexLocker locker(m_lock);
    VERIFY(inode.fsid() == fsid());
    VERIFY(m_root_inode);

    if (inode.index() == EXT2_ROOT_INO)
        return *m_root_inode;

    {
        auto it = m_inode_cache.find(inode.index());
        if (it != m_inode_cache.end()) {
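            // A cached nullptr means a previous lookup already determined that this inode
            // does not exist (negative cache entry).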
            if (!it->value)
                return ENOENT;
            return NonnullRefPtr<Inode> { *it->value };
        }
    }

    auto inode_allocation_state = TRY(get_inode_allocation_state(inode.index()));

    if (!inode_allocation_state) {
        TRY(m_inode_cache.try_set(inode.index(), nullptr));
        return ENOENT;
    }

    BlockIndex block_index;
    unsigned offset;
    if (!find_block_containing_inode(inode.index(), block_index, offset))
        return EINVAL;

    auto new_inode = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) Ext2FSInode(const_cast<Ext2FS&>(*this), inode.index())));

    auto buffer = UserOrKernelBuffer::for_kernel_buffer(reinterpret_cast<u8*>(&new_inode->m_raw_inode));
    TRY(read_block(block_index, &buffer, sizeof(ext2_inode), offset));

    TRY(m_inode_cache.try_set(inode.index(), new_inode));
    return new_inode;
}

}