/*
 * Copyright (c) 2019-2020, Sergey Bugaev <bugaevc@serenityos.org>
 * Copyright (c) 2022-2023, Liav A. <liavalb@hotmail.co.il>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/FileSystem/RAMFS/Inode.h>
#include <Kernel/Process.h>

namespace Kernel {

RAMFSInode::RAMFSInode(RAMFS& fs, InodeMetadata const& metadata, LockWeakPtr<RAMFSInode> parent)
    : Inode(fs, fs.next_inode_index())
    , m_metadata(metadata)
    , m_parent(move(parent))
{
    m_metadata.inode = identifier();
}

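// The root directory inode is special-cased: it is always assigned inode
// index 1 and is created as a directory (mode 0755) with all of its
// timestamps set to the current time.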
RAMFSInode::RAMFSInode(RAMFS& fs)
    : Inode(fs, 1)
    , m_root_directory_inode(true)
{
    auto now = kgettimeofday();
    m_metadata.inode = identifier();
    m_metadata.atime = now;
    m_metadata.ctime = now;
    m_metadata.mtime = now;
    m_metadata.mode = S_IFDIR | 0755;
}

RAMFSInode::~RAMFSInode() = default;

ErrorOr<NonnullRefPtr<RAMFSInode>> RAMFSInode::try_create(RAMFS& fs, InodeMetadata const& metadata, LockWeakPtr<RAMFSInode> parent)
{
    return adopt_nonnull_ref_or_enomem(new (nothrow) RAMFSInode(fs, metadata, move(parent)));
}

ErrorOr<NonnullRefPtr<RAMFSInode>> RAMFSInode::try_create_root(RAMFS& fs)
{
    return adopt_nonnull_ref_or_enomem(new (nothrow) RAMFSInode(fs));
}

InodeMetadata RAMFSInode::metadata() const
{
    MutexLocker locker(m_inode_lock, Mutex::Mode::Shared);
    return m_metadata;
}

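// Emits "." and ".." before the actual children. For the root inode, ".."
// refers back to the root itself; otherwise it refers to the parent inode,
// provided the parent is still alive.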
ErrorOr<void> RAMFSInode::traverse_as_directory(Function<ErrorOr<void>(FileSystem::DirectoryEntryView const&)> callback) const
{
    MutexLocker locker(m_inode_lock, Mutex::Mode::Shared);
    if (!is_directory())
        return ENOTDIR;

    TRY(callback({ "."sv, identifier(), 0 }));
    if (m_root_directory_inode) {
        TRY(callback({ ".."sv, identifier(), 0 }));
    } else if (auto parent = m_parent.strong_ref()) {
        TRY(callback({ ".."sv, parent->identifier(), 0 }));
    }

    for (auto& child : m_children) {
        TRY(callback({ child.name->view(), child.inode->identifier(), 0 }));
    }
    return {};
}

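// Swaps the inode behind an existing directory entry while keeping the
// entry's name. The displaced inode is notified via did_delete_self().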
ErrorOr<void> RAMFSInode::replace_child(StringView name, Inode& new_child)
{
    MutexLocker locker(m_inode_lock);
    VERIFY(is_directory());
    VERIFY(new_child.fsid() == fsid());

    auto* child = find_child_by_name(name);
    if (!child)
        return ENOENT;

    auto old_child = child->inode;
    child->inode = static_cast<RAMFSInode&>(new_child);
    old_child->did_delete_self();

    // TODO: Emit a did_replace_child event.
    return {};
}

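// Each data block is backed by its own anonymous VM object of block_size
// bytes, committed up front via AllocationStrategy::AllocateNow.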
ErrorOr<NonnullOwnPtr<RAMFSInode::DataBlock>> RAMFSInode::DataBlock::create()
{
    auto data_block_buffer_vmobject = TRY(Memory::AnonymousVMObject::try_create_with_size(DataBlock::block_size, AllocationStrategy::AllocateNow));
    return TRY(adopt_nonnull_own_or_enomem(new (nothrow) DataBlock(move(data_block_buffer_vmobject))));
}

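// Grows the block list so that the byte range [offset, offset + io_size) is
// fully backed by allocated blocks. If any allocation fails, the armed scope
// guard releases the blocks allocated so far and shrinks the list back to
// its original size, leaving the inode unchanged.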
ErrorOr<void> RAMFSInode::ensure_allocated_blocks(size_t offset, size_t io_size)
{
    VERIFY(m_inode_lock.is_locked());
    size_t block_start_index = offset / DataBlock::block_size;
    size_t block_last_index = ((offset + io_size) / DataBlock::block_size) + (((offset + io_size) % DataBlock::block_size) == 0 ? 0 : 1);
    VERIFY(block_start_index <= block_last_index);

    size_t original_size = m_blocks.size();
    Vector<size_t> allocated_block_indices;
    ArmedScopeGuard clean_allocated_blocks_on_failure([&] {
        for (auto index : allocated_block_indices)
            m_blocks[index].clear();
        MUST(m_blocks.try_resize(original_size));
    });

    if (m_blocks.size() < block_last_index)
        TRY(m_blocks.try_resize(block_last_index));

    for (size_t block_index = block_start_index; block_index < block_last_index; block_index++) {
        if (!m_blocks[block_index]) {
            TRY(allocated_block_indices.try_append(block_index));
            m_blocks[block_index] = TRY(DataBlock::create());
        }
    }
    clean_allocated_blocks_on_failure.disarm();
    return {};
}

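// Reads are serviced by mapping one data block at a time into a freshly
// allocated scratch kernel region; see do_io_on_content_space() below.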
ErrorOr<size_t> RAMFSInode::read_bytes_from_content_space(size_t offset, size_t io_size, UserOrKernelBuffer& buffer) const
{
    VERIFY(m_inode_lock.is_locked());
    VERIFY(m_metadata.size >= 0);
    if (offset >= static_cast<size_t>(m_metadata.size))
        return 0;
    auto mapping_region = TRY(MM.allocate_kernel_region(DataBlock::block_size, "RAMFSInode Mapping Region"sv, Memory::Region::Access::Read, AllocationStrategy::Reserve));
    return const_cast<RAMFSInode&>(*this).do_io_on_content_space(*mapping_region, offset, io_size, buffer, false);
}

ErrorOr<size_t> RAMFSInode::read_bytes_locked(off_t offset, size_t size, UserOrKernelBuffer& buffer, OpenFileDescription*) const
{
    VERIFY(m_inode_lock.is_locked());
    VERIFY(!is_directory());
    return read_bytes_from_content_space(offset, size, buffer);
}

ErrorOr<size_t> RAMFSInode::write_bytes_to_content_space(size_t offset, size_t io_size, UserOrKernelBuffer const& buffer)
{
    VERIFY(m_inode_lock.is_locked());
    auto mapping_region = TRY(MM.allocate_kernel_region(DataBlock::block_size, "RAMFSInode Mapping Region"sv, Memory::Region::Access::Write, AllocationStrategy::Reserve));
    return do_io_on_content_space(*mapping_region, offset, io_size, const_cast<UserOrKernelBuffer&>(buffer), true);
}

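// Writes may extend the file: backing blocks are allocated first, the data
// is copied, and the recorded size is then bumped if the write ended past
// the old end of file.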
ErrorOr<size_t> RAMFSInode::write_bytes_locked(off_t offset, size_t size, UserOrKernelBuffer const& buffer, OpenFileDescription*)
{
    VERIFY(m_inode_lock.is_locked());
    VERIFY(!is_directory());
    VERIFY(offset >= 0);

    TRY(ensure_allocated_blocks(offset, size));
    auto nwritten = TRY(write_bytes_to_content_space(offset, size, buffer));

    off_t old_size = m_metadata.size;
    off_t new_size = m_metadata.size;
    if (static_cast<off_t>(offset + size) > new_size)
        new_size = offset + size;

    if (new_size > old_size) {
        m_metadata.size = new_size;
        set_metadata_dirty(true);
    }
    did_modify_contents();
    return nwritten;
}

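// Common read/write path. Rather than keeping all file content mapped, this
// walks the affected blocks and temporarily maps each block's VM object into
// the caller-provided scratch region, copying one block at a time to or from
// the user-or-kernel buffer. A missing block reads back as zeroes (a hole in
// a sparse file), but is an error (EIO) on the write path, since
// write_bytes_locked() allocates all needed blocks beforehand.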
ErrorOr<size_t> RAMFSInode::do_io_on_content_space(Memory::Region& mapping_region, size_t offset, size_t io_size, UserOrKernelBuffer& buffer, bool write)
{
    VERIFY(m_inode_lock.is_locked());
    size_t remaining_bytes = 0;
    if (!write) {
        // Note: For read operations, only read up to the last byte of the file.
        // If we are at or beyond the last byte, return 0 to indicate EOF.
        remaining_bytes = min(io_size, m_metadata.size - offset);
        if (remaining_bytes == 0)
            return 0;
    } else {
        remaining_bytes = io_size;
    }
    VERIFY(remaining_bytes != 0);

    UserOrKernelBuffer current_buffer = buffer.offset(0);
    auto block_start_index = offset / DataBlock::block_size;
    auto offset_in_block = offset % DataBlock::block_size;
    u64 block_index = block_start_index;
    size_t nio = 0;
    while (remaining_bytes > 0) {
        size_t current_io_size = min(DataBlock::block_size - offset_in_block, remaining_bytes);
        auto& block = m_blocks[block_index];
        if (!block && !write) {
            // Note: If the block does not exist, it is just a gap (hole) in the
            // file, so fill the corresponding section of the buffer with zeroes.
            TRY(current_buffer.memset(0, 0, current_io_size));
            remaining_bytes -= current_io_size;
            current_buffer = current_buffer.offset(current_io_size);
            nio += current_io_size;
            block_index++;
            // Note: Reset offset_in_block to zero so that, even if we started in
            // the middle of a block, subsequent iterations operate from the start
            // of each following block.
            offset_in_block = 0;
            continue;
        } else if (!block) {
            return Error::from_errno(EIO);
        }

        NonnullLockRefPtr<Memory::AnonymousVMObject> block_vmobject = block->vmobject();
        mapping_region.set_vmobject(block_vmobject);
        mapping_region.remap();
        if (write)
            TRY(current_buffer.read(mapping_region.vaddr().offset(offset_in_block).as_ptr(), 0, current_io_size));
        else
            TRY(current_buffer.write(mapping_region.vaddr().offset(offset_in_block).as_ptr(), 0, current_io_size));
        current_buffer = current_buffer.offset(current_io_size);
        nio += current_io_size;
        remaining_bytes -= current_io_size;
        block_index++;
        // Note: Reset offset_in_block to zero so that, even if we started in
        // the middle of a block, subsequent iterations operate from the start
        // of each following block.
        offset_in_block = 0;
    }
    VERIFY(nio <= io_size);
    return nio;
}

ErrorOr<void> RAMFSInode::truncate_to_block_index(size_t block_index)
{
    VERIFY(m_inode_lock.is_locked());
    TRY(m_blocks.try_resize(block_index));
    return {};
}

ErrorOr<NonnullRefPtr<Inode>> RAMFSInode::lookup(StringView name)
{
    MutexLocker locker(m_inode_lock, Mutex::Mode::Shared);
    VERIFY(is_directory());

    if (name == ".")
        return *this;
    if (name == "..") {
        if (auto parent = m_parent.strong_ref())
            return *parent;
        return ENOENT;
    }

    auto* child = find_child_by_name(name);
    if (!child)
        return ENOENT;
    return child->inode;
}

RAMFSInode::Child* RAMFSInode::find_child_by_name(StringView name)
{
    for (auto& child : m_children) {
        if (child.name->view() == name)
            return &child;
    }
    return nullptr;
}

ErrorOr<void> RAMFSInode::flush_metadata()
{
    // We don't really have any metadata that could become dirty.
    // The only reason we even call set_metadata_dirty() is to let the
    // watchers know we have updates. Once that is switched to a different
    // mechanism, we can stop marking our metadata as dirty altogether.
    set_metadata_dirty(false);
    return {};
}

ErrorOr<void> RAMFSInode::chmod(mode_t mode)
{
    MutexLocker locker(m_inode_lock);
    m_metadata.mode = mode;
    set_metadata_dirty(true);
    return {};
}

ErrorOr<void> RAMFSInode::chown(UserID uid, GroupID gid)
{
    MutexLocker locker(m_inode_lock);
    m_metadata.uid = uid;
    m_metadata.gid = gid;
    set_metadata_dirty(true);
    return {};
}

ErrorOr<NonnullRefPtr<Inode>> RAMFSInode::create_child(StringView name, mode_t mode, dev_t dev, UserID uid, GroupID gid)
{
    MutexLocker locker(m_inode_lock);
    auto now = kgettimeofday();

    InodeMetadata metadata;
    metadata.mode = mode;
    metadata.uid = uid;
    metadata.gid = gid;
    metadata.atime = now;
    metadata.ctime = now;
    metadata.mtime = now;
    metadata.major_device = major_from_encoded_device(dev);
    metadata.minor_device = minor_from_encoded_device(dev);

    auto child = TRY(RAMFSInode::try_create(fs(), metadata, *this));
    TRY(add_child(*child, name, mode));
    return child;
}

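// Child entries are heap-allocated and linked into m_children; ownership
// stays with this inode until remove_child() deletes the entry again.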
ErrorOr<void> RAMFSInode::add_child(Inode& child, StringView name, mode_t)
{
    VERIFY(is_directory());
    VERIFY(child.fsid() == fsid());

    if (name.length() > NAME_MAX)
        return ENAMETOOLONG;

    MutexLocker locker(m_inode_lock);
    for (auto const& existing_child : m_children) {
        if (existing_child.name->view() == name)
            return EEXIST;
    }

    auto name_kstring = TRY(KString::try_create(name));
    // Balanced by `delete` in remove_child()
    auto* child_entry = new (nothrow) Child { move(name_kstring), static_cast<RAMFSInode&>(child) };
    if (!child_entry)
        return ENOMEM;

    m_children.append(*child_entry);
    did_add_child(child.identifier(), name);
    return {};
}

ErrorOr<void> RAMFSInode::remove_child(StringView name)
{
    MutexLocker locker(m_inode_lock);
    VERIFY(is_directory());

    if (name == "." || name == "..")
        return {};

    auto* child = find_child_by_name(name);
    if (!child)
        return ENOENT;

    auto child_id = child->inode->identifier();
    child->inode->did_delete_self();
    m_children.remove(*child);
    did_remove_child(child_id, name);
    // Balanced by `new` in add_child()
    delete child;
    return {};
}

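// After dropping all blocks past the new end, the tail of the last remaining
// block is zeroed so that stale bytes do not reappear if the file later
// grows back over that range.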
ErrorOr<void> RAMFSInode::truncate(u64 size)
{
    MutexLocker locker(m_inode_lock);
    VERIFY(!is_directory());

    u64 block_index = size / DataBlock::block_size + ((size % DataBlock::block_size == 0) ? 0 : 1);
    TRY(truncate_to_block_index(block_index));

    u64 last_possible_block_index = size / DataBlock::block_size;
    if ((size % DataBlock::block_size != 0) && m_blocks[last_possible_block_index]) {
        auto mapping_region = TRY(MM.allocate_kernel_region(DataBlock::block_size, "RAMFSInode Mapping Region"sv, Memory::Region::Access::Write, AllocationStrategy::Reserve));
        VERIFY(m_blocks[last_possible_block_index]);
        NonnullLockRefPtr<Memory::AnonymousVMObject> block_vmobject = m_blocks[last_possible_block_index]->vmobject();
        mapping_region->set_vmobject(block_vmobject);
        mapping_region->remap();
        memset(mapping_region->vaddr().offset(size % DataBlock::block_size).as_ptr(), 0, DataBlock::block_size - (size % DataBlock::block_size));
    }

    m_metadata.size = size;
    set_metadata_dirty(true);
    return {};
}

ErrorOr<void> RAMFSInode::update_timestamps(Optional<UnixDateTime> atime, Optional<UnixDateTime> ctime, Optional<UnixDateTime> mtime)
{
    MutexLocker locker(m_inode_lock);
    if (atime.has_value())
        m_metadata.atime = atime.value();
    if (ctime.has_value())
        m_metadata.ctime = ctime.value();
    if (mtime.has_value())
        m_metadata.mtime = mtime.value();
    set_metadata_dirty(true);
    return {};
}

}