// Kernel/FileSystem/TmpFS/Inode.cpp
  1. /*
  2. * Copyright (c) 2019-2020, Sergey Bugaev <bugaevc@serenityos.org>
  3. * Copyright (c) 2022, Liav A. <liavalb@hotmail.co.il>
  4. *
  5. * SPDX-License-Identifier: BSD-2-Clause
  6. */
  7. #include <Kernel/FileSystem/TmpFS/Inode.h>
  8. #include <Kernel/Process.h>
  9. namespace Kernel {
// Construct a tmpfs inode backed purely by kernel memory.
// The filesystem assigns a fresh inode index; `metadata` seeds the initial
// mode/ownership/timestamps and its inode identifier is patched below to
// refer to this newly-created inode.
TmpFSInode::TmpFSInode(TmpFS& fs, InodeMetadata const& metadata, LockWeakPtr<TmpFSInode> parent)
    : Inode(fs, fs.next_inode_index())
    , m_metadata(metadata)
    , m_parent(move(parent)) // weak: the parent directory must not keep us alive (and vice versa)
{
    m_metadata.inode = identifier();
}
// Nothing to release by hand: data blocks and child entries are RAII members.
TmpFSInode::~TmpFSInode() = default;
  18. ErrorOr<NonnullLockRefPtr<TmpFSInode>> TmpFSInode::try_create(TmpFS& fs, InodeMetadata const& metadata, LockWeakPtr<TmpFSInode> parent)
  19. {
  20. return adopt_nonnull_lock_ref_or_enomem(new (nothrow) TmpFSInode(fs, metadata, move(parent)));
  21. }
  22. ErrorOr<NonnullLockRefPtr<TmpFSInode>> TmpFSInode::try_create_root(TmpFS& fs)
  23. {
  24. InodeMetadata metadata;
  25. auto now = kgettimeofday().to_truncated_seconds();
  26. metadata.atime = now;
  27. metadata.ctime = now;
  28. metadata.mtime = now;
  29. metadata.mode = S_IFDIR | S_ISVTX | 0777;
  30. return try_create(fs, metadata, {});
  31. }
  32. InodeMetadata TmpFSInode::metadata() const
  33. {
  34. MutexLocker locker(m_inode_lock, Mutex::Mode::Shared);
  35. return m_metadata;
  36. }
  37. ErrorOr<void> TmpFSInode::traverse_as_directory(Function<ErrorOr<void>(FileSystem::DirectoryEntryView const&)> callback) const
  38. {
  39. MutexLocker locker(m_inode_lock, Mutex::Mode::Shared);
  40. if (!is_directory())
  41. return ENOTDIR;
  42. TRY(callback({ "."sv, identifier(), 0 }));
  43. if (auto parent = m_parent.strong_ref())
  44. TRY(callback({ ".."sv, parent->identifier(), 0 }));
  45. for (auto& child : m_children) {
  46. TRY(callback({ child.name->view(), child.inode->identifier(), 0 }));
  47. }
  48. return {};
  49. }
  50. ErrorOr<NonnullOwnPtr<TmpFSInode::DataBlock>> TmpFSInode::DataBlock::create()
  51. {
  52. auto data_block_buffer_vmobject = TRY(Memory::AnonymousVMObject::try_create_with_size(DataBlock::block_size, AllocationStrategy::AllocateNow));
  53. return TRY(adopt_nonnull_own_or_enomem(new (nothrow) DataBlock(move(data_block_buffer_vmobject))));
  54. }
// Ensure every data block overlapped by [offset, offset + io_size) exists.
// On any failure, blocks allocated by this call are released and the block
// vector is shrunk back to its original size, leaving the inode unchanged.
ErrorOr<void> TmpFSInode::ensure_allocated_blocks(size_t offset, size_t io_size)
{
    VERIFY(m_inode_lock.is_locked());
    size_t block_start_index = offset / DataBlock::block_size;
    // One-past-the-last needed block, i.e. ceil((offset + io_size) / block_size).
    size_t block_last_index = ((offset + io_size) / DataBlock::block_size) + (((offset + io_size) % DataBlock::block_size) == 0 ? 0 : 1);
    VERIFY(block_start_index <= block_last_index);
    size_t original_size = m_blocks.size();
    Vector<size_t> allocated_block_indices;
    // Rollback guard: drop any blocks we created and restore the vector size.
    // Shrinking a Vector never allocates, so the MUST() cannot fail.
    ArmedScopeGuard clean_allocated_blocks_on_failure([&] {
        for (auto index : allocated_block_indices)
            m_blocks[index].clear();
        MUST(m_blocks.try_resize(original_size));
    });
    if (m_blocks.size() < (block_last_index))
        TRY(m_blocks.try_resize(block_last_index));
    for (size_t block_index = block_start_index; block_index < block_last_index; block_index++) {
        // Only fill holes; blocks that already exist are left untouched.
        if (!m_blocks[block_index]) {
            TRY(allocated_block_indices.try_append(block_index));
            m_blocks[block_index] = TRY(DataBlock::create());
        }
    }
    clean_allocated_blocks_on_failure.disarm();
    return {};
}
  79. ErrorOr<size_t> TmpFSInode::read_bytes_from_content_space(size_t offset, size_t io_size, UserOrKernelBuffer& buffer) const
  80. {
  81. VERIFY(m_inode_lock.is_locked());
  82. VERIFY(m_metadata.size >= 0);
  83. if (static_cast<size_t>(m_metadata.size) < offset)
  84. return 0;
  85. auto mapping_region = TRY(MM.allocate_kernel_region(DataBlock::block_size, "TmpFSInode Mapping Region"sv, Memory::Region::Access::Read, AllocationStrategy::Reserve));
  86. return const_cast<TmpFSInode&>(*this).do_io_on_content_space(*mapping_region, offset, io_size, buffer, false);
  87. }
  88. ErrorOr<size_t> TmpFSInode::read_bytes_locked(off_t offset, size_t size, UserOrKernelBuffer& buffer, OpenFileDescription*) const
  89. {
  90. VERIFY(m_inode_lock.is_locked());
  91. VERIFY(!is_directory());
  92. return read_bytes_from_content_space(offset, size, buffer);
  93. }
  94. ErrorOr<size_t> TmpFSInode::write_bytes_to_content_space(size_t offset, size_t io_size, UserOrKernelBuffer const& buffer)
  95. {
  96. VERIFY(m_inode_lock.is_locked());
  97. auto mapping_region = TRY(MM.allocate_kernel_region(DataBlock::block_size, "TmpFSInode Mapping Region"sv, Memory::Region::Access::Write, AllocationStrategy::Reserve));
  98. return do_io_on_content_space(*mapping_region, offset, io_size, const_cast<UserOrKernelBuffer&>(buffer), true);
  99. }
  100. ErrorOr<size_t> TmpFSInode::write_bytes_locked(off_t offset, size_t size, UserOrKernelBuffer const& buffer, OpenFileDescription*)
  101. {
  102. VERIFY(m_inode_lock.is_locked());
  103. VERIFY(!is_directory());
  104. VERIFY(offset >= 0);
  105. TRY(ensure_allocated_blocks(offset, size));
  106. auto nwritten = TRY(write_bytes_to_content_space(offset, size, buffer));
  107. off_t old_size = m_metadata.size;
  108. off_t new_size = m_metadata.size;
  109. if (static_cast<off_t>(offset + size) > new_size)
  110. new_size = offset + size;
  111. if (new_size > old_size) {
  112. m_metadata.size = new_size;
  113. set_metadata_dirty(true);
  114. }
  115. did_modify_contents();
  116. return nwritten;
  117. }
// Perform a read or write of `io_size` bytes at byte `offset` against the
// inode's data blocks, mapping one block at a time through `mapping_region`.
// `write` selects direction: true copies from `buffer` into content space,
// false copies from content space into `buffer` (holes read back as zeroes).
// Returns the number of bytes actually transferred.
ErrorOr<size_t> TmpFSInode::do_io_on_content_space(Memory::Region& mapping_region, size_t offset, size_t io_size, UserOrKernelBuffer& buffer, bool write)
{
    VERIFY(m_inode_lock.is_locked());
    size_t remaining_bytes = 0;
    if (!write) {
        // Note: For read operations, only perform read until the last byte.
        // If we are beyond the last byte, return 0 to indicate EOF.
        remaining_bytes = min(io_size, m_metadata.size - offset);
        if (remaining_bytes == 0)
            return 0;
    } else {
        remaining_bytes = io_size;
    }
    VERIFY(remaining_bytes != 0);
    UserOrKernelBuffer current_buffer = buffer.offset(0);
    auto block_start_index = offset / DataBlock::block_size;
    auto offset_in_block = offset % DataBlock::block_size;
    u64 block_index = block_start_index;
    size_t nio = 0;
    while (remaining_bytes > 0) {
        // Clamp each pass to the end of the current block.
        size_t current_io_size = min(DataBlock::block_size - offset_in_block, remaining_bytes);
        auto& block = m_blocks[block_index];
        if (!block && !write) {
            // Note: If the block does not exist then it's just a gap in the file,
            // so the buffer should be placed with zeroes in that section.
            TRY(current_buffer.memset(0, 0, current_io_size));
            remaining_bytes -= current_io_size;
            current_buffer = current_buffer.offset(current_io_size);
            nio += current_io_size;
            block_index++;
            // Note: Clear offset_in_block to zero to ensure that if we started from a middle of
            // a block, then next writes are just going to happen from the start of each block until the end.
            offset_in_block = 0;
            continue;
        } else if (!block) {
            // A write into a missing block means ensure_allocated_blocks() was
            // not run (or failed) — treat it as an I/O error rather than crash.
            return Error::from_errno(EIO);
        }
        NonnullLockRefPtr<Memory::AnonymousVMObject> block_vmobject = block->vmobject();
        // Point the scratch region at this block's pages and refresh the mapping.
        mapping_region.set_vmobject(block_vmobject);
        mapping_region.remap();
        if (write)
            TRY(current_buffer.read(mapping_region.vaddr().offset(offset_in_block).as_ptr(), 0, current_io_size));
        else
            TRY(current_buffer.write(mapping_region.vaddr().offset(offset_in_block).as_ptr(), 0, current_io_size));
        current_buffer = current_buffer.offset(current_io_size);
        nio += current_io_size;
        remaining_bytes -= current_io_size;
        block_index++;
        // Note: Clear offset_in_block to zero to ensure that if we started from a middle of
        // a block, then next writes are just going to happen from the start of each block until the end.
        offset_in_block = 0;
    }
    VERIFY(nio <= io_size);
    return nio;
}
  173. ErrorOr<void> TmpFSInode::truncate_to_block_index(size_t block_index)
  174. {
  175. VERIFY(m_inode_lock.is_locked());
  176. TRY(m_blocks.try_resize(block_index));
  177. return {};
  178. }
  179. ErrorOr<NonnullLockRefPtr<Inode>> TmpFSInode::lookup(StringView name)
  180. {
  181. MutexLocker locker(m_inode_lock, Mutex::Mode::Shared);
  182. VERIFY(is_directory());
  183. if (name == ".")
  184. return *this;
  185. if (name == "..") {
  186. if (auto parent = m_parent.strong_ref())
  187. return parent.release_nonnull();
  188. return ENOENT;
  189. }
  190. auto* child = find_child_by_name(name);
  191. if (!child)
  192. return ENOENT;
  193. return child->inode;
  194. }
  195. TmpFSInode::Child* TmpFSInode::find_child_by_name(StringView name)
  196. {
  197. for (auto& child : m_children) {
  198. if (child.name->view() == name)
  199. return &child;
  200. }
  201. return nullptr;
  202. }
// Flushing is a no-op for tmpfs: there is no backing store to write to.
ErrorOr<void> TmpFSInode::flush_metadata()
{
    // We don't really have any metadata that could become dirty.
    // The only reason we even call set_metadata_dirty() is
    // to let the watchers know we have updates. Once that is
    // switched to a different mechanism, we can stop ever marking
    // our metadata as dirty at all.
    set_metadata_dirty(false);
    return {};
}
  213. ErrorOr<void> TmpFSInode::chmod(mode_t mode)
  214. {
  215. MutexLocker locker(m_inode_lock);
  216. m_metadata.mode = mode;
  217. set_metadata_dirty(true);
  218. return {};
  219. }
  220. ErrorOr<void> TmpFSInode::chown(UserID uid, GroupID gid)
  221. {
  222. MutexLocker locker(m_inode_lock);
  223. m_metadata.uid = uid;
  224. m_metadata.gid = gid;
  225. set_metadata_dirty(true);
  226. return {};
  227. }
  228. ErrorOr<NonnullLockRefPtr<Inode>> TmpFSInode::create_child(StringView name, mode_t mode, dev_t dev, UserID uid, GroupID gid)
  229. {
  230. MutexLocker locker(m_inode_lock);
  231. time_t now = kgettimeofday().to_truncated_seconds();
  232. InodeMetadata metadata;
  233. metadata.mode = mode;
  234. metadata.uid = uid;
  235. metadata.gid = gid;
  236. metadata.atime = now;
  237. metadata.ctime = now;
  238. metadata.mtime = now;
  239. metadata.major_device = major_from_encoded_device(dev);
  240. metadata.minor_device = minor_from_encoded_device(dev);
  241. auto child = TRY(TmpFSInode::try_create(fs(), metadata, *this));
  242. TRY(add_child(*child, name, mode));
  243. return child;
  244. }
  245. ErrorOr<void> TmpFSInode::add_child(Inode& child, StringView name, mode_t)
  246. {
  247. VERIFY(is_directory());
  248. VERIFY(child.fsid() == fsid());
  249. if (name.length() > NAME_MAX)
  250. return ENAMETOOLONG;
  251. MutexLocker locker(m_inode_lock);
  252. for (auto const& existing_child : m_children) {
  253. if (existing_child.name->view() == name)
  254. return EEXIST;
  255. }
  256. auto name_kstring = TRY(KString::try_create(name));
  257. // Balanced by `delete` in remove_child()
  258. auto* child_entry = new (nothrow) Child { move(name_kstring), static_cast<TmpFSInode&>(child) };
  259. if (!child_entry)
  260. return ENOMEM;
  261. m_children.append(*child_entry);
  262. did_add_child(child.identifier(), name);
  263. return {};
  264. }
  265. ErrorOr<void> TmpFSInode::remove_child(StringView name)
  266. {
  267. MutexLocker locker(m_inode_lock);
  268. VERIFY(is_directory());
  269. if (name == "." || name == "..")
  270. return {};
  271. auto* child = find_child_by_name(name);
  272. if (!child)
  273. return ENOENT;
  274. auto child_id = child->inode->identifier();
  275. child->inode->did_delete_self();
  276. m_children.remove(*child);
  277. did_remove_child(child_id, name);
  278. // Balanced by `new` in add_child()
  279. delete child;
  280. return {};
  281. }
// Resize the file to `size` bytes: drop whole blocks past the new end, then
// zero the tail of the surviving last partial block so stale bytes cannot
// reappear if the file later grows back over that range.
ErrorOr<void> TmpFSInode::truncate(u64 size)
{
    MutexLocker locker(m_inode_lock);
    VERIFY(!is_directory());
    // Number of blocks needed to hold `size` bytes (ceiling division).
    u64 block_index = size / DataBlock::block_size + ((size % DataBlock::block_size == 0) ? 0 : 1);
    TRY(truncate_to_block_index(block_index));
    u64 last_possible_block_index = size / DataBlock::block_size;
    if ((size % DataBlock::block_size != 0) && m_blocks[last_possible_block_index]) {
        // Map the final (partial) block and clear everything past the new size.
        auto mapping_region = TRY(MM.allocate_kernel_region(DataBlock::block_size, "TmpFSInode Mapping Region"sv, Memory::Region::Access::Write, AllocationStrategy::Reserve));
        VERIFY(m_blocks[last_possible_block_index]);
        NonnullLockRefPtr<Memory::AnonymousVMObject> block_vmobject = m_blocks[last_possible_block_index]->vmobject();
        mapping_region->set_vmobject(block_vmobject);
        mapping_region->remap();
        memset(mapping_region->vaddr().offset(size % DataBlock::block_size).as_ptr(), 0, DataBlock::block_size - (size % DataBlock::block_size));
    }
    m_metadata.size = size;
    set_metadata_dirty(true);
    return {};
}
  301. ErrorOr<void> TmpFSInode::update_timestamps(Optional<time_t> atime, Optional<time_t> ctime, Optional<time_t> mtime)
  302. {
  303. MutexLocker locker(m_inode_lock);
  304. if (atime.has_value())
  305. m_metadata.atime = atime.value();
  306. if (ctime.has_value())
  307. m_metadata.ctime = ctime.value();
  308. if (mtime.has_value())
  309. m_metadata.ctime = mtime.value();
  310. set_metadata_dirty(true);
  311. return {};
  312. }
  313. }