TmpFS.cpp

/*
 * Copyright (c) 2019-2020, Sergey Bugaev <bugaevc@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/FileSystem/TmpFS.h>
#include <Kernel/Process.h>
#include <LibC/limits.h>

namespace Kernel {

ErrorOr<NonnullRefPtr<TmpFS>> TmpFS::try_create()
{
    return adopt_nonnull_ref_or_enomem(new (nothrow) TmpFS);
}

TmpFS::TmpFS()
{
}

TmpFS::~TmpFS()
{
}

ErrorOr<void> TmpFS::initialize()
{
    m_root_inode = TRY(TmpFSInode::try_create_root(*this));
    return {};
}

Inode& TmpFS::root_inode()
{
    VERIFY(!m_root_inode.is_null());
    return *m_root_inode;
}
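
// The filesystem keeps a registry of all live inodes, keyed by inode index, so that
// get_inode() can resolve an InodeIdentifier without walking the directory tree.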
void TmpFS::register_inode(TmpFSInode& inode)
{
    MutexLocker locker(m_lock);
    VERIFY(inode.identifier().fsid() == fsid());

    auto index = inode.identifier().index();
    m_inodes.set(index, inode);
}

void TmpFS::unregister_inode(InodeIdentifier identifier)
{
    MutexLocker locker(m_lock);
    VERIFY(identifier.fsid() == fsid());

    m_inodes.remove(identifier.index());
}

unsigned TmpFS::next_inode_index()
{
    MutexLocker locker(m_lock);
    return m_next_inode_index++;
}

ErrorOr<NonnullRefPtr<Inode>> TmpFS::get_inode(InodeIdentifier identifier) const
{
    MutexLocker locker(m_lock, Mutex::Mode::Shared);
    VERIFY(identifier.fsid() == fsid());

    auto it = m_inodes.find(identifier.index());
    if (it == m_inodes.end())
        return ENOENT;
    return it->value;
}
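
// Each inode asks the filesystem for a fresh inode index on construction and records
// the resulting identifier in its own copy of the metadata.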
TmpFSInode::TmpFSInode(TmpFS& fs, InodeMetadata const& metadata, InodeIdentifier parent)
    : Inode(fs, fs.next_inode_index())
    , m_metadata(metadata)
    , m_parent(parent)
{
    m_metadata.inode = identifier();
}

TmpFSInode::~TmpFSInode()
{
}

ErrorOr<NonnullRefPtr<TmpFSInode>> TmpFSInode::try_create(TmpFS& fs, InodeMetadata const& metadata, InodeIdentifier parent)
{
    auto inode = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) TmpFSInode(fs, metadata, parent)));
    fs.register_inode(inode);
    return inode;
}
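
// The root inode is created with index 1 and a sticky, world-writable directory mode
// (S_IFDIR | S_ISVTX | 0777), i.e. the usual /tmp-style permissions.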
ErrorOr<NonnullRefPtr<TmpFSInode>> TmpFSInode::try_create_root(TmpFS& fs)
{
    InodeMetadata metadata;
    auto now = kgettimeofday().to_truncated_seconds();
    metadata.atime = now;
    metadata.ctime = now;
    metadata.mtime = now;
    metadata.mode = S_IFDIR | S_ISVTX | 0777;
    return try_create(fs, metadata, { fs.fsid(), 1 });
}

InodeMetadata TmpFSInode::metadata() const
{
    MutexLocker locker(m_inode_lock, Mutex::Mode::Shared);
    return m_metadata;
}

ErrorOr<void> TmpFSInode::traverse_as_directory(Function<ErrorOr<void>(FileSystem::DirectoryEntryView const&)> callback) const
{
    MutexLocker locker(m_inode_lock, Mutex::Mode::Shared);
    if (!is_directory())
        return ENOTDIR;

    TRY(callback({ ".", identifier(), 0 }));
    TRY(callback({ "..", m_parent, 0 }));

    for (auto& child : m_children) {
        TRY(callback({ child.name->view(), child.inode->identifier(), 0 }));
    }
    return {};
}
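
// Reads are clamped to the current file size; a missing content buffer means the file
// is empty, so such reads return 0 bytes.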
ErrorOr<size_t> TmpFSInode::read_bytes(off_t offset, size_t size, UserOrKernelBuffer& buffer, OpenFileDescription*) const
{
    MutexLocker locker(m_inode_lock, Mutex::Mode::Shared);
    VERIFY(!is_directory());
    VERIFY(offset >= 0);

    if (!m_content)
        return 0;
    if (offset >= m_metadata.size)
        return 0;
    if (static_cast<off_t>(size) > m_metadata.size - offset)
        size = m_metadata.size - offset;
    TRY(buffer.write(m_content->data() + offset, size));
    return size;
}
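
// Writes may extend the file: writing past the current end grows m_metadata.size, and
// if the new size exceeds the backing buffer's capacity the buffer is reallocated (see
// the inline comments on the growth strategy).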
ErrorOr<size_t> TmpFSInode::write_bytes(off_t offset, size_t size, UserOrKernelBuffer const& buffer, OpenFileDescription*)
{
    MutexLocker locker(m_inode_lock);
    VERIFY(!is_directory());
    VERIFY(offset >= 0);

    TRY(prepare_to_write_data());

    off_t old_size = m_metadata.size;
    off_t new_size = m_metadata.size;
    if (static_cast<off_t>(offset + size) > new_size)
        new_size = offset + size;

    if (static_cast<u64>(new_size) > (NumericLimits<size_t>::max() / 2)) // on 32-bit, size_t might be 32 bits while off_t is 64 bits
        return ENOMEM; // we won't be able to resize to this capacity

    if (new_size > old_size) {
        if (m_content && static_cast<off_t>(m_content->capacity()) >= new_size) {
            m_content->set_size(new_size);
        } else {
            // Grow the content buffer to 2x the new size to accommodate repeated write() calls.
            // Note that we're not actually committing physical memory to the buffer
            // until it's needed. We only grow VM here.
            // FIXME: Fix this so that no memcpy() is necessary, and we can just grow the
            //        KBuffer and it will add physical pages as needed while keeping the
            //        existing ones.
            auto tmp = TRY(KBuffer::try_create_with_size(new_size * 2));
            tmp->set_size(new_size);
            if (m_content)
                memcpy(tmp->data(), m_content->data(), old_size);
            m_content = move(tmp);
        }
        m_metadata.size = new_size;
        set_metadata_dirty(true);
    }

    TRY(buffer.read(m_content->data() + offset, size)); // TODO: partial reads?
    did_modify_contents();
    return size;
}
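
// Directory lookup: "." resolves to this inode, ".." is resolved through the filesystem
// by the parent's identifier, and everything else is a linear scan over m_children.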
ErrorOr<NonnullRefPtr<Inode>> TmpFSInode::lookup(StringView name)
{
    MutexLocker locker(m_inode_lock, Mutex::Mode::Shared);
    VERIFY(is_directory());

    if (name == ".")
        return *this;
    if (name == "..")
        return fs().get_inode(m_parent);

    auto* child = find_child_by_name(name);
    if (!child)
        return ENOENT;
    return child->inode;
}

TmpFSInode::Child* TmpFSInode::find_child_by_name(StringView name)
{
    for (auto& child : m_children) {
        if (child.name->view() == name)
            return &child;
    }
    return nullptr;
}

ErrorOr<void> TmpFSInode::flush_metadata()
{
    // We don't really have any metadata that could become dirty.
    // The only reason we even call set_metadata_dirty() is
    // to let the watchers know we have updates. Once that is
    // switched to a different mechanism, we can stop ever marking
    // our metadata as dirty at all.
    set_metadata_dirty(false);
    return {};
}

ErrorOr<void> TmpFSInode::chmod(mode_t mode)
{
    MutexLocker locker(m_inode_lock);
    m_metadata.mode = mode;
    set_metadata_dirty(true);
    return {};
}

ErrorOr<void> TmpFSInode::chown(UserID uid, GroupID gid)
{
    MutexLocker locker(m_inode_lock);
    m_metadata.uid = uid;
    m_metadata.gid = gid;
    set_metadata_dirty(true);
    return {};
}
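
// Creating a child allocates a fresh inode with the caller-supplied mode/uid/gid, stamps
// all timestamps with the current time, and then links it under the given name.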
ErrorOr<NonnullRefPtr<Inode>> TmpFSInode::create_child(StringView name, mode_t mode, dev_t dev, UserID uid, GroupID gid)
{
    MutexLocker locker(m_inode_lock);

    // TODO: Support creating devices on TmpFS.
    if (dev != 0)
        return ENOTSUP;

    time_t now = kgettimeofday().to_truncated_seconds();

    InodeMetadata metadata;
    metadata.mode = mode;
    metadata.uid = uid;
    metadata.gid = gid;
    metadata.atime = now;
    metadata.ctime = now;
    metadata.mtime = now;

    auto child = TRY(TmpFSInode::try_create(fs(), metadata, identifier()));
    TRY(add_child(*child, name, mode));
    return child;
}

ErrorOr<void> TmpFSInode::add_child(Inode& child, StringView name, mode_t)
{
    VERIFY(is_directory());
    VERIFY(child.fsid() == fsid());

    if (name.length() > NAME_MAX)
        return ENAMETOOLONG;

    MutexLocker locker(m_inode_lock);

    for (auto const& existing_child : m_children) {
        if (existing_child.name->view() == name)
            return EEXIST;
    }

    auto name_kstring = TRY(KString::try_create(name));
    // Balanced by `delete` in remove_child()
    auto* child_entry = new (nothrow) Child { move(name_kstring), static_cast<TmpFSInode&>(child) };
    if (!child_entry)
        return ENOMEM;

    m_children.append(*child_entry);
    did_add_child(child.identifier(), name);
    return {};
}

ErrorOr<void> TmpFSInode::remove_child(StringView name)
{
    MutexLocker locker(m_inode_lock);
    VERIFY(is_directory());

    if (name == "." || name == "..")
        return {};

    auto* child = find_child_by_name(name);
    if (!child)
        return ENOENT;

    auto child_id = child->inode->identifier();
    child->inode->did_delete_self();
    m_children.remove(*child);
    did_remove_child(child_id, name);
    // Balanced by `new` in add_child()
    delete child;
    return {};
}
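
// Truncation drops the buffer entirely for size 0, reuses spare capacity where possible
// (zero-filling any newly exposed bytes), and otherwise reallocates and copies the old
// contents.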
ErrorOr<void> TmpFSInode::truncate(u64 size)
{
    MutexLocker locker(m_inode_lock);
    VERIFY(!is_directory());

    if (size == 0) {
        m_content.clear();
    } else if (!m_content) {
        m_content = TRY(KBuffer::try_create_with_size(size));
    } else if (static_cast<size_t>(size) < m_content->capacity()) {
        size_t prev_size = m_metadata.size;
        m_content->set_size(size);
        if (prev_size < static_cast<size_t>(size))
            memset(m_content->data() + prev_size, 0, size - prev_size);
    } else {
        size_t prev_size = m_metadata.size;
        auto tmp = TRY(KBuffer::try_create_with_size(size));
        memcpy(tmp->data(), m_content->data(), prev_size);
        m_content = move(tmp);
    }

    m_metadata.size = size;
    set_metadata_dirty(true);
    return {};
}

ErrorOr<void> TmpFSInode::set_atime(time_t time)
{
    MutexLocker locker(m_inode_lock);
    m_metadata.atime = time;
    set_metadata_dirty(true);
    return {};
}

ErrorOr<void> TmpFSInode::set_ctime(time_t time)
{
    MutexLocker locker(m_inode_lock);
    m_metadata.ctime = time;
    set_metadata_dirty(true);
    return {};
}

ErrorOr<void> TmpFSInode::set_mtime(time_t t)
{
    MutexLocker locker(m_inode_lock);
    m_metadata.mtime = t;
    set_metadata_dirty(true);
    return {};
}
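
// Invoked when only one reference remains (the one held by the TmpFS inode registry);
// unregistering drops that reference and destroys the inode.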
void TmpFSInode::one_ref_left()
{
    // Destroy ourselves.
    fs().unregister_inode(identifier());
}

}