Region.h
/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once
#include <AK/EnumBits.h>
#include <AK/IntrusiveList.h>
#include <AK/Weakable.h>
#include <Kernel/Forward.h>
#include <Kernel/KString.h>
#include <Kernel/Memory/PageFaultResponse.h>
#include <Kernel/Memory/VirtualRangeAllocator.h>
#include <Kernel/Sections.h>
#include <Kernel/UnixTypes.h>

namespace Kernel {
class PageFault;
}

namespace Kernel::Memory {

enum class ShouldFlushTLB {
    No,
    Yes,
};

class Region final
    : public Weakable<Region> {
    friend class MemoryManager;

public:
    enum Access : u8 {
        None = 0,
        Read = 1,
        Write = 2,
        Execute = 4,
        // The HasBeen* bits are the protection bits shifted left by 4.
        // set_access_bit() mirrors each granted protection into its
        // HasBeen* counterpart; clearing a protection leaves the
        // HasBeen* bit set, so these record every protection the
        // region has ever had.
        HasBeenReadable = 16,
        HasBeenWritable = 32,
        HasBeenExecutable = 64,
        ReadOnly = Read,
        ReadWrite = Read | Write,
        ReadWriteExecute = Read | Write | Execute,
    };

    enum class Cacheable {
        No = 0,
        Yes,
    };

    static ErrorOr<NonnullOwnPtr<Region>> try_create_user_accessible(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable, bool shared);
    static ErrorOr<NonnullOwnPtr<Region>> try_create_kernel_only(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable = Cacheable::Yes);

    ~Region();

    [[nodiscard]] VirtualRange const& range() const { return m_range; }
    [[nodiscard]] VirtualAddress vaddr() const { return m_range.base(); }
    [[nodiscard]] size_t size() const { return m_range.size(); }
    [[nodiscard]] bool is_readable() const { return (m_access & Access::Read) == Access::Read; }
    [[nodiscard]] bool is_writable() const { return (m_access & Access::Write) == Access::Write; }
    [[nodiscard]] bool is_executable() const { return (m_access & Access::Execute) == Access::Execute; }

    [[nodiscard]] bool has_been_readable() const { return (m_access & Access::HasBeenReadable) == Access::HasBeenReadable; }
    [[nodiscard]] bool has_been_writable() const { return (m_access & Access::HasBeenWritable) == Access::HasBeenWritable; }
    [[nodiscard]] bool has_been_executable() const { return (m_access & Access::HasBeenExecutable) == Access::HasBeenExecutable; }

    [[nodiscard]] bool is_cacheable() const { return m_cacheable; }
    [[nodiscard]] StringView name() const { return m_name ? m_name->view() : StringView {}; }
    [[nodiscard]] OwnPtr<KString> take_name() { return move(m_name); }
    [[nodiscard]] Region::Access access() const { return static_cast<Region::Access>(m_access); }

    void set_name(OwnPtr<KString> name) { m_name = move(name); }

    [[nodiscard]] VMObject const& vmobject() const { return *m_vmobject; }
    [[nodiscard]] VMObject& vmobject() { return *m_vmobject; }
    void set_vmobject(NonnullRefPtr<VMObject>&&);

    [[nodiscard]] bool is_shared() const { return m_shared; }
    void set_shared(bool shared) { m_shared = shared; }

    [[nodiscard]] bool is_stack() const { return m_stack; }
    void set_stack(bool stack) { m_stack = stack; }

    [[nodiscard]] bool is_mmap() const { return m_mmap; }
    void set_mmap(bool mmap) { m_mmap = mmap; }

    [[nodiscard]] bool is_user() const { return !is_kernel(); }
    [[nodiscard]] bool is_kernel() const { return vaddr().get() < USER_RANGE_BASE || vaddr().get() >= kernel_mapping_base; }

    PageFaultResponse handle_fault(PageFault const&);

    ErrorOr<NonnullOwnPtr<Region>> try_clone();
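
    // Note (not in the original header): try_clone() is the fork() path.
    // For a non-shared region, the clone keeps referencing the same
    // physical pages and both sides are marked copy-on-write (see
    // should_cow()/set_should_cow() below); the actual page copy is
    // deferred until handle_cow_fault() fires on the first write.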

    [[nodiscard]] bool contains(VirtualAddress vaddr) const
    {
        return m_range.contains(vaddr);
    }

    [[nodiscard]] bool contains(VirtualRange const& range) const
    {
        return m_range.contains(range);
    }

    [[nodiscard]] unsigned page_index_from_address(VirtualAddress vaddr) const
    {
        return (vaddr - m_range.base()).get() / PAGE_SIZE;
    }

    [[nodiscard]] VirtualAddress vaddr_from_page_index(size_t page_index) const
    {
        return vaddr().offset(page_index * PAGE_SIZE);
    }
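
    // Worked example (hypothetical values, assuming PAGE_SIZE == 4 KiB):
    // for a region based at 0x20000000,
    //     page_index_from_address(VirtualAddress(0x20003000)) == 3
    //     vaddr_from_page_index(3) == VirtualAddress(0x20003000)
    // i.e. the two functions are inverses for addresses inside the region.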

    // Translates a VMObject-relative page index into a Region-relative one.
    // Returns true if this region covers that page; otherwise clamps 'index'
    // and returns false so callers can skip pages this region doesn't map.
    [[nodiscard]] bool translate_vmobject_page(size_t& index) const
    {
        auto first_index = first_page_index();
        if (index < first_index) {
            index = first_index;
            return false;
        }
        index -= first_index;
        auto total_page_count = this->page_count();
        if (index >= total_page_count) {
            index = first_index + total_page_count - 1;
            return false;
        }
        return true;
    }
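
    // Example (hypothetical values): for a region mapping pages 4..7 of its
    // VMObject (m_offset_in_vmobject == 4 * PAGE_SIZE, page_count() == 4):
    //     size_t i = 5; translate_vmobject_page(i); // true,  i == 1
    //     size_t j = 9; translate_vmobject_page(j); // false, j clamped to 7
    // translate_to_vmobject_page() below converts in the other direction.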

    [[nodiscard]] ALWAYS_INLINE size_t translate_to_vmobject_page(size_t page_index) const
    {
        return first_page_index() + page_index;
    }

    [[nodiscard]] size_t first_page_index() const
    {
        return m_offset_in_vmobject / PAGE_SIZE;
    }

    [[nodiscard]] size_t page_count() const
    {
        return size() / PAGE_SIZE;
    }

    PhysicalPage const* physical_page(size_t index) const;
    RefPtr<PhysicalPage>& physical_page_slot(size_t index);

    [[nodiscard]] size_t offset_in_vmobject() const
    {
        return m_offset_in_vmobject;
    }

    [[nodiscard]] size_t offset_in_vmobject_from_vaddr(VirtualAddress vaddr) const
    {
        return m_offset_in_vmobject + vaddr.get() - this->vaddr().get();
    }

    [[nodiscard]] size_t amount_resident() const;
    [[nodiscard]] size_t amount_shared() const;
    [[nodiscard]] size_t amount_dirty() const;

    [[nodiscard]] bool should_cow(size_t page_index) const;
    void set_should_cow(size_t page_index, bool);

    [[nodiscard]] size_t cow_pages() const;

    void set_readable(bool b) { set_access_bit(Access::Read, b); }
    void set_writable(bool b) { set_access_bit(Access::Write, b); }
    void set_executable(bool b) { set_access_bit(Access::Execute, b); }

    void unsafe_clear_access() { m_access = Region::None; }

    void set_page_directory(PageDirectory&);
    ErrorOr<void> map(PageDirectory&, ShouldFlushTLB = ShouldFlushTLB::Yes);

    enum class ShouldDeallocateVirtualRange {
        No,
        Yes,
    };
    void unmap(ShouldDeallocateVirtualRange = ShouldDeallocateVirtualRange::Yes);

    void remap();

    void clear_to_zero();

    [[nodiscard]] bool is_syscall_region() const { return m_syscall_region; }
    void set_syscall_region(bool b) { m_syscall_region = b; }

private:
    Region(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);

    [[nodiscard]] bool remap_vmobject_page(size_t page_index, bool with_flush = true);
    [[nodiscard]] bool do_remap_vmobject_page(size_t page_index, bool with_flush = true);

    void set_access_bit(Access access, bool b)
    {
        if (b)
            m_access |= access | (access << 4); // also set the sticky HasBeen* bit
        else
            m_access &= ~access; // the HasBeen* bit deliberately survives
    }
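
    // Example (a sketch, not in the original header): granting and then
    // revoking write access leaves a permanent trace in m_access:
    //     region.set_writable(true);  // m_access: Write | HasBeenWritable
    //     region.set_writable(false); // Write cleared, HasBeenWritable kept
    // has_been_writable()/has_been_executable() let the syscall layer enforce
    // W^X-style rules such as "a once-writable mapping may never become
    // executable" without keeping any history outside m_access.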

    [[nodiscard]] PageFaultResponse handle_cow_fault(size_t page_index);
    [[nodiscard]] PageFaultResponse handle_inode_fault(size_t page_index);
    [[nodiscard]] PageFaultResponse handle_zero_fault(size_t page_index);

    [[nodiscard]] bool map_individual_page_impl(size_t page_index);

    RefPtr<PageDirectory> m_page_directory;
    VirtualRange m_range;
    size_t m_offset_in_vmobject { 0 };
    NonnullRefPtr<VMObject> m_vmobject;
    OwnPtr<KString> m_name;
    u8 m_access { Region::None };
    bool m_shared : 1 { false };
    bool m_cacheable : 1 { false };
    bool m_stack : 1 { false };
    bool m_mmap : 1 { false };
    bool m_syscall_region : 1 { false };

    IntrusiveListNode<Region> m_memory_manager_list_node;
    IntrusiveListNode<Region> m_vmobject_list_node;

public:
    using ListInMemoryManager = IntrusiveList<&Region::m_memory_manager_list_node>;
    using ListInVMObject = IntrusiveList<&Region::m_vmobject_list_node>;
};

AK_ENUM_BITWISE_OPERATORS(Region::Access)

inline constexpr Region::Access prot_to_region_access_flags(int prot)
{
    Region::Access access = Region::Access::None;
    if ((prot & PROT_READ) == PROT_READ)
        access |= Region::Access::Read;
    if ((prot & PROT_WRITE) == PROT_WRITE)
        access |= Region::Access::Write;
    if ((prot & PROT_EXEC) == PROT_EXEC)
        access |= Region::Access::Execute;
    return access;
}

inline constexpr int region_access_flags_to_prot(Region::Access access)
{
    int prot = 0;
    if ((access & Region::Access::Read) == Region::Access::Read)
        prot |= PROT_READ;
    if ((access & Region::Access::Write) == Region::Access::Write)
        prot |= PROT_WRITE;
    if ((access & Region::Access::Execute) == Region::Access::Execute)
        prot |= PROT_EXEC;
    return prot;
}
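
// A round-trip sanity sketch (not part of the original header; assumes the
// operators generated by AK_ENUM_BITWISE_OPERATORS are constexpr, as they
// are in AK/EnumBits.h), showing the two conversions above are mutual
// inverses on the R/W/X bits:
//
//     static_assert(region_access_flags_to_prot(
//                       prot_to_region_access_flags(PROT_READ | PROT_WRITE))
//                   == (PROT_READ | PROT_WRITE));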

}