// AnonymousVMObject.h
/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */
#pragma once

#include <Kernel/PhysicalAddress.h>
#include <Kernel/VM/AllocationStrategy.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PageFaultResponse.h>
#include <Kernel/VM/PurgeablePageRanges.h>
#include <Kernel/VM/VMObject.h>
  13. namespace Kernel {
  14. class AnonymousVMObject final : public VMObject {
  15. friend class PurgeablePageRanges;
  16. public:
  17. virtual ~AnonymousVMObject() override;
  18. static RefPtr<AnonymousVMObject> try_create_with_size(size_t, AllocationStrategy);
  19. static RefPtr<AnonymousVMObject> try_create_for_physical_range(PhysicalAddress paddr, size_t size);
  20. static RefPtr<AnonymousVMObject> try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>>);
  21. virtual RefPtr<VMObject> try_clone() override;
  22. [[nodiscard]] NonnullRefPtr<PhysicalPage> allocate_committed_page(Badge<Region>, size_t);
  23. PageFaultResponse handle_cow_fault(size_t, VirtualAddress);
  24. size_t cow_pages() const;
  25. bool should_cow(size_t page_index, bool) const;
  26. void set_should_cow(size_t page_index, bool);
  27. void register_purgeable_page_ranges(PurgeablePageRanges&);
  28. void unregister_purgeable_page_ranges(PurgeablePageRanges&);
  29. int purge();
  30. int purge_with_interrupts_disabled(Badge<MemoryManager>);
  31. bool is_any_volatile() const;
  32. template<IteratorFunction<VolatilePageRange const&> F>
  33. IterationDecision for_each_volatile_range(F f) const
  34. {
  35. VERIFY(m_lock.is_locked());
  36. // This is a little ugly. Basically, we're trying to find the
  37. // volatile ranges that all share, because those are the only
  38. // pages we can actually purge
  39. for (auto* purgeable_range : m_purgeable_ranges) {
  40. ScopedSpinLock purgeable_lock(purgeable_range->m_volatile_ranges_lock);
  41. for (auto& r1 : purgeable_range->volatile_ranges().ranges()) {
  42. VolatilePageRange range(r1);
  43. for (auto* purgeable_range2 : m_purgeable_ranges) {
  44. if (purgeable_range2 == purgeable_range)
  45. continue;
  46. ScopedSpinLock purgeable2_lock(purgeable_range2->m_volatile_ranges_lock);
  47. if (purgeable_range2->is_empty()) {
  48. // If just one doesn't allow any purging, we can
  49. // immediately bail
  50. return IterationDecision::Continue;
  51. }
  52. for (auto const& r2 : purgeable_range2->volatile_ranges().ranges()) {
  53. range = range.intersected(r2);
  54. if (range.is_empty())
  55. break;
  56. }
  57. if (range.is_empty())
  58. break;
  59. }
  60. if (range.is_empty())
  61. continue;
  62. IterationDecision decision = f(range);
  63. if (decision != IterationDecision::Continue)
  64. return decision;
  65. }
  66. }
  67. return IterationDecision::Continue;
  68. }
  69. template<IteratorFunction<VolatilePageRange const&> F>
  70. IterationDecision for_each_nonvolatile_range(F f) const
  71. {
  72. size_t base = 0;
  73. for_each_volatile_range([&](VolatilePageRange const& volatile_range) {
  74. if (volatile_range.base == base)
  75. return IterationDecision::Continue;
  76. IterationDecision decision = f(VolatilePageRange { base, volatile_range.base - base });
  77. if (decision != IterationDecision::Continue)
  78. return decision;
  79. base = volatile_range.base + volatile_range.count;
  80. return IterationDecision::Continue;
  81. });
  82. if (base < page_count())
  83. return f(VolatilePageRange { base, page_count() - base });
  84. return IterationDecision::Continue;
  85. }
  86. template<VoidFunction<VolatilePageRange const&> F>
  87. IterationDecision for_each_volatile_range(F f) const
  88. {
  89. return for_each_volatile_range([&](auto& range) {
  90. f(range);
  91. return IterationDecision::Continue;
  92. });
  93. }
  94. template<VoidFunction<VolatilePageRange const&> F>
  95. IterationDecision for_each_nonvolatile_range(F f) const
  96. {
  97. return for_each_nonvolatile_range([&](auto range) {
  98. f(move(range));
  99. return IterationDecision::Continue;
  100. });
  101. }
  102. private:
  103. explicit AnonymousVMObject(size_t, AllocationStrategy);
  104. explicit AnonymousVMObject(PhysicalAddress, size_t);
  105. explicit AnonymousVMObject(Span<NonnullRefPtr<PhysicalPage>>);
  106. explicit AnonymousVMObject(AnonymousVMObject const&);
  107. virtual StringView class_name() const override { return "AnonymousVMObject"sv; }
  108. int purge_impl();
  109. void update_volatile_cache();
  110. void set_was_purged(VolatilePageRange const&);
  111. size_t remove_lazy_commit_pages(VolatilePageRange const&);
  112. void range_made_volatile(VolatilePageRange const&);
  113. void range_made_nonvolatile(VolatilePageRange const&);
  114. size_t count_needed_commit_pages_for_nonvolatile_range(VolatilePageRange const&);
  115. size_t mark_committed_pages_for_nonvolatile_range(VolatilePageRange const&, size_t);
  116. bool is_nonvolatile(size_t page_index);
  117. AnonymousVMObject& operator=(AnonymousVMObject const&) = delete;
  118. AnonymousVMObject& operator=(AnonymousVMObject&&) = delete;
  119. AnonymousVMObject(AnonymousVMObject&&) = delete;
  120. virtual bool is_anonymous() const override { return true; }
  121. Bitmap& ensure_cow_map();
  122. void ensure_or_reset_cow_map();
  123. VolatilePageRanges m_volatile_ranges_cache;
  124. bool m_volatile_ranges_cache_dirty { true };
  125. Vector<PurgeablePageRanges*> m_purgeable_ranges;
  126. size_t m_unused_committed_pages { 0 };
  127. Bitmap m_cow_map;
  128. // We share a pool of committed cow-pages with clones
  129. RefPtr<CommittedCowPages> m_shared_committed_cow_pages;
  130. };
  131. }