VirtualRangeAllocator.cpp

/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 * Copyright (c) 2022, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Checked.h>
#include <Kernel/Memory/VirtualRangeAllocator.h>
#include <Kernel/Random.h>

#define VM_GUARD_PAGES

namespace Kernel::Memory {

VirtualRangeAllocator::VirtualRangeAllocator()
    : m_total_range({}, 0)
{
}
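
// Point the allocator at the address range [base, base + size) and record the
// entire range as available for allocation.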
ErrorOr<void> VirtualRangeAllocator::initialize_with_range(VirtualAddress base, size_t size)
{
    m_total_range = { base, size };
    TRY(m_available_ranges.try_insert(base.get(), VirtualRange { base, size }));
    return {};
}
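
// Copy the parent's allocator state: the total range and every currently
// available range, taken under the parent's lock so the snapshot is consistent.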
ErrorOr<void> VirtualRangeAllocator::initialize_from_parent(VirtualRangeAllocator const& parent_allocator)
{
    SpinlockLocker lock(parent_allocator.m_lock);
    m_total_range = parent_allocator.m_total_range;
    m_available_ranges.clear();
    for (auto it = parent_allocator.m_available_ranges.begin(); !it.is_end(); ++it) {
        TRY(m_available_ranges.try_insert(it.key(), VirtualRange(*it)));
    }
    return {};
}

void VirtualRangeAllocator::dump() const
{
    VERIFY(m_lock.is_locked());
    dbgln("VirtualRangeAllocator({})", this);
    for (auto& range : m_available_ranges) {
        dbgln("    {:x} -> {:x}", range.base().get(), range.end().get() - 1);
    }
}
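
// Split the available range 'from' around the allocated 'range': remove 'from'
// from the free list and re-insert the one or two leftover pieces on either side.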
ErrorOr<void> VirtualRangeAllocator::carve_from_region(VirtualRange const& from, VirtualRange const& range)
{
    VERIFY(m_lock.is_locked());
    auto remaining_parts = from.carve(range);
    VERIFY(remaining_parts.size() >= 1);
    VERIFY(m_total_range.contains(remaining_parts[0]));
    m_available_ranges.remove(from.base().get());
    TRY(m_available_ranges.try_insert(remaining_parts[0].base().get(), remaining_parts[0]));
    if (remaining_parts.size() == 2) {
        VERIFY(m_total_range.contains(remaining_parts[1]));
        TRY(m_available_ranges.try_insert(remaining_parts[1].base().get(), remaining_parts[1]));
    }
    return {};
}
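
// Randomized (ASLR-style) allocation: try up to 1000 random, aligned base
// addresses inside the total range, and fall back to a regular first-fit
// search if none of them turns out to be free.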
ErrorOr<VirtualRange> VirtualRangeAllocator::try_allocate_randomized(size_t size, size_t alignment)
{
    if (!size)
        return EINVAL;

    VERIFY((size % PAGE_SIZE) == 0);
    VERIFY((alignment % PAGE_SIZE) == 0);

    // FIXME: I'm sure there's a smarter way to do this.
    constexpr size_t maximum_randomization_attempts = 1000;
    for (size_t i = 0; i < maximum_randomization_attempts; ++i) {
        VirtualAddress random_address { round_up_to_power_of_two(get_fast_random<FlatPtr>() % m_total_range.end().get(), alignment) };

        if (!m_total_range.contains(random_address, size))
            continue;

        auto range_or_error = try_allocate_specific(random_address, size);
        if (!range_or_error.is_error())
            return range_or_error.release_value();
    }

    return try_allocate_anywhere(size, alignment);
}
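
// First-fit search through the available ranges for a base that satisfies the
// requested alignment. With VM_GUARD_PAGES defined, the size check and base
// offset leave room for a guard page on each side of the allocation.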
ErrorOr<VirtualRange> VirtualRangeAllocator::try_allocate_anywhere(size_t size, size_t alignment)
{
    if (!size)
        return EINVAL;

    VERIFY((size % PAGE_SIZE) == 0);
    VERIFY((alignment % PAGE_SIZE) == 0);

#ifdef VM_GUARD_PAGES
    // NOTE: We pad VM allocations with a guard page on each side.
    if (Checked<size_t>::addition_would_overflow(size, PAGE_SIZE * 2))
        return EOVERFLOW;

    size_t effective_size = size + PAGE_SIZE * 2;
    size_t offset_from_effective_base = PAGE_SIZE;
#else
    size_t effective_size = size;
    size_t offset_from_effective_base = 0;
#endif

    if (Checked<size_t>::addition_would_overflow(effective_size, alignment))
        return EOVERFLOW;

    SpinlockLocker lock(m_lock);

    for (auto it = m_available_ranges.begin(); !it.is_end(); ++it) {
        auto& available_range = *it;
        // FIXME: This check is probably excluding some valid candidates when using a large alignment.
        if (available_range.size() < (effective_size + alignment))
            continue;

        FlatPtr initial_base = available_range.base().offset(offset_from_effective_base).get();
        FlatPtr aligned_base = round_up_to_power_of_two(initial_base, alignment);

        VirtualRange const allocated_range(VirtualAddress(aligned_base), size);

        VERIFY(m_total_range.contains(allocated_range));

        if (available_range == allocated_range) {
            m_available_ranges.remove(it.key());
            return allocated_range;
        }
        TRY(carve_from_region(*it, allocated_range));
        return allocated_range;
    }
    dmesgln("VirtualRangeAllocator: Failed to allocate anywhere: size={}, alignment={}", size, alignment);
    return ENOMEM;
}
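
// Allocate exactly [base, base + size), failing if the range falls outside the
// total range (ENOMEM) or if any part of it is not currently available (EEXIST).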
ErrorOr<VirtualRange> VirtualRangeAllocator::try_allocate_specific(VirtualAddress base, size_t size)
{
    if (!size)
        return EINVAL;

    VERIFY(base.is_page_aligned());
    VERIFY((size % PAGE_SIZE) == 0);

    VirtualRange const allocated_range(base, size);
    if (!m_total_range.contains(allocated_range))
        return ENOMEM;

    SpinlockLocker lock(m_lock);
    auto available_range = m_available_ranges.find_largest_not_above(base.get());
    if (!available_range)
        return EEXIST;
    if (!available_range->contains(allocated_range))
        return EEXIST;
    if (*available_range == allocated_range) {
        m_available_ranges.remove(available_range->base().get());
        return allocated_range;
    }
    TRY(carve_from_region(*available_range, allocated_range));
    return allocated_range;
}
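
// Return a range to the free list, coalescing it with the immediately
// preceding and/or following available range when they are adjacent.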
void VirtualRangeAllocator::deallocate(VirtualRange const& range)
{
    SpinlockLocker lock(m_lock);
    VERIFY(m_total_range.contains(range));
    VERIFY(range.size());
    VERIFY((range.size() % PAGE_SIZE) == 0);
    VERIFY(range.base() < range.end());
    VERIFY(!m_available_ranges.is_empty());

    VirtualRange merged_range = range;

    {
        // Try merging with preceding range.
        auto* preceding_range = m_available_ranges.find_largest_not_above(range.base().get());
        if (preceding_range && preceding_range->end() == range.base()) {
            preceding_range->m_size += range.size();
            merged_range = *preceding_range;
        } else {
            m_available_ranges.insert(range.base().get(), range);
        }
    }

    {
        // Try merging with following range.
        auto* following_range = m_available_ranges.find_largest_not_above(range.end().get());
        if (following_range && merged_range.end() == following_range->base()) {
            auto* existing_range = m_available_ranges.find_largest_not_above(range.base().get());
            VERIFY(existing_range->base() == merged_range.base());
            existing_range->m_size += following_range->size();
            m_available_ranges.remove(following_range->base().get());
        }
    }
}

}
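
// Usage sketch (illustrative only, not part of the kernel sources): the
// enclosing function name and the chosen base/size below are made up; only the
// VirtualRangeAllocator calls mirror the API defined in this file.
//
//     ErrorOr<void> example()
//     {
//         Kernel::Memory::VirtualRangeAllocator allocator;
//         TRY(allocator.initialize_with_range(VirtualAddress(0x10000000), 0x10000000));
//
//         // Grab any four-page, page-aligned range...
//         auto range = TRY(allocator.try_allocate_anywhere(4 * PAGE_SIZE, PAGE_SIZE));
//
//         // ...and hand it back, letting deallocate() merge it with its neighbors.
//         allocator.deallocate(range);
//         return {};
//     }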