RangeAllocator.cpp

/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/BinarySearch.h>
#include <AK/Checked.h>
#include <Kernel/Random.h>
#include <Kernel/VM/RangeAllocator.h>

#define VM_GUARD_PAGES

namespace Kernel {

RangeAllocator::RangeAllocator()
    : m_total_range({}, 0)
{
}

void RangeAllocator::initialize_with_range(VirtualAddress base, size_t size)
{
    m_total_range = { base, size };
    m_available_ranges.append({ base, size });
}

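// Clone the parent allocator's total range and free list, so this allocator
// starts out with an identical view of the available address space
// (used when a new address space is derived from an existing one, e.g. on fork()).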
void RangeAllocator::initialize_from_parent(RangeAllocator const& parent_allocator)
{
    ScopedSpinLock lock(parent_allocator.m_lock);
    m_total_range = parent_allocator.m_total_range;
    m_available_ranges = parent_allocator.m_available_ranges;
}

RangeAllocator::~RangeAllocator()
{
}

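// Debugging helper: print every currently available (free) range.
// The caller must already hold m_lock.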
void RangeAllocator::dump() const
{
    VERIFY(m_lock.is_locked());
    dbgln("RangeAllocator({})", this);
    for (auto& range : m_available_ranges) {
        dbgln("    {:x} -> {:x}", range.base().get(), range.end().get() - 1);
    }
}

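// Carve `range` out of the free range at `index`, replacing that entry with
// whatever is left over: one remaining piece, or two pieces (with the upper
// piece inserted right after) if the allocation came out of the middle.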
void RangeAllocator::carve_at_index(int index, Range const& range)
{
    VERIFY(m_lock.is_locked());
    auto remaining_parts = m_available_ranges[index].carve(range);
    VERIFY(remaining_parts.size() >= 1);
    VERIFY(m_total_range.contains(remaining_parts[0]));
    m_available_ranges[index] = remaining_parts[0];
    if (remaining_parts.size() == 2) {
        VERIFY(m_total_range.contains(remaining_parts[1]));
        m_available_ranges.insert(index + 1, move(remaining_parts[1]));
    }
}

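// Allocate `size` bytes at a randomized base address (e.g. for ASLR): try a
// number of random, suitably aligned candidates inside the total range, and
// fall back to a regular allocate_anywhere() if none of them can be satisfied.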
Optional<Range> RangeAllocator::allocate_randomized(size_t size, size_t alignment)
{
    if (!size)
        return {};

    VERIFY((size % PAGE_SIZE) == 0);
    VERIFY((alignment % PAGE_SIZE) == 0);

    // FIXME: I'm sure there's a smarter way to do this.
    static constexpr size_t maximum_randomization_attempts = 1000;
    for (size_t i = 0; i < maximum_randomization_attempts; ++i) {
        VirtualAddress random_address { round_up_to_power_of_two(get_fast_random<FlatPtr>(), alignment) };

        if (!m_total_range.contains(random_address, size))
            continue;

        auto range = allocate_specific(random_address, size);
        if (range.has_value())
            return range;
    }

    return allocate_anywhere(size, alignment);
}

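// First-fit allocation: scan the sorted free list for the first range large
// enough to hold the request (including guard pages and worst-case alignment
// slack), then carve the aligned allocation out of it.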
Optional<Range> RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
{
    if (!size)
        return {};

    VERIFY((size % PAGE_SIZE) == 0);
    VERIFY((alignment % PAGE_SIZE) == 0);

#ifdef VM_GUARD_PAGES
    // NOTE: We pad VM allocations with a guard page on each side.
    if (Checked<size_t>::addition_would_overflow(size, PAGE_SIZE * 2))
        return {};

    size_t effective_size = size + PAGE_SIZE * 2;
    size_t offset_from_effective_base = PAGE_SIZE;
#else
    size_t effective_size = size;
    size_t offset_from_effective_base = 0;
#endif

    if (Checked<size_t>::addition_would_overflow(effective_size, alignment))
        return {};

    ScopedSpinLock lock(m_lock);

    for (size_t i = 0; i < m_available_ranges.size(); ++i) {
        auto& available_range = m_available_ranges[i];
        // FIXME: This check is probably excluding some valid candidates when using a large alignment.
        if (available_range.size() < (effective_size + alignment))
            continue;

        FlatPtr initial_base = available_range.base().offset(offset_from_effective_base).get();
        FlatPtr aligned_base = round_up_to_power_of_two(initial_base, alignment);

        Range allocated_range(VirtualAddress(aligned_base), size);
        VERIFY(m_total_range.contains(allocated_range));

        if (available_range == allocated_range) {
            m_available_ranges.remove(i);
            return allocated_range;
        }
        carve_at_index(i, allocated_range);
        return allocated_range;
    }
    dmesgln("RangeAllocator: Failed to allocate anywhere: size={}, alignment={}", size, alignment);
    return {};
}

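// Allocate exactly [base, base + size). Succeeds only if that range is fully
// contained within a single free range; otherwise returns an empty Optional.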
Optional<Range> RangeAllocator::allocate_specific(VirtualAddress base, size_t size)
{
    if (!size)
        return {};

    VERIFY(base.is_page_aligned());
    VERIFY((size % PAGE_SIZE) == 0);

    Range allocated_range(base, size);
    ScopedSpinLock lock(m_lock);
    for (size_t i = 0; i < m_available_ranges.size(); ++i) {
        auto& available_range = m_available_ranges[i];
        VERIFY(m_total_range.contains(allocated_range));
        if (!available_range.contains(base, size))
            continue;

        if (available_range == allocated_range) {
            m_available_ranges.remove(i);
            return allocated_range;
        }
        carve_at_index(i, allocated_range);
        return allocated_range;
    }
    return {};
}

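// Return `range` to the free list. If an existing free range ends exactly where
// `range` begins, grow that range instead of inserting a new entry; afterwards,
// try to coalesce with the following free range so adjacent free space stays merged.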
void RangeAllocator::deallocate(Range const& range)
{
    ScopedSpinLock lock(m_lock);
    VERIFY(m_total_range.contains(range));
    VERIFY(range.size());
    VERIFY((range.size() % PAGE_SIZE) == 0);
    VERIFY(range.base() < range.end());
    VERIFY(!m_available_ranges.is_empty());

    size_t nearby_index = 0;
    auto* existing_range = binary_search(
        m_available_ranges.span(),
        range,
        &nearby_index,
        [](auto& a, auto& b) {
            // NOTE: Compare explicitly instead of subtracting: FlatPtr is unsigned,
            //       so a plain (a - b) could never be negative and would confuse the search.
            if (a.base().get() < b.end().get())
                return -1;
            if (a.base().get() > b.end().get())
                return 1;
            return 0;
        });

    size_t inserted_index = 0;
    if (existing_range) {
        // The deallocated range directly follows an existing free range; extend it.
        existing_range->m_size += range.size();
        inserted_index = nearby_index;
    } else {
        m_available_ranges.insert_before_matching(
            Range(range), [&](auto& entry) {
                return entry.base() >= range.end();
            },
            nearby_index, &inserted_index);
    }

    if (inserted_index < (m_available_ranges.size() - 1)) {
        // We already merged with previous. Try to merge with next.
        auto& inserted_range = m_available_ranges[inserted_index];
        auto& next_range = m_available_ranges[inserted_index + 1];
        if (inserted_range.end() == next_range.base()) {
            inserted_range.m_size += next_range.size();
            m_available_ranges.remove(inserted_index + 1);
            return;
        }
    }
}

}