RangeAllocator.cpp

/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include "RangeAllocator.h"
#include <AK/BinarySearch.h>
#include <AK/Checked.h>
#include <AK/Random.h>
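
// NOTE: When VM_GUARD_PAGES is defined, allocate_anywhere() pads each
//       allocation with one guard page on either side.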
#define VM_GUARD_PAGES
#define PAGE_MASK ((FlatPtr)0xfffff000u)

namespace UserspaceEmulator {

RangeAllocator::RangeAllocator()
    : m_total_range({}, 0)
{
}
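
// Hands the allocator the virtual address range [base, base + size) and marks
// all of it as available.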
void RangeAllocator::initialize_with_range(VirtualAddress base, size_t size)
{
    m_total_range = { base, size };
    m_available_ranges.append({ base, size });
}

void RangeAllocator::dump() const
{
    dbgln("RangeAllocator({})", this);
    for (auto const& range : m_available_ranges) {
        dbgln(" {:x} -> {:x}", range.base().get(), range.end().get() - 1);
    }
}
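
// Carves `range` out of the available range at `index`, keeping whatever is
// left over (at most two pieces) in the available list.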
void RangeAllocator::carve_at_index(int index, const Range& range)
{
    auto remaining_parts = m_available_ranges[index].carve(range);
    VERIFY(remaining_parts.size() >= 1);
    VERIFY(m_total_range.contains(remaining_parts[0]));
    m_available_ranges[index] = remaining_parts[0];
    if (remaining_parts.size() == 2) {
        VERIFY(m_total_range.contains(remaining_parts[1]));
        m_available_ranges.insert(index + 1, move(remaining_parts[1]));
    }
}
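
// Tries a number of random, aligned base addresses inside the total range and
// falls back to a plain first-fit allocation if none of them can be satisfied.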
Optional<Range> RangeAllocator::allocate_randomized(size_t size, size_t alignment)
{
    if (!size)
        return {};

    VERIFY((size % PAGE_SIZE) == 0);
    VERIFY((alignment % PAGE_SIZE) == 0);

    // FIXME: I'm sure there's a smarter way to do this.
    static constexpr size_t maximum_randomization_attempts = 1000;
    for (size_t i = 0; i < maximum_randomization_attempts; ++i) {
        VirtualAddress random_address { round_up_to_power_of_two(get_random<FlatPtr>(), alignment) };

        if (!m_total_range.contains(random_address, size))
            continue;

        auto range = allocate_specific(random_address, size);
        if (range.has_value())
            return range;
    }

    return allocate_anywhere(size, alignment);
}
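
// First-fit allocation: walk the available ranges in order and carve the
// allocation out of the first one large enough to hold it (including guard
// pages and alignment slack).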
Optional<Range> RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
{
    if (!size)
        return {};

    VERIFY((size % PAGE_SIZE) == 0);
    VERIFY((alignment % PAGE_SIZE) == 0);

#ifdef VM_GUARD_PAGES
    // NOTE: We pad VM allocations with a guard page on each side.
    if (Checked<size_t>::addition_would_overflow(size, PAGE_SIZE * 2))
        return {};

    size_t effective_size = size + PAGE_SIZE * 2;
    size_t offset_from_effective_base = PAGE_SIZE;
#else
    size_t effective_size = size;
    size_t offset_from_effective_base = 0;
#endif

    if (Checked<size_t>::addition_would_overflow(effective_size, alignment))
        return {};

    for (size_t i = 0; i < m_available_ranges.size(); ++i) {
        auto& available_range = m_available_ranges[i];

        // FIXME: This check is probably excluding some valid candidates when using a large alignment.
        if (available_range.size() < (effective_size + alignment))
            continue;

        FlatPtr initial_base = available_range.base().offset(offset_from_effective_base).get();
        FlatPtr aligned_base = round_up_to_power_of_two(initial_base, alignment);

        Range allocated_range(VirtualAddress(aligned_base), size);
        VERIFY(m_total_range.contains(allocated_range));

        if (available_range == allocated_range) {
            m_available_ranges.remove(i);
            return allocated_range;
        }
        carve_at_index(i, allocated_range);
        return allocated_range;
    }
    dbgln("RangeAllocator: Failed to allocate anywhere: size={}, alignment={}", size, alignment);
    return {};
}
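
// Allocates exactly [base, base + size) if it lies entirely within a single
// available range; otherwise returns an empty Optional.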
Optional<Range> RangeAllocator::allocate_specific(VirtualAddress base, size_t size)
{
    if (!size)
        return {};

    VERIFY(base.is_page_aligned());
    VERIFY((size % PAGE_SIZE) == 0);

    Range allocated_range(base, size);
    if (!m_total_range.contains(allocated_range)) {
        dbgln("Unallocatable mmap request?! {:p}+{:p}", base.get(), size);
        return {};
    }

    for (size_t i = 0; i < m_available_ranges.size(); ++i) {
        auto& available_range = m_available_ranges[i];
        if (!available_range.contains(base, size))
            continue;

        if (available_range == allocated_range) {
            m_available_ranges.remove(i);
            return allocated_range;
        }
        carve_at_index(i, allocated_range);
        return allocated_range;
    }
    return {};
}
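
// Returns `range` to the available list. The binary search looks for an
// available range that ends exactly where `range` begins so the two can be
// merged in place; afterwards we also try to merge with the following range.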
void RangeAllocator::deallocate(const Range& range)
{
    VERIFY(m_total_range.contains(range));
    VERIFY(range.size());
    VERIFY((range.size() % PAGE_SIZE) == 0);
    VERIFY(range.base() < range.end());
    VERIFY(!m_available_ranges.is_empty());

    size_t nearby_index = 0;
    auto* existing_range = binary_search(
        m_available_ranges.span(),
        range,
        &nearby_index,
        [](auto& a, auto& b) { return a.base().get() - b.end().get(); });

    size_t inserted_index = 0;
    if (existing_range) {
        existing_range->m_size += range.size();
        inserted_index = nearby_index;
    } else {
        m_available_ranges.insert_before_matching(
            Range(range), [&](auto& entry) {
                return entry.base() >= range.end();
            },
            nearby_index, &inserted_index);
    }

    if (inserted_index < (m_available_ranges.size() - 1)) {
        // We already merged with previous. Try to merge with next.
        auto& inserted_range = m_available_ranges[inserted_index];
        auto& next_range = m_available_ranges[inserted_index + 1];
        if (inserted_range.end() == next_range.base()) {
            inserted_range.m_size += next_range.size();
            m_available_ranges.remove(inserted_index + 1);
            return;
        }
    }
}
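
// Reserves a specific user range so the allocator won't hand it out later.
// The range is rounded out to page boundaries and the reservation is expected
// to succeed (hence the VERIFY).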
void RangeAllocator::reserve_user_range(VirtualAddress begin, size_t size)
{
    auto end = round_up_to_power_of_two(begin.offset(size).get(), PAGE_SIZE);
    auto allocated_range = allocate_specific(begin.page_base(), end - begin.page_base().get());
    VERIFY(allocated_range.has_value());
}

}