/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Checked.h>
#include <Kernel/Memory/VirtualRangeAllocator.h>
#include <Kernel/Random.h>

#define VM_GUARD_PAGES

namespace Kernel::Memory {

VirtualRangeAllocator::VirtualRangeAllocator()
    : m_total_range({}, 0)
{
}

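// Set the total address range managed by this allocator and mark all of it as available.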
void VirtualRangeAllocator::initialize_with_range(VirtualAddress base, size_t size)
{
    m_total_range = { base, size };
    m_available_ranges.insert(base.get(), VirtualRange { base, size });
}

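// Copy the parent allocator's state: the same total range and a copy of its currently available ranges.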
void VirtualRangeAllocator::initialize_from_parent(VirtualRangeAllocator const& parent_allocator)
{
    SpinlockLocker lock(parent_allocator.m_lock);
    m_total_range = parent_allocator.m_total_range;
    m_available_ranges.clear();
    for (auto it = parent_allocator.m_available_ranges.begin(); !it.is_end(); ++it) {
        m_available_ranges.insert(it.key(), *it);
    }
}

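// Debug helper: print every currently available range. Expects m_lock to already be held by the caller.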
void VirtualRangeAllocator::dump() const
{
    VERIFY(m_lock.is_locked());
    dbgln("VirtualRangeAllocator({})", this);
    for (auto& range : m_available_ranges) {
        dbgln("    {:x} -> {:x}", range.base().get(), range.end().get() - 1);
    }
}

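// Carve `range` out of the available range at `it`: the old entry is removed and the one or two leftover pieces are reinserted.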
void VirtualRangeAllocator::carve_at_iterator(auto& it, VirtualRange const& range)
{
    VERIFY(m_lock.is_locked());
    auto remaining_parts = (*it).carve(range);
    VERIFY(remaining_parts.size() >= 1);
    VERIFY(m_total_range.contains(remaining_parts[0]));
    m_available_ranges.remove(it.key());
    m_available_ranges.insert(remaining_parts[0].base().get(), remaining_parts[0]);
    if (remaining_parts.size() == 2) {
        VERIFY(m_total_range.contains(remaining_parts[1]));
        m_available_ranges.insert(remaining_parts[1].base().get(), remaining_parts[1]);
    }
}

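// Try up to maximum_randomization_attempts random, alignment-rounded base addresses inside the total range, then fall back to allocate_anywhere().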
Optional<VirtualRange> VirtualRangeAllocator::allocate_randomized(size_t size, size_t alignment)
{
    if (!size)
        return {};

    VERIFY((size % PAGE_SIZE) == 0);
    VERIFY((alignment % PAGE_SIZE) == 0);

    // FIXME: I'm sure there's a smarter way to do this.
    static constexpr size_t maximum_randomization_attempts = 1000;
    for (size_t i = 0; i < maximum_randomization_attempts; ++i) {
        VirtualAddress random_address { round_up_to_power_of_two(get_fast_random<FlatPtr>() % m_total_range.end().get(), alignment) };

        if (!m_total_range.contains(random_address, size))
            continue;

        auto range = allocate_specific(random_address, size);
        if (range.has_value())
            return range;
    }

    return allocate_anywhere(size, alignment);
}

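// First-fit allocation: walk the available ranges and carve the request (plus guard pages, if enabled) out of the first range large enough to satisfy both size and alignment.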
Optional<VirtualRange> VirtualRangeAllocator::allocate_anywhere(size_t size, size_t alignment)
{
    if (!size)
        return {};

    VERIFY((size % PAGE_SIZE) == 0);
    VERIFY((alignment % PAGE_SIZE) == 0);

#ifdef VM_GUARD_PAGES
    // NOTE: We pad VM allocations with a guard page on each side.
    if (Checked<size_t>::addition_would_overflow(size, PAGE_SIZE * 2))
        return {};

    size_t effective_size = size + PAGE_SIZE * 2;
    size_t offset_from_effective_base = PAGE_SIZE;
#else
    size_t effective_size = size;
    size_t offset_from_effective_base = 0;
#endif

    if (Checked<size_t>::addition_would_overflow(effective_size, alignment))
        return {};

    SpinlockLocker lock(m_lock);

    for (auto it = m_available_ranges.begin(); !it.is_end(); ++it) {
        auto& available_range = *it;

        // FIXME: This check is probably excluding some valid candidates when using a large alignment.
        if (available_range.size() < (effective_size + alignment))
            continue;

        FlatPtr initial_base = available_range.base().offset(offset_from_effective_base).get();
        FlatPtr aligned_base = round_up_to_power_of_two(initial_base, alignment);

        VirtualRange const allocated_range(VirtualAddress(aligned_base), size);
        VERIFY(m_total_range.contains(allocated_range));

        if (available_range == allocated_range) {
            m_available_ranges.remove(it.key());
            return allocated_range;
        }
        carve_at_iterator(it, allocated_range);
        return allocated_range;
    }

    dmesgln("VirtualRangeAllocator: Failed to allocate anywhere: size={}, alignment={}", size, alignment);
    return {};
}

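// Allocate exactly [base, base + size), but only if that span lies entirely within a single available range.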
Optional<VirtualRange> VirtualRangeAllocator::allocate_specific(VirtualAddress base, size_t size)
{
    if (!size)
        return {};

    VERIFY(base.is_page_aligned());
    VERIFY((size % PAGE_SIZE) == 0);

    VirtualRange const allocated_range(base, size);
    if (!m_total_range.contains(allocated_range)) {
        return {};
    }

    SpinlockLocker lock(m_lock);
    for (auto it = m_available_ranges.begin(); !it.is_end(); ++it) {
        auto& available_range = *it;
        if (!available_range.contains(base, size))
            continue;
        if (available_range == allocated_range) {
            m_available_ranges.remove(it.key());
            return allocated_range;
        }
        carve_at_iterator(it, allocated_range);
        return allocated_range;
    }
    return {};
}

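// Return a range to the allocator, merging it with the preceding and/or following available range to limit fragmentation.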
void VirtualRangeAllocator::deallocate(VirtualRange const& range)
{
    SpinlockLocker lock(m_lock);
    VERIFY(m_total_range.contains(range));
    VERIFY(range.size());
    VERIFY((range.size() % PAGE_SIZE) == 0);
    VERIFY(range.base() < range.end());
    VERIFY(!m_available_ranges.is_empty());

    VirtualRange merged_range = range;

    {
        // Try merging with preceding range.
        auto* preceding_range = m_available_ranges.find_largest_not_above(range.base().get());
        if (preceding_range && preceding_range->end() == range.base()) {
            preceding_range->m_size += range.size();
            merged_range = *preceding_range;
        } else {
            m_available_ranges.insert(range.base().get(), range);
        }
    }

    {
        // Try merging with following range.
        auto* following_range = m_available_ranges.find_largest_not_above(range.end().get());
        if (following_range && merged_range.end() == following_range->base()) {
            auto* existing_range = m_available_ranges.find_largest_not_above(range.base().get());
            VERIFY(existing_range->base() == merged_range.base());
            existing_range->m_size += following_range->size();
            m_available_ranges.remove(following_range->base().get());
        }
    }
}

}
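
// Usage sketch (illustrative only; the base address and sizes below are made up,
// and real callers live elsewhere in the kernel):
//
//     VirtualRangeAllocator allocator;
//     allocator.initialize_with_range(VirtualAddress(0x2000'0000), 0x4000'0000);
//     if (auto range = allocator.allocate_anywhere(4 * PAGE_SIZE, PAGE_SIZE); range.has_value()) {
//         // ... map pages into *range, use them, unmap ...
//         allocator.deallocate(*range);
//     }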