VirtualRangeAllocator.cpp

/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Checked.h>
#include <Kernel/Memory/VirtualRangeAllocator.h>
#include <Kernel/Random.h>

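// When VM_GUARD_PAGES is defined, try_allocate_anywhere() keeps one unallocated
// guard page on each side of every allocation.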
#define VM_GUARD_PAGES

namespace Kernel::Memory {

VirtualRangeAllocator::VirtualRangeAllocator()
    : m_total_range({}, 0)
{
}

ErrorOr<void> VirtualRangeAllocator::initialize_with_range(VirtualAddress base, size_t size)
{
    m_total_range = { base, size };
    TRY(m_available_ranges.try_insert(base.get(), VirtualRange { base, size }));
    return {};
}

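// Copies the parent allocator's total range and its tree of available ranges,
// so a child address space starts out with the same free regions.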
ErrorOr<void> VirtualRangeAllocator::initialize_from_parent(VirtualRangeAllocator const& parent_allocator)
{
    SpinlockLocker lock(parent_allocator.m_lock);
    m_total_range = parent_allocator.m_total_range;
    m_available_ranges.clear();
    for (auto it = parent_allocator.m_available_ranges.begin(); !it.is_end(); ++it) {
        TRY(m_available_ranges.try_insert(it.key(), VirtualRange(*it)));
    }
    return {};
}

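// Debugging aid: logs every currently available range. Expects m_lock to be held.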
void VirtualRangeAllocator::dump() const
{
    VERIFY(m_lock.is_locked());
    dbgln("VirtualRangeAllocator({})", this);
    for (auto& range : m_available_ranges) {
        dbgln("    {:x} -> {:x}", range.base().get(), range.end().get() - 1);
    }
}

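// Splits the available range `from` around the allocated `range`, replacing it
// in the tree with the one or two pieces left over on either side.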
void VirtualRangeAllocator::carve_from_region(VirtualRange const& from, VirtualRange const& range)
{
    VERIFY(m_lock.is_locked());
    auto remaining_parts = from.carve(range);
    VERIFY(remaining_parts.size() >= 1);
    VERIFY(m_total_range.contains(remaining_parts[0]));
    m_available_ranges.remove(from.base().get());
    m_available_ranges.insert(remaining_parts[0].base().get(), remaining_parts[0]);
    if (remaining_parts.size() == 2) {
        VERIFY(m_total_range.contains(remaining_parts[1]));
        m_available_ranges.insert(remaining_parts[1].base().get(), remaining_parts[1]);
    }
}

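// Picks random, aligned candidate bases within the total range and tries each
// one; after maximum_randomization_attempts failures, falls back to a regular
// first-fit allocation via try_allocate_anywhere().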
ErrorOr<VirtualRange> VirtualRangeAllocator::try_allocate_randomized(size_t size, size_t alignment)
{
    if (!size)
        return EINVAL;

    VERIFY((size % PAGE_SIZE) == 0);
    VERIFY((alignment % PAGE_SIZE) == 0);

    // FIXME: I'm sure there's a smarter way to do this.
    static constexpr size_t maximum_randomization_attempts = 1000;
    for (size_t i = 0; i < maximum_randomization_attempts; ++i) {
        VirtualAddress random_address { round_up_to_power_of_two(get_fast_random<FlatPtr>() % m_total_range.end().get(), alignment) };

        if (!m_total_range.contains(random_address, size))
            continue;

        auto range_or_error = try_allocate_specific(random_address, size);
        if (!range_or_error.is_error())
            return range_or_error.release_value();
    }

    return try_allocate_anywhere(size, alignment);
}

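// First-fit search: walks the available ranges in address order and carves the
// allocation out of the first one large enough for the requested size and
// alignment (plus guard pages when VM_GUARD_PAGES is enabled).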
ErrorOr<VirtualRange> VirtualRangeAllocator::try_allocate_anywhere(size_t size, size_t alignment)
{
    if (!size)
        return EINVAL;

    VERIFY((size % PAGE_SIZE) == 0);
    VERIFY((alignment % PAGE_SIZE) == 0);

#ifdef VM_GUARD_PAGES
    // NOTE: We pad VM allocations with a guard page on each side.
    if (Checked<size_t>::addition_would_overflow(size, PAGE_SIZE * 2))
        return EOVERFLOW;

    size_t effective_size = size + PAGE_SIZE * 2;
    size_t offset_from_effective_base = PAGE_SIZE;
#else
    size_t effective_size = size;
    size_t offset_from_effective_base = 0;
#endif

    if (Checked<size_t>::addition_would_overflow(effective_size, alignment))
        return EOVERFLOW;

    SpinlockLocker lock(m_lock);

    for (auto it = m_available_ranges.begin(); !it.is_end(); ++it) {
        auto& available_range = *it;
        // FIXME: This check is probably excluding some valid candidates when using a large alignment.
        if (available_range.size() < (effective_size + alignment))
            continue;

        FlatPtr initial_base = available_range.base().offset(offset_from_effective_base).get();
        FlatPtr aligned_base = round_up_to_power_of_two(initial_base, alignment);

        VirtualRange const allocated_range(VirtualAddress(aligned_base), size);
        VERIFY(m_total_range.contains(allocated_range));

        if (available_range == allocated_range) {
            m_available_ranges.remove(it.key());
            return allocated_range;
        }
        carve_from_region(*it, allocated_range);
        return allocated_range;
    }
    dmesgln("VirtualRangeAllocator: Failed to allocate anywhere: size={}, alignment={}", size, alignment);
    return ENOMEM;
}

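// Allocates exactly [base, base + size); fails if any part of that range is not
// currently available.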
ErrorOr<VirtualRange> VirtualRangeAllocator::try_allocate_specific(VirtualAddress base, size_t size)
{
    if (!size)
        return EINVAL;

    VERIFY(base.is_page_aligned());
    VERIFY((size % PAGE_SIZE) == 0);

    VirtualRange const allocated_range(base, size);
    if (!m_total_range.contains(allocated_range))
        return ENOMEM;

    SpinlockLocker lock(m_lock);

    auto available_range = m_available_ranges.find_largest_not_above(base.get());
    if (!available_range)
        return EEXIST;
    if (!available_range->contains(allocated_range))
        return EEXIST;
    if (*available_range == allocated_range) {
        m_available_ranges.remove(available_range->base().get());
        return allocated_range;
    }
    carve_from_region(*available_range, allocated_range);
    return allocated_range;
}

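// Returns `range` to the set of available ranges, coalescing it with an
// adjacent preceding and/or following free range when they touch.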
void VirtualRangeAllocator::deallocate(VirtualRange const& range)
{
    SpinlockLocker lock(m_lock);
    VERIFY(m_total_range.contains(range));
    VERIFY(range.size());
    VERIFY((range.size() % PAGE_SIZE) == 0);
    VERIFY(range.base() < range.end());
    VERIFY(!m_available_ranges.is_empty());

    VirtualRange merged_range = range;

    {
        // Try merging with preceding range.
        auto* preceding_range = m_available_ranges.find_largest_not_above(range.base().get());
        if (preceding_range && preceding_range->end() == range.base()) {
            preceding_range->m_size += range.size();
            merged_range = *preceding_range;
        } else {
            m_available_ranges.insert(range.base().get(), range);
        }
    }

    {
        // Try merging with following range.
        auto* following_range = m_available_ranges.find_largest_not_above(range.end().get());
        if (following_range && merged_range.end() == following_range->base()) {
            auto* existing_range = m_available_ranges.find_largest_not_above(range.base().get());
            VERIFY(existing_range->base() == merged_range.base());
            existing_range->m_size += following_range->size();
            m_available_ranges.remove(following_range->base().get());
        }
    }
}

}
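// Usage sketch (illustrative only; the addresses, sizes, and calling context are
// hypothetical, inside a function returning ErrorOr<...>):
//
//     Kernel::Memory::VirtualRangeAllocator allocator;
//     TRY(allocator.initialize_with_range(VirtualAddress { 0x2000'0000 }, 0x1000'0000));
//     auto range = TRY(allocator.try_allocate_anywhere(4 * PAGE_SIZE, PAGE_SIZE));
//     // ... map and use the range ...
//     allocator.deallocate(range);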