/*
 * Copyright (c) 2022, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */
#include <AK/Format.h>
#include <Kernel/Memory/AnonymousVMObject.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/RegionTree.h>
#include <Kernel/Random.h>

namespace Kernel::Memory {
// Tears down the tree by freeing every remaining region. As the helper's name
// states, this assumes all regions have already been unmapped by the owner.
RegionTree::~RegionTree()
{
    delete_all_regions_assuming_they_are_unmapped();
}
  16. void RegionTree::delete_all_regions_assuming_they_are_unmapped()
  17. {
  18. // FIXME: This could definitely be done in a more efficient manner.
  19. while (!m_regions.is_empty()) {
  20. auto& region = *m_regions.begin();
  21. m_regions.remove(region.vaddr().get());
  22. delete &region;
  23. }
  24. }
// Finds a free, aligned virtual range of `size` bytes anywhere inside
// m_total_range by scanning the gaps ("windows") between existing regions in
// ascending address order.
// Returns EINVAL for a zero size, EOVERFLOW if size+alignment would overflow,
// and ENOMEM when no gap is large enough.
// NOTE(review): this does not take m_lock itself; callers such as
// place_anywhere() hold it — confirm all call sites do.
ErrorOr<VirtualRange> RegionTree::allocate_range_anywhere(size_t size, size_t alignment)
{
    if (!size)
        return EINVAL;
    VERIFY((size % PAGE_SIZE) == 0);
    VERIFY((alignment % PAGE_SIZE) == 0);
    if (Checked<size_t>::addition_would_overflow(size, alignment))
        return EOVERFLOW;
    // Start scanning from the bottom of the managed address space.
    VirtualAddress window_start = m_total_range.base();
    // Attempts to carve an aligned range out of one gap between regions.
    auto allocate_from_window = [&](VirtualRange const& window) -> Optional<VirtualRange> {
        // FIXME: This check is probably excluding some valid candidates when using a large alignment.
        if (window.size() < (size + alignment))
            return {};
        FlatPtr initial_base = window.base().get();
        // Round the window base up to the requested alignment; the size check
        // above guarantees the aligned range still fits within the window.
        FlatPtr aligned_base = round_up_to_power_of_two(initial_base, alignment);
        VERIFY(size);
        return VirtualRange { VirtualAddress(aligned_base), size };
    };
    // Walk the regions; the space between window_start and the next region's
    // base is a candidate gap.
    for (auto it = m_regions.begin(); !it.is_end(); ++it) {
        auto& region = *it;
        if (window_start == region.vaddr()) {
            // No gap before this region; continue scanning after it.
            window_start = region.range().end();
            continue;
        }
        VirtualRange window { window_start, region.vaddr().get() - window_start.get() };
        window_start = region.range().end();
        if (auto maybe_range = allocate_from_window(window); maybe_range.has_value())
            return maybe_range.release_value();
    }
    // Finally consider the tail gap between the last region and the end of the
    // managed range.
    VirtualRange window { window_start, m_total_range.end().get() - window_start.get() };
    if (m_total_range.contains(window)) {
        if (auto maybe_range = allocate_from_window(window); maybe_range.has_value())
            return maybe_range.release_value();
    }
    dmesgln("RegionTree: Failed to allocate anywhere: size={}, alignment={}", size, alignment);
    return ENOMEM;
}
// Checks whether the exact range [base, base+size) is free within
// m_total_range. Returns the range on success, EINVAL for a zero size, and
// ENOMEM if the range is outside the managed space or collides with an
// existing region. Nothing is inserted here; callers do that themselves.
ErrorOr<VirtualRange> RegionTree::allocate_range_specific(VirtualAddress base, size_t size)
{
    if (!size)
        return EINVAL;
    VERIFY(base.is_page_aligned());
    VERIFY((size % PAGE_SIZE) == 0);
    VirtualRange const range { base, size };
    if (!m_total_range.contains(range))
        return ENOMEM;
    // Find the region with the largest base address not above the end of the
    // requested range; only that region and its successor can possibly collide.
    auto* region = m_regions.find_largest_not_above(base.offset(size).get());
    if (!region) {
        // The range can be accommodated below the current lowest range.
        return range;
    }
    if (region->range().intersects(range)) {
        // Requested range overlaps an existing range.
        return ENOMEM;
    }
    auto it = m_regions.begin_from(region->vaddr().get());
    VERIFY(!it.is_end());
    ++it;
    if (it.is_end()) {
        // The range can be accommodated above the nearest range.
        return range;
    }
    if (it->range().intersects(range)) {
        // Requested range overlaps the next neighbor.
        return ENOMEM;
    }
    // Requested range fits between first region and its next neighbor.
    return range;
}
// ASLR-style allocation: probe a bounded number of random, aligned base
// addresses via allocate_range_specific(), then fall back to the
// deterministic first-fit search if every probe misses.
ErrorOr<VirtualRange> RegionTree::allocate_range_randomized(size_t size, size_t alignment)
{
    if (!size)
        return EINVAL;
    VERIFY((size % PAGE_SIZE) == 0);
    VERIFY((alignment % PAGE_SIZE) == 0);
    // FIXME: I'm sure there's a smarter way to do this.
    constexpr size_t maximum_randomization_attempts = 1000;
    for (size_t i = 0; i < maximum_randomization_attempts; ++i) {
        // Pick a random address below the top of the managed range, rounded up
        // to the requested alignment.
        VirtualAddress random_address { round_up_to_power_of_two(get_fast_random<FlatPtr>() % m_total_range.end().get(), alignment) };
        if (!m_total_range.contains(random_address, size))
            continue;
        auto range_or_error = allocate_range_specific(random_address, size);
        if (!range_or_error.is_error())
            return range_or_error.release_value();
    }
    // All random probes failed or collided; fall back to a linear scan.
    return allocate_range_anywhere(size, alignment);
}
  112. ErrorOr<NonnullOwnPtr<Region>> RegionTree::allocate_unbacked_anywhere(size_t size, size_t alignment)
  113. {
  114. auto region = TRY(Region::create_unbacked());
  115. TRY(place_anywhere(*region, RandomizeVirtualAddress::No, size, alignment));
  116. return region;
  117. }
  118. ErrorOr<void> RegionTree::place_anywhere(Region& region, RandomizeVirtualAddress randomize_virtual_address, size_t size, size_t alignment)
  119. {
  120. SpinlockLocker locker(m_lock);
  121. auto range = TRY(randomize_virtual_address == RandomizeVirtualAddress::Yes ? allocate_range_randomized(size, alignment) : allocate_range_anywhere(size, alignment));
  122. region.m_range = range;
  123. m_regions.insert(region.vaddr().get(), region);
  124. return {};
  125. }
  126. ErrorOr<void> RegionTree::place_specifically(Region& region, VirtualRange const& range)
  127. {
  128. SpinlockLocker locker(m_lock);
  129. auto allocated_range = TRY(allocate_range_specific(range.base(), range.size()));
  130. region.m_range = allocated_range;
  131. m_regions.insert(region.vaddr().get(), region);
  132. return {};
  133. }
  134. ErrorOr<NonnullOwnPtr<Memory::Region>> RegionTree::create_identity_mapped_region(PhysicalAddress paddr, size_t size)
  135. {
  136. auto vmobject = TRY(Memory::AnonymousVMObject::try_create_for_physical_range(paddr, size));
  137. auto region = TRY(Memory::Region::create_unplaced(move(vmobject), 0, {}, Memory::Region::Access::ReadWriteExecute));
  138. Memory::VirtualRange range { VirtualAddress { (FlatPtr)paddr.get() }, size };
  139. region->m_range = range;
  140. TRY(region->map(MM.kernel_page_directory()));
  141. return region;
  142. }
}