BlockAllocator.cpp

/*
 * Copyright (c) 2021-2023, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Platform.h>
#include <AK/Random.h>
#include <AK/Vector.h>
#include <LibJS/Heap/BlockAllocator.h>
#include <LibJS/Heap/HeapBlock.h>
#include <sys/mman.h>

#ifdef HAS_ADDRESS_SANITIZER
#    include <sanitizer/asan_interface.h>
#endif

// FIXME: Implement MADV_FREE and/or MADV_DONTNEED on SerenityOS.
#if defined(AK_OS_SERENITY) || defined(AK_OS_GNU_HURD) || (!defined(MADV_FREE) && !defined(MADV_DONTNEED))
#    define USE_FALLBACK_BLOCK_DEALLOCATION
#endif
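
// USE_FALLBACK_BLOCK_DEALLOCATION selects the munmap()+mmap(MAP_FIXED) strategy
// in deallocate_block() on platforms where neither MADV_FREE nor MADV_DONTNEED
// is available.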

namespace JS {

// NOTE: If this changes, we need to update the mmap() code to ensure correct alignment.
static_assert(HeapBlock::block_size == 4096);
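
// block_size equals the common 4 KiB page size, so the page-aligned pointer
// returned by mmap() is already block-aligned; a larger block_size would need
// explicit alignment handling (the SerenityOS path already passes block_size
// as serenity_mmap()'s alignment argument).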

BlockAllocator::~BlockAllocator()
{
    for (auto* block : m_blocks) {
        ASAN_UNPOISON_MEMORY_REGION(block, HeapBlock::block_size);
        if (munmap(block, HeapBlock::block_size) < 0) {
            perror("munmap");
            VERIFY_NOT_REACHED();
        }
    }
}
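
// Hands out a cached block if one is available (chosen at random so the heap
// layout is harder to predict), and maps a fresh block otherwise.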

void* BlockAllocator::allocate_block([[maybe_unused]] char const* name)
{
    if (!m_blocks.is_empty()) {
        // To reduce predictability, take a random block from the cache.
        size_t random_index = get_random_uniform(m_blocks.size());
        auto* block = m_blocks.unstable_take(random_index);
        ASAN_UNPOISON_MEMORY_REGION(block, HeapBlock::block_size);
#ifdef AK_OS_SERENITY
        if (set_mmap_name(block, HeapBlock::block_size, name) < 0) {
            perror("set_mmap_name");
            VERIFY_NOT_REACHED();
        }
#endif
        return block;
    }

#ifdef AK_OS_SERENITY
    // MAP_RANDOMIZED asks the kernel for a randomized address; the extra
    // arguments are the required alignment and a human-readable region name.
    auto* block = (HeapBlock*)serenity_mmap(nullptr, HeapBlock::block_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_RANDOMIZED | MAP_PRIVATE, 0, 0, HeapBlock::block_size, name);
#else
    auto* block = (HeapBlock*)mmap(nullptr, HeapBlock::block_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
#endif
    VERIFY(block != MAP_FAILED);
    return block;
}
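
// Returns the block's physical pages to the kernel while keeping its virtual
// address range reserved, then caches the address for reuse by allocate_block().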

void BlockAllocator::deallocate_block(void* block)
{
    VERIFY(block);

#if defined(USE_FALLBACK_BLOCK_DEALLOCATION)
    // If we can't use any of the nicer techniques, unmap and remap the block to return the physical pages while keeping the VM.
    if (munmap(block, HeapBlock::block_size) < 0) {
        perror("munmap");
        VERIFY_NOT_REACHED();
    }
    if (mmap(block, HeapBlock::block_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, 0, 0) != block) {
        perror("mmap");
        VERIFY_NOT_REACHED();
    }
#elif defined(MADV_FREE)
    // MADV_FREE is lazy: the pages are only reclaimed under memory pressure.
    if (madvise(block, HeapBlock::block_size, MADV_FREE) < 0) {
        perror("madvise(MADV_FREE)");
        VERIFY_NOT_REACHED();
    }
#elif defined(MADV_DONTNEED)
    // MADV_DONTNEED drops the pages immediately; the next access sees fresh zero-filled pages.
    if (madvise(block, HeapBlock::block_size, MADV_DONTNEED) < 0) {
        perror("madvise(MADV_DONTNEED)");
        VERIFY_NOT_REACHED();
    }
#endif

    // Poison the cached block so ASan catches any stray access until it is reused.
    ASAN_POISON_MEMORY_REGION(block, HeapBlock::block_size);
    m_blocks.append(block);
}

}