BlockAllocator.cpp

/*
 * Copyright (c) 2021-2023, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Platform.h>
#include <AK/Random.h>
#include <AK/Vector.h>
#include <LibJS/Heap/BlockAllocator.h>
#include <LibJS/Heap/HeapBlock.h>
#include <sys/mman.h>

#ifdef HAS_ADDRESS_SANITIZER
#    include <sanitizer/asan_interface.h>
#    include <sanitizer/lsan_interface.h>
#endif
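
// Platforms without MADV_FREE or MADV_DONTNEED (e.g. GNU Hurd) cannot advise
// the kernel to reclaim a block's pages, so they fall back to unmapping and
// remapping the block in deallocate_block() below.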
#if defined(AK_OS_GNU_HURD) || (!defined(MADV_FREE) && !defined(MADV_DONTNEED))
#    define USE_FALLBACK_BLOCK_DEALLOCATION
#endif

namespace JS {
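
// On teardown, return all cached blocks to the OS. Cached blocks are kept
// ASAN-poisoned, so unpoison them before handing them back via munmap().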
BlockAllocator::~BlockAllocator()
{
    for (auto* block : m_blocks) {
        ASAN_UNPOISON_MEMORY_REGION(block, HeapBlock::block_size);
        if (munmap(block, HeapBlock::block_size) < 0) {
            perror("munmap");
            VERIFY_NOT_REACHED();
        }
    }
}
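
// Hand out a block from the cache if possible; otherwise map a fresh one.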
void* BlockAllocator::allocate_block([[maybe_unused]] char const* name)
{
    if (!m_blocks.is_empty()) {
        // To reduce predictability, take a random block from the cache.
        size_t random_index = get_random_uniform(m_blocks.size());
        auto* block = m_blocks.unstable_take(random_index);
        ASAN_UNPOISON_MEMORY_REGION(block, HeapBlock::block_size);
        LSAN_REGISTER_ROOT_REGION(block, HeapBlock::block_size);
        return block;
    }
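
    // The cache is empty; ask the OS for a new anonymous mapping.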
    auto* block = (HeapBlock*)mmap(nullptr, HeapBlock::block_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    VERIFY(block != MAP_FAILED);
    LSAN_REGISTER_ROOT_REGION(block, HeapBlock::block_size);
    return block;
}
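
// Return a block to the allocator's cache. The physical pages are released
// back to the OS (via MADV_FREE/MADV_DONTNEED, or the munmap()+mmap() fallback),
// but the virtual address range stays reserved so the block can be reused.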
void BlockAllocator::deallocate_block(void* block)
{
    VERIFY(block);

#if defined(USE_FALLBACK_BLOCK_DEALLOCATION)
    // If we can't use any of the nicer techniques, unmap and remap the block to return the physical pages while keeping the VM.
    if (munmap(block, HeapBlock::block_size) < 0) {
        perror("munmap");
        VERIFY_NOT_REACHED();
    }
    if (mmap(block, HeapBlock::block_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0) != block) {
        perror("mmap");
        VERIFY_NOT_REACHED();
    }
#elif defined(MADV_FREE)
    if (madvise(block, HeapBlock::block_size, MADV_FREE) < 0) {
        perror("madvise(MADV_FREE)");
        VERIFY_NOT_REACHED();
    }
#elif defined(MADV_DONTNEED)
    if (madvise(block, HeapBlock::block_size, MADV_DONTNEED) < 0) {
        perror("madvise(MADV_DONTNEED)");
        VERIFY_NOT_REACHED();
    }
#endif
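
    // Poison the cached block so any stray access trips ASAN until it is
    // unpoisoned again in allocate_block() or the destructor.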
    ASAN_POISON_MEMORY_REGION(block, HeapBlock::block_size);
    LSAN_UNREGISTER_ROOT_REGION(block, HeapBlock::block_size);
    m_blocks.append(block);
}

}