// cxxabi.cpp
/*
 * Copyright (c) 2019-2021, Andrew Kaster <akaster@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Bitmap.h>
#include <AK/Checked.h>
#include <AK/Debug.h>
#include <AK/Format.h>
#include <AK/NeverDestroyed.h>
#include <LibC/bits/pthread_integration.h>
#include <LibC/mallocdefs.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/internals.h>
#include <sys/mman.h>
#include <unistd.h>

extern "C" {
// One registered exit handler, as recorded by __cxa_atexit().
struct AtExitEntry {
    AtExitFunction method { nullptr }; // Handler to invoke during finalization.
    void* parameter { nullptr };       // Opaque argument forwarded to method.
    void* dso_handle { nullptr };      // Owning DSO; matched against __cxa_finalize(d).
};
// We'll re-allocate the region if it ends up being too small at runtime.
// Invariant: atexit_entry_region_capacity * sizeof(AtExitEntry) does not overflow.
// Initial capacity: as many entries as fit in a single page.
static size_t atexit_entry_region_capacity = PAGE_SIZE / sizeof(AtExitEntry);
  29. static size_t atexit_region_bytes(size_t capacity = atexit_entry_region_capacity)
  30. {
  31. return PAGE_ROUND_UP(capacity * sizeof(AtExitEntry));
  32. }
  33. static size_t atexit_next_capacity()
  34. {
  35. size_t original_num_bytes = atexit_region_bytes();
  36. VERIFY(!Checked<size_t>::addition_would_overflow(original_num_bytes, PAGE_SIZE));
  37. return (original_num_bytes + PAGE_SIZE) / sizeof(AtExitEntry);
  38. }
static AtExitEntry* atexit_entries;   // mmap()-ed array of handlers; grown on demand.
static size_t atexit_entry_count = 0; // Number of initialized entries in atexit_entries.
static pthread_mutex_t atexit_mutex = __PTHREAD_MUTEX_INITIALIZER; // Guards all of the above.
// The C++ compiler automagically registers the destructor of this object with __cxa_atexit.
// However, we can't control the order in which these destructors are run, so we might still want to access this data after the registered entry.
// Hence, we will call the destructor manually, when we know it is safe to do so.
static NeverDestroyed<Bitmap> atexit_called_entries;
// During startup, it is sufficiently unlikely that the attacker can exploit any write primitive.
// We use this to avoid unnecessary syscalls to mprotect.
static bool atexit_region_should_lock = false;
  49. static void lock_atexit_handlers()
  50. {
  51. if (atexit_region_should_lock && mprotect(atexit_entries, atexit_region_bytes(), PROT_READ) < 0) {
  52. perror("lock_atexit_handlers");
  53. _exit(1);
  54. }
  55. }
  56. static void unlock_atexit_handlers()
  57. {
  58. if (atexit_region_should_lock && mprotect(atexit_entries, atexit_region_bytes(), PROT_READ | PROT_WRITE) < 0) {
  59. perror("unlock_atexit_handlers");
  60. _exit(1);
  61. }
  62. }
// Called once early startup is done: from here on, keep the handler region
// read-only except while it is actively being modified.
void __begin_atexit_locking()
{
    atexit_region_should_lock = true;
    lock_atexit_handlers();
}
// Itanium C++ ABI: register exit_function(parameter) to run at exit (or when
// dso_handle's DSO is unloaded). Returns 0 on success, -1 if the handler
// region could not be grown. Thread-safe via atexit_mutex.
int __cxa_atexit(AtExitFunction exit_function, void* parameter, void* dso_handle)
{
    __pthread_mutex_lock(&atexit_mutex);

    // allocate initial atexit region
    if (!atexit_entries) {
        atexit_entries = (AtExitEntry*)mmap(nullptr, atexit_region_bytes(), PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
        if (atexit_entries == MAP_FAILED) {
            __pthread_mutex_unlock(&atexit_mutex);
            perror("__cxa_atexit mmap");
            // Initial allocation failure is fatal; nothing can be registered.
            _exit(1);
        }
    }

    // reallocate atexit region, increasing size by PAGE_SIZE
    if (atexit_entry_count >= atexit_entry_region_capacity) {
        size_t new_capacity = atexit_next_capacity();
        size_t new_atexit_region_size = atexit_region_bytes(new_capacity);
        dbgln_if(GLOBAL_DTORS_DEBUG, "__cxa_atexit: Growing exit handler region from {} entries to {} entries", atexit_entry_region_capacity, new_capacity);

        auto* new_atexit_entries = (AtExitEntry*)mmap(nullptr, new_atexit_region_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
        if (new_atexit_entries == MAP_FAILED) {
            __pthread_mutex_unlock(&atexit_mutex);
            perror("__cxa_atexit mmap (new size)");
            // Non-fatal: existing registrations remain intact; caller sees failure.
            return -1;
        }
        // Note: We must make sure to only copy initialized entries, as even touching uninitialized bytes will trigger UBSan.
        // (Reading the old region is fine even while it is mprotect'ed PROT_READ.)
        memcpy(new_atexit_entries, atexit_entries, atexit_entry_count * sizeof(AtExitEntry));
        if (munmap(atexit_entries, atexit_region_bytes()) < 0) {
            perror("__cxa_atexit munmap old region");
            // leak the old region on failure
        }
        atexit_entries = new_atexit_entries;
        atexit_entry_region_capacity = new_capacity;
    }

    // Briefly open a write window to append the new entry, then re-seal the region.
    unlock_atexit_handlers();
    atexit_entries[atexit_entry_count++] = { exit_function, parameter, dso_handle };
    lock_atexit_handlers();

    __pthread_mutex_unlock(&atexit_mutex);
    return 0;
}
// Itanium C++ ABI: run registered handlers in reverse registration order.
// With a non-null dso_handle, only that DSO's handlers run; with null, all do.
// Each handler runs at most once across repeated calls (tracked in the bitmap).
void __cxa_finalize(void* dso_handle)
{
    // From the itanium abi, https://itanium-cxx-abi.github.io/cxx-abi/abi.html#dso-dtor-runtime-api
    //
    // When __cxa_finalize(d) is called, it should walk the termination function list, calling each in turn
    // if d matches __dso_handle for the termination function entry. If d == NULL, it should call all of them.
    // Multiple calls to __cxa_finalize shall not result in calling termination function entries multiple times;
    // the implementation may either remove entries or mark them finished.

    __pthread_mutex_lock(&atexit_mutex);

    // Handlers registered since the last call may exceed the bitmap; grow it
    // with all-new bits cleared (false == not yet called).
    if (atexit_entry_count > atexit_called_entries->size())
        atexit_called_entries->grow(atexit_entry_count, false);

    // ssize_t so the pre-decrement loop can terminate at -1.
    ssize_t entry_index = atexit_entry_count;

    dbgln_if(GLOBAL_DTORS_DEBUG, "__cxa_finalize: {} entries in the finalizer list", entry_index);

    while (--entry_index >= 0) {
        auto& exit_entry = atexit_entries[entry_index];
        bool needs_calling = !atexit_called_entries->get(entry_index) && (!dso_handle || dso_handle == exit_entry.dso_handle);
        if (needs_calling) {
            dbgln_if(GLOBAL_DTORS_DEBUG, "__cxa_finalize: calling entry[{}] {:p}({:p}) dso: {:p}", entry_index, exit_entry.method, exit_entry.parameter, exit_entry.dso_handle);
            // Mark before calling so a reentrant __cxa_finalize can't run it again.
            atexit_called_entries->set(entry_index, true);
            // Drop the mutex around the callback: the handler may itself call
            // __cxa_atexit or __cxa_finalize.
            __pthread_mutex_unlock(&atexit_mutex);
            exit_entry.method(exit_entry.parameter);
            __pthread_mutex_lock(&atexit_mutex);
        }
    }

    __pthread_mutex_unlock(&atexit_mutex);
}
// Called when a pure virtual function is invoked (e.g. from a constructor or
// destructor of an abstract class); this is always a program bug, so abort.
__attribute__((noreturn)) void __cxa_pure_virtual()
{
    VERIFY_NOT_REACHED();
}
} // extern "C"