// cxxabi.cpp
/*
 * Copyright (c) 2019-2021, Andrew Kaster <akaster@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */
#include <AK/Bitmap.h>
#include <AK/Checked.h>
#include <AK/Debug.h>
#include <AK/Format.h>
#include <AK/NeverDestroyed.h>
#include <assert.h>
#include <bits/pthread_integration.h>
#include <mallocdefs.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/internals.h>
#include <sys/mman.h>
#include <unistd.h>

extern "C" {
// A single handler registered via __cxa_atexit (or atexit on top of it).
struct AtExitEntry {
    AtExitFunction method { nullptr }; // Function to invoke at exit / DSO unload.
    void* parameter { nullptr };       // Opaque argument passed to `method`.
    void* dso_handle { nullptr };      // Owning DSO; matched against __cxa_finalize's argument.
};
// We'll re-allocate the region if it ends up being too small at runtime.
// Invariant: atexit_entry_region_capacity * sizeof(AtExitEntry) does not overflow.
static size_t atexit_entry_region_capacity = PAGE_SIZE / sizeof(AtExitEntry);

// Returns the size in bytes (rounded up to whole pages) of the mmap'd region
// needed to hold `capacity` entries. Defaults to the current capacity.
static size_t atexit_region_bytes(size_t capacity = atexit_entry_region_capacity)
{
    return PAGE_ROUND_UP(capacity * sizeof(AtExitEntry));
}
  34. static size_t atexit_next_capacity()
  35. {
  36. size_t original_num_bytes = atexit_region_bytes();
  37. VERIFY(!Checked<size_t>::addition_would_overflow(original_num_bytes, PAGE_SIZE));
  38. return (original_num_bytes + PAGE_SIZE) / sizeof(AtExitEntry);
  39. }
// Backing storage for registered handlers; lazily mmap'd on first registration.
static AtExitEntry* atexit_entries;
// Number of initialized entries in `atexit_entries`.
static size_t atexit_entry_count = 0;
// Guards all of the atexit state above across concurrent registration/finalization.
static pthread_mutex_t atexit_mutex = __PTHREAD_MUTEX_INITIALIZER;

// The C++ compiler automagically registers the destructor of this object with __cxa_atexit.
// However, we can't control the order in which these destructors are run, so we might still want to access this data after the registered entry.
// Hence, we will call the destructor manually, when we know it is safe to do so.
static NeverDestroyed<Bitmap> atexit_called_entries;

// During startup, it is sufficiently unlikely that the attacker can exploit any write primitive.
// We use this to avoid unnecessary syscalls to mprotect.
static bool atexit_region_should_lock = false;
  50. static void lock_atexit_handlers()
  51. {
  52. if (atexit_region_should_lock && mprotect(atexit_entries, atexit_region_bytes(), PROT_READ) < 0) {
  53. perror("lock_atexit_handlers");
  54. _exit(1);
  55. }
  56. }
  57. static void unlock_atexit_handlers()
  58. {
  59. if (atexit_region_should_lock && mprotect(atexit_entries, atexit_region_bytes(), PROT_READ | PROT_WRITE) < 0) {
  60. perror("unlock_atexit_handlers");
  61. _exit(1);
  62. }
  63. }
// Called once early startup is finished: from now on the handler region is
// kept mprotect'ed read-only except while we are actively mutating it.
void __begin_atexit_locking()
{
    // Flag must be set first; lock_atexit_handlers() is a no-op while it is false.
    atexit_region_should_lock = true;
    lock_atexit_handlers();
}
  69. int __cxa_atexit(AtExitFunction exit_function, void* parameter, void* dso_handle)
  70. {
  71. pthread_mutex_lock(&atexit_mutex);
  72. // allocate initial atexit region
  73. if (!atexit_entries) {
  74. atexit_entries = (AtExitEntry*)mmap(nullptr, atexit_region_bytes(), PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
  75. if (atexit_entries == MAP_FAILED) {
  76. pthread_mutex_unlock(&atexit_mutex);
  77. perror("__cxa_atexit mmap");
  78. _exit(1);
  79. }
  80. }
  81. // reallocate atexit region, increasing size by PAGE_SIZE
  82. if (atexit_entry_count >= atexit_entry_region_capacity) {
  83. size_t new_capacity = atexit_next_capacity();
  84. size_t new_atexit_region_size = atexit_region_bytes(new_capacity);
  85. dbgln_if(GLOBAL_DTORS_DEBUG, "__cxa_atexit: Growing exit handler region from {} entries to {} entries", atexit_entry_region_capacity, new_capacity);
  86. auto* new_atexit_entries = (AtExitEntry*)mmap(nullptr, new_atexit_region_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
  87. if (new_atexit_entries == MAP_FAILED) {
  88. pthread_mutex_unlock(&atexit_mutex);
  89. perror("__cxa_atexit mmap (new size)");
  90. return -1;
  91. }
  92. // Note: We must make sure to only copy initialized entries, as even touching uninitialized bytes will trigger UBSan.
  93. memcpy(new_atexit_entries, atexit_entries, atexit_entry_count * sizeof(AtExitEntry));
  94. if (munmap(atexit_entries, atexit_region_bytes()) < 0) {
  95. perror("__cxa_atexit munmap old region");
  96. // leak the old region on failure
  97. }
  98. atexit_entries = new_atexit_entries;
  99. atexit_entry_region_capacity = new_capacity;
  100. }
  101. unlock_atexit_handlers();
  102. atexit_entries[atexit_entry_count++] = { exit_function, parameter, dso_handle };
  103. lock_atexit_handlers();
  104. pthread_mutex_unlock(&atexit_mutex);
  105. return 0;
  106. }
// Runs registered termination handlers, newest first.
void __cxa_finalize(void* dso_handle)
{
    // From the itanium abi, https://itanium-cxx-abi.github.io/cxx-abi/abi.html#dso-dtor-runtime-api
    //
    // When __cxa_finalize(d) is called, it should walk the termination function list, calling each in turn
    // if d matches __dso_handle for the termination function entry. If d == NULL, it should call all of them.
    // Multiple calls to __cxa_finalize shall not result in calling termination function entries multiple times;
    // the implementation may either remove entries or mark them finished.
    pthread_mutex_lock(&atexit_mutex);

    // We take the "mark them finished" approach: entries stay in the table and
    // atexit_called_entries records which ones have already been run.
    if (atexit_entry_count > atexit_called_entries->size())
        atexit_called_entries->grow(atexit_entry_count, false);

    // The count is read once here, so handlers registered *during* this
    // finalization pass are not invoked by it.
    ssize_t entry_index = atexit_entry_count;

    dbgln_if(GLOBAL_DTORS_DEBUG, "__cxa_finalize: {} entries in the finalizer list", entry_index);

    while (--entry_index >= 0) {
        auto& exit_entry = atexit_entries[entry_index];
        bool needs_calling = !atexit_called_entries->get(entry_index) && (!dso_handle || dso_handle == exit_entry.dso_handle);
        if (needs_calling) {
            dbgln_if(GLOBAL_DTORS_DEBUG, "__cxa_finalize: calling entry[{}] {:p}({:p}) dso: {:p}", entry_index, exit_entry.method, exit_entry.parameter, exit_entry.dso_handle);
            // Mark as called *before* invoking, so a re-entrant __cxa_finalize
            // (e.g. a handler that calls exit()) cannot run this entry twice.
            atexit_called_entries->set(entry_index, true);
            // Drop the lock while the handler runs: the handler may itself call
            // __cxa_atexit or __cxa_finalize, which would deadlock otherwise.
            pthread_mutex_unlock(&atexit_mutex);
            exit_entry.method(exit_entry.parameter);
            pthread_mutex_lock(&atexit_mutex);
        }
    }

    pthread_mutex_unlock(&atexit_mutex);
}
// Called when a pure virtual function is invoked (typically from a constructor
// or destructor of an abstract class). That is always a bug, so crash loudly.
__attribute__((noreturn)) void __cxa_pure_virtual()
{
    VERIFY_NOT_REACHED();
}
} // extern "C"