/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/Arch/CPU.h>
#include <Kernel/Arch/PageFault.h>
#include <Kernel/Arch/Processor.h>
#include <Kernel/Arch/RegisterState.h>
#include <Kernel/Arch/SafeMem.h>
#include <Kernel/PerformanceManager.h>
#include <Kernel/Tasks/Thread.h>
  13. namespace Kernel {
  14. void PageFault::handle(RegisterState& regs)
  15. {
  16. auto fault_address = m_vaddr.get();
  17. bool faulted_in_kernel = regs.previous_mode() == ExecutionMode::Kernel;
  18. if (faulted_in_kernel && Processor::current_in_irq()) {
  19. // If we're faulting in an IRQ handler, first check if we failed
  20. // due to safe_memcpy, safe_strnlen, or safe_memset. If we did,
  21. // gracefully continue immediately. Because we're in an IRQ handler
  22. // we can't really try to resolve the page fault in a meaningful
  23. // way, so we need to do this before calling into
  24. // MemoryManager::handle_page_fault, which would just bail and
  25. // request a crash
  26. if (handle_safe_access_fault(regs, fault_address))
  27. return;
  28. }
  29. auto current_thread = Thread::current();
  30. if (current_thread) {
  31. current_thread->set_handling_page_fault(true);
  32. PerformanceManager::add_page_fault_event(*current_thread, regs);
  33. }
  34. ScopeGuard guard = [current_thread] {
  35. if (current_thread)
  36. current_thread->set_handling_page_fault(false);
  37. };
  38. if (!faulted_in_kernel) {
  39. VirtualAddress userspace_sp = VirtualAddress { regs.userspace_sp() };
  40. bool has_valid_stack_pointer = current_thread->process().address_space().with([&](auto& space) {
  41. return MM.validate_user_stack(*space, userspace_sp);
  42. });
  43. if (!has_valid_stack_pointer) {
  44. dbgln("Invalid stack pointer: {}", userspace_sp);
  45. return handle_crash(regs, "Bad stack on page fault", SIGSEGV);
  46. }
  47. }
  48. auto response = MM.handle_page_fault(*this);
  49. if (response == PageFaultResponse::ShouldCrash || response == PageFaultResponse::OutOfMemory || response == PageFaultResponse::BusError) {
  50. if (faulted_in_kernel && handle_safe_access_fault(regs, fault_address)) {
  51. // If this would be a ring0 (kernel) fault and the fault was triggered by
  52. // safe_memcpy, safe_strnlen, or safe_memset then we resume execution at
  53. // the appropriate _fault label rather than crashing
  54. return;
  55. }
  56. if (response == PageFaultResponse::BusError && current_thread->has_signal_handler(SIGBUS)) {
  57. current_thread->send_urgent_signal_to_self(SIGBUS);
  58. return;
  59. }
  60. if (response != PageFaultResponse::OutOfMemory && current_thread) {
  61. if (current_thread->has_signal_handler(SIGSEGV)) {
  62. current_thread->send_urgent_signal_to_self(SIGSEGV);
  63. return;
  64. }
  65. }
  66. dbgln("Unrecoverable page fault, {}{}{} address {}",
  67. is_reserved_bit_violation() ? "reserved bit violation / " : "",
  68. is_instruction_fetch() ? "instruction fetch / " : "",
  69. is_write() ? "write to" : "read from",
  70. VirtualAddress(fault_address));
  71. constexpr FlatPtr kmalloc_scrub_pattern = explode_byte(KMALLOC_SCRUB_BYTE);
  72. constexpr FlatPtr kfree_scrub_pattern = explode_byte(KFREE_SCRUB_BYTE);
  73. if (response == PageFaultResponse::BusError) {
  74. dbgln("Note: Address {} is an access to an undefined memory range of an Inode-backed VMObject", VirtualAddress(fault_address));
  75. } else if ((fault_address & 0xffff0000) == (kmalloc_scrub_pattern & 0xffff0000)) {
  76. dbgln("Note: Address {} looks like it may be uninitialized kmalloc() memory", VirtualAddress(fault_address));
  77. } else if ((fault_address & 0xffff0000) == (kfree_scrub_pattern & 0xffff0000)) {
  78. dbgln("Note: Address {} looks like it may be recently kfree()'d memory", VirtualAddress(fault_address));
  79. } else if (fault_address < 4096) {
  80. dbgln("Note: Address {} looks like a possible nullptr dereference", VirtualAddress(fault_address));
  81. } else if constexpr (SANITIZE_PTRS) {
  82. constexpr FlatPtr refptr_scrub_pattern = explode_byte(REFPTR_SCRUB_BYTE);
  83. constexpr FlatPtr nonnullrefptr_scrub_pattern = explode_byte(NONNULLREFPTR_SCRUB_BYTE);
  84. constexpr FlatPtr ownptr_scrub_pattern = explode_byte(OWNPTR_SCRUB_BYTE);
  85. constexpr FlatPtr nonnullownptr_scrub_pattern = explode_byte(NONNULLOWNPTR_SCRUB_BYTE);
  86. constexpr FlatPtr lockrefptr_scrub_pattern = explode_byte(LOCKREFPTR_SCRUB_BYTE);
  87. constexpr FlatPtr nonnulllockrefptr_scrub_pattern = explode_byte(NONNULLLOCKREFPTR_SCRUB_BYTE);
  88. if ((fault_address & 0xffff0000) == (refptr_scrub_pattern & 0xffff0000)) {
  89. dbgln("Note: Address {} looks like it may be a recently destroyed LockRefPtr", VirtualAddress(fault_address));
  90. } else if ((fault_address & 0xffff0000) == (nonnullrefptr_scrub_pattern & 0xffff0000)) {
  91. dbgln("Note: Address {} looks like it may be a recently destroyed NonnullLockRefPtr", VirtualAddress(fault_address));
  92. } else if ((fault_address & 0xffff0000) == (ownptr_scrub_pattern & 0xffff0000)) {
  93. dbgln("Note: Address {} looks like it may be a recently destroyed OwnPtr", VirtualAddress(fault_address));
  94. } else if ((fault_address & 0xffff0000) == (nonnullownptr_scrub_pattern & 0xffff0000)) {
  95. dbgln("Note: Address {} looks like it may be a recently destroyed NonnullOwnPtr", VirtualAddress(fault_address));
  96. } else if ((fault_address & 0xffff0000) == (lockrefptr_scrub_pattern & 0xffff0000)) {
  97. dbgln("Note: Address {} looks like it may be a recently destroyed LockRefPtr", VirtualAddress(fault_address));
  98. } else if ((fault_address & 0xffff0000) == (nonnulllockrefptr_scrub_pattern & 0xffff0000)) {
  99. dbgln("Note: Address {} looks like it may be a recently destroyed NonnullLockRefPtr", VirtualAddress(fault_address));
  100. }
  101. }
  102. if (current_thread) {
  103. auto& current_process = current_thread->process();
  104. if (current_process.is_user_process()) {
  105. auto fault_address_string = KString::formatted("{:p}", fault_address);
  106. auto fault_address_view = fault_address_string.is_error() ? ""sv : fault_address_string.value()->view();
  107. (void)current_process.try_set_coredump_property("fault_address"sv, fault_address_view);
  108. (void)current_process.try_set_coredump_property("fault_type"sv, type() == PageFault::Type::PageNotPresent ? "NotPresent"sv : "ProtectionViolation"sv);
  109. StringView fault_access;
  110. if (is_instruction_fetch())
  111. fault_access = "Execute"sv;
  112. else
  113. fault_access = access() == PageFault::Access::Read ? "Read"sv : "Write"sv;
  114. (void)current_process.try_set_coredump_property("fault_access"sv, fault_access);
  115. }
  116. }
  117. if (response == PageFaultResponse::BusError)
  118. return handle_crash(regs, "Page Fault (Bus Error)", SIGBUS, false);
  119. return handle_crash(regs, "Page Fault", SIGSEGV, response == PageFaultResponse::OutOfMemory);
  120. } else if (response == PageFaultResponse::Continue) {
  121. dbgln_if(PAGE_FAULT_DEBUG, "Continuing after resolved page fault");
  122. } else {
  123. VERIFY_NOT_REACHED();
  124. }
  125. }
  126. }