sigaction.cpp

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * Copyright (c) 2021, Idan Horowitz <idan.horowitz@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/Arch/SmapDisabler.h>
#include <Kernel/Arch/x86/InterruptDisabler.h>
#include <Kernel/Process.h>

namespace Kernel {

ErrorOr<FlatPtr> Process::sys$sigprocmask(int how, Userspace<const sigset_t*> set, Userspace<sigset_t*> old_set)
{
    VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this)
    TRY(require_promise(Pledge::sigaction));
    auto* current_thread = Thread::current();
    u32 previous_signal_mask;
    if (set) {
        auto set_value = TRY(copy_typed_from_user(set));
        switch (how) {
        case SIG_BLOCK:
            previous_signal_mask = current_thread->signal_mask_block(set_value, true);
            break;
        case SIG_UNBLOCK:
            previous_signal_mask = current_thread->signal_mask_block(set_value, false);
            break;
        case SIG_SETMASK:
            previous_signal_mask = current_thread->update_signal_mask(set_value);
            break;
        default:
            return EINVAL;
        }
    } else {
        previous_signal_mask = current_thread->signal_mask();
    }
    if (old_set) {
        TRY(copy_to_user(old_set, &previous_signal_mask));
    }
    return 0;
}
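
// A minimal userspace sketch of the three `how` paths above (illustrative only,
// not part of this file; it uses the POSIX libc wrapper rather than the raw syscall):
//
//     sigset_t set, old_set;
//     sigemptyset(&set);
//     sigaddset(&set, SIGINT);
//     sigprocmask(SIG_BLOCK, &set, &old_set);      // the signal_mask_block(set, true) path
//     // ... SIGINT stays pending instead of being delivered here ...
//     sigprocmask(SIG_SETMASK, &old_set, nullptr); // the update_signal_mask(old_set) path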

ErrorOr<FlatPtr> Process::sys$sigpending(Userspace<sigset_t*> set)
{
    VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this)
    TRY(require_promise(Pledge::stdio));
    auto pending_signals = Thread::current()->pending_signals();
    TRY(copy_to_user(set, &pending_signals));
    return 0;
}
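
// Usage sketch (illustrative only): paired with a blocking mask, sys$sigpending
// lets userspace observe signals that arrived while blocked:
//
//     sigset_t pending;
//     sigpending(&pending);
//     if (sigismember(&pending, SIGINT)) { /* SIGINT arrived while blocked */ }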

ErrorOr<FlatPtr> Process::sys$sigaction(int signum, Userspace<const sigaction*> user_act, Userspace<sigaction*> user_old_act)
{
    VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this)
    TRY(require_promise(Pledge::sigaction));
    if (signum < 1 || signum >= 32 || signum == SIGKILL || signum == SIGSTOP)
        return EINVAL;
    InterruptDisabler disabler; // FIXME: This should use a narrower lock. Maybe a way to ignore signals temporarily?
    auto& action = m_signal_action_data[signum];
    if (user_old_act) {
        sigaction old_act {};
        old_act.sa_flags = action.flags;
        old_act.sa_sigaction = reinterpret_cast<decltype(old_act.sa_sigaction)>(action.handler_or_sigaction.as_ptr());
        old_act.sa_mask = action.mask;
        TRY(copy_to_user(user_old_act, &old_act));
    }
    if (user_act) {
        auto act = TRY(copy_typed_from_user(user_act));
        action.mask = act.sa_mask;
        action.flags = act.sa_flags;
        action.handler_or_sigaction = VirtualAddress { reinterpret_cast<void*>(act.sa_sigaction) };
    }
    return 0;
}
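
// Usage sketch for the installation path above (illustrative only; `my_handler`
// is a placeholder with the SA_SIGINFO signature void(int, siginfo_t*, void*)).
// Note that SIGKILL and SIGSTOP are rejected with EINVAL by the range check above.
//
//     struct sigaction act {};
//     act.sa_sigaction = my_handler;   // stored in action.handler_or_sigaction
//     act.sa_flags = SA_SIGINFO;       // stored in action.flags
//     sigemptyset(&act.sa_mask);       // stored in action.mask
//     sigaction(SIGTERM, &act, nullptr);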

ErrorOr<FlatPtr> Process::sys$sigreturn([[maybe_unused]] RegisterState& registers)
{
    VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this)
    TRY(require_promise(Pledge::stdio));
    SmapDisabler disabler;
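
    // Trampoline stack layout, as implied by the pointer arithmetic below
    // (word offsets from userspace_esp/rsp, one machine word per slot):
    //
    //   [0]        stored eax/rax (the "smuggled" return value)
    //   [1..4]     ebp/rbp, return address, handler address, signal code
    //   [5]        saved signal mask
    //   [6..]      general-purpose registers (8 on i386, 16 on x86_64)
    //   [next]     eip/rip
    //   [next+1]   eflags/rflags (only safe_eflags_mask bits are restored)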
#if ARCH(I386)
    // Here, we restore the state pushed by dispatch_signal() and asm_signal_trampoline.
    u32* stack_ptr = (u32*)registers.userspace_esp;
    u32 smuggled_eax = *stack_ptr;
    // Pop the stored eax, ebp, return address, handler and signal code.
    stack_ptr += 5;
    Thread::current()->m_signal_mask = *stack_ptr;
    stack_ptr++;
    // Pop edi, esi, ebp, esp, ebx, edx, ecx and eax.
    memcpy(&registers.edi, stack_ptr, 8 * sizeof(FlatPtr));
    stack_ptr += 8;
    registers.eip = *stack_ptr;
    stack_ptr++;
    registers.eflags = (registers.eflags & ~safe_eflags_mask) | (*stack_ptr & safe_eflags_mask);
    stack_ptr++;
    registers.userspace_esp = registers.esp;
    return smuggled_eax;
#else
    // Here, we restore the state pushed by dispatch_signal() and asm_signal_trampoline.
    FlatPtr* stack_ptr = (FlatPtr*)registers.userspace_rsp;
    FlatPtr smuggled_rax = *stack_ptr;
    // Pop the stored rax, rbp, return address, handler and signal code.
    stack_ptr += 5;
    Thread::current()->m_signal_mask = *stack_ptr;
    stack_ptr++;
    // Pop rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, r8, r9, r10, r11, r12, r13, r14 and r15.
    memcpy(&registers.rdi, stack_ptr, 16 * sizeof(FlatPtr));
    stack_ptr += 16;
    registers.rip = *stack_ptr;
    stack_ptr++;
    registers.rflags = (registers.rflags & ~safe_eflags_mask) | (*stack_ptr & safe_eflags_mask);
    stack_ptr++;
    registers.userspace_rsp = registers.rsp;
    return smuggled_rax;
#endif
}

ErrorOr<void> Process::remap_range_as_stack(FlatPtr address, size_t size)
{
    // FIXME: This duplicates a lot of logic from sys$mprotect; it should be abstracted out somehow.
    auto range_to_remap = TRY(Memory::expand_range_to_page_boundaries(address, size));
    if (!range_to_remap.size())
        return EINVAL;

    if (!is_user_range(range_to_remap))
        return EFAULT;

    if (auto* whole_region = address_space().find_region_from_range(range_to_remap)) {
        if (!whole_region->is_mmap())
            return EPERM;
        if (!whole_region->vmobject().is_anonymous() || whole_region->is_shared())
            return EINVAL;
        whole_region->unsafe_clear_access();
        whole_region->set_readable(true);
        whole_region->set_writable(true);
        whole_region->set_stack(true);
        whole_region->set_syscall_region(false);
        whole_region->clear_to_zero();
        whole_region->remap();
        return {};
    }

    if (auto* old_region = address_space().find_region_containing(range_to_remap)) {
        if (!old_region->is_mmap())
            return EPERM;
        if (!old_region->vmobject().is_anonymous() || old_region->is_shared())
            return EINVAL;

        // Remove the old region from our regions tree, since we're going to add another region
        // with the exact same start address, but do not deallocate it yet.
        auto region = address_space().take_region(*old_region);

        // Unmap the old region here, specifying that we *don't* want the VM deallocated.
        region->unmap(Memory::Region::ShouldDeallocateVirtualRange::No);

        // This vector is the region(s) adjacent to our range.
        // We need to allocate a new region for the range we wanted to change permission bits on.
        auto adjacent_regions = TRY(address_space().try_split_region_around_range(*region, range_to_remap));

        size_t new_range_offset_in_vmobject = region->offset_in_vmobject() + (range_to_remap.base().get() - region->range().base().get());
        auto* new_region = TRY(address_space().try_allocate_split_region(*region, range_to_remap, new_range_offset_in_vmobject));
        new_region->unsafe_clear_access();
        new_region->set_readable(true);
        new_region->set_writable(true);
        new_region->set_stack(true);
        new_region->set_syscall_region(false);
        new_region->clear_to_zero();

        // Map the new regions using our page directory (they were just allocated and don't have one).
        for (auto* adjacent_region : adjacent_regions) {
            TRY(adjacent_region->map(address_space().page_directory()));
        }
        TRY(new_region->map(address_space().page_directory()));
        return {};
    }

    if (const auto& regions = TRY(address_space().find_regions_intersecting(range_to_remap)); regions.size()) {
        size_t full_size_found = 0;
        // Check that all intersecting regions are compatible.
        for (const auto* region : regions) {
            if (!region->is_mmap())
                return EPERM;
            if (!region->vmobject().is_anonymous() || region->is_shared())
                return EINVAL;
            full_size_found += region->range().intersect(range_to_remap).size();
        }

        if (full_size_found != range_to_remap.size())
            return ENOMEM;

        // Finally, iterate over each region, either updating its access flags if the range covers it wholly,
        // or carving out a new subregion with the appropriate access flags set.
        for (auto* old_region : regions) {
            const auto intersection_to_remap = range_to_remap.intersect(old_region->range());
            // If the region is completely covered by the range, simply update the access flags.
            if (intersection_to_remap == old_region->range()) {
                old_region->unsafe_clear_access();
                old_region->set_readable(true);
                old_region->set_writable(true);
                old_region->set_stack(true);
                old_region->set_syscall_region(false);
                old_region->clear_to_zero();
                old_region->remap();
                continue;
            }
            // Remove the old region from our regions tree, since we're going to add another region
            // with the exact same start address, but don't deallocate it yet.
            auto region = address_space().take_region(*old_region);
            // Unmap the old region here, specifying that we *don't* want the VM deallocated.
            region->unmap(Memory::Region::ShouldDeallocateVirtualRange::No);
            // This vector is the region(s) adjacent to our range.
            // We need to allocate a new region for the range we wanted to change permission bits on.
            auto adjacent_regions = TRY(address_space().try_split_region_around_range(*old_region, intersection_to_remap));
            // Since the range is not contained in a single region, it can only partially cover its starting and ending region,
            // therefore carving out a chunk from the region will always produce a single extra region, and not two.
            VERIFY(adjacent_regions.size() == 1);
            size_t new_range_offset_in_vmobject = old_region->offset_in_vmobject() + (intersection_to_remap.base().get() - old_region->range().base().get());
            auto* new_region = TRY(address_space().try_allocate_split_region(*region, intersection_to_remap, new_range_offset_in_vmobject));
            new_region->unsafe_clear_access();
            new_region->set_readable(true);
            new_region->set_writable(true);
            new_region->set_stack(true);
            new_region->set_syscall_region(false);
            new_region->clear_to_zero();
            // Map the new regions using our page directory (they were just allocated and don't have one).
            TRY(adjacent_regions[0]->map(address_space().page_directory()));
            TRY(new_region->map(address_space().page_directory()));
        }
        return {};
    }

    return EINVAL;
}
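
// Illustration of the single-containing-region case above (not normative):
// carving range R out of one larger anonymous mmap region A yields up to two
// adjacent remnants plus a fresh MAP_STACK region covering R itself:
//
//   before:  [ A: anonymous, mmap, original access bits                ]
//   after:   [ left remnant ][ R: rw-, stack, zeroed ][ right remnant ]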

ErrorOr<FlatPtr> Process::sys$sigaltstack(Userspace<const stack_t*> user_ss, Userspace<stack_t*> user_old_ss)
{
    VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this)
    TRY(require_promise(Pledge::sigaction));

    if (user_old_ss) {
        stack_t old_ss_value {};
        old_ss_value.ss_sp = (void*)Thread::current()->m_alternative_signal_stack;
        old_ss_value.ss_size = Thread::current()->m_alternative_signal_stack_size;
        old_ss_value.ss_flags = 0;
        if (!Thread::current()->has_alternative_signal_stack())
            old_ss_value.ss_flags = SS_DISABLE;
        else if (Thread::current()->is_in_alternative_signal_stack())
            old_ss_value.ss_flags = SS_ONSTACK;
        TRY(copy_to_user(user_old_ss, &old_ss_value));
    }

    if (user_ss) {
        auto ss = TRY(copy_typed_from_user(user_ss));

        if (Thread::current()->is_in_alternative_signal_stack())
            return EPERM;

        if (ss.ss_flags == SS_DISABLE) {
            Thread::current()->m_alternative_signal_stack_size = 0;
            Thread::current()->m_alternative_signal_stack = 0;
        } else if (ss.ss_flags == 0) {
            // POSIX only requires ENOMEM for sizes strictly smaller than MINSIGSTKSZ.
            if (ss.ss_size < MINSIGSTKSZ)
                return ENOMEM;
            if (Checked<FlatPtr>::addition_would_overflow((FlatPtr)ss.ss_sp, ss.ss_size))
                return ENOMEM;

            // In order to preserve compatibility with our MAP_STACK, W^X and syscall region
            // protections, sigaltstack ranges are carved out of their regions, zeroed, and
            // turned into read/writable MAP_STACK-enabled regions.
            // This is inspired by OpenBSD's solution: https://man.openbsd.org/sigaltstack.2
            TRY(remap_range_as_stack((FlatPtr)ss.ss_sp, ss.ss_size));

            Thread::current()->m_alternative_signal_stack = (FlatPtr)ss.ss_sp;
            Thread::current()->m_alternative_signal_stack_size = ss.ss_size;
        } else {
            return EINVAL;
        }
    }

    return 0;
}
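
// Usage sketch tying sys$sigaltstack to SA_ONSTACK delivery (illustrative only;
// `alt_stack` and `handle_segv` are placeholder names):
//
//     static u8 alt_stack[SIGSTKSZ];
//     stack_t ss {};
//     ss.ss_sp = alt_stack;
//     ss.ss_size = sizeof(alt_stack); // must be >= MINSIGSTKSZ (see check above)
//     ss.ss_flags = 0;
//     sigaltstack(&ss, nullptr);      // triggers remap_range_as_stack() above
//
//     struct sigaction act {};
//     act.sa_flags = SA_SIGINFO | SA_ONSTACK; // deliver this handler on alt_stack
//     act.sa_sigaction = handle_segv;
//     sigaction(SIGSEGV, &act, nullptr);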

// https://pubs.opengroup.org/onlinepubs/9699919799/functions/sigtimedwait.html
ErrorOr<FlatPtr> Process::sys$sigtimedwait(Userspace<const sigset_t*> set, Userspace<siginfo_t*> info, Userspace<const timespec*> timeout)
{
    VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this)
    TRY(require_promise(Pledge::sigaction));

    sigset_t set_value;
    TRY(copy_from_user(&set_value, set));

    Thread::BlockTimeout block_timeout = {};
    if (timeout) {
        auto timeout_time = TRY(copy_time_from_user(timeout));
        block_timeout = Thread::BlockTimeout(false, &timeout_time);
    }

    siginfo_t info_value = {};
    auto block_result = Thread::current()->block<Thread::SignalBlocker>(block_timeout, set_value, info_value);
    if (block_result.was_interrupted())
        return EINTR;

    // We check for an unset signal instead of directly checking for a timeout interruption
    // in order to allow polling the pending signals by setting the timeout to 0.
    if (info_value.si_signo == SIGINVAL) {
        VERIFY(block_result == Thread::BlockResult::InterruptedByTimeout);
        return EAGAIN;
    }

    if (info)
        TRY(copy_to_user(info, &info_value));
    return info_value.si_signo;
}
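
// Polling sketch for the zero-timeout behavior described above (illustrative only):
//
//     sigset_t set;
//     sigemptyset(&set);
//     sigaddset(&set, SIGCHLD);
//     timespec poll_timeout { 0, 0 };
//     siginfo_t info;
//     int rc = sigtimedwait(&set, &info, &poll_timeout);
//     // rc == SIGCHLD if one was pending; otherwise the libc wrapper reports
//     // EAGAIN, which corresponds to the SIGINVAL branch above.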

}