Thread.cpp

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <AK/Demangle.h>
#include <AK/StringBuilder.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/FileSystem/FileDescription.h>
#include <Kernel/KSyms.h>
#include <Kernel/Process.h>
#include <Kernel/Profiling.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Thread.h>
#include <Kernel/ThreadTracer.h>
#include <Kernel/TimerQueue.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PageDirectory.h>
#include <Kernel/VM/ProcessPagingScope.h>
#include <LibC/signal_numbers.h>
#include <LibELF/Loader.h>

//#define SIGNAL_DEBUG
//#define THREAD_DEBUG

namespace Kernel {
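
// A new Thread is given a TID (the first thread of a process reuses the PID),
// default signal dispositions, a freshly allocated FPU state buffer, a TSS with
// ring 0 or ring 3 segment selectors depending on the owning process, and its
// own kernel stack region.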
Thread::Thread(NonnullRefPtr<Process> process)
    : m_process(move(process))
    , m_name(m_process->name())
{
    if (m_process->m_thread_count.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel) == 0) {
        // First thread gets TID == PID
        m_tid = m_process->pid().value();
    } else {
        m_tid = Process::allocate_pid().value();
    }
#ifdef THREAD_DEBUG
    dbg() << "Created new thread " << m_process->name() << "(" << m_process->pid().value() << ":" << m_tid.value() << ")";
#endif
    set_default_signal_dispositions();
    m_fpu_state = (FPUState*)kmalloc_aligned<16>(sizeof(FPUState));
    reset_fpu_state();
    memset(&m_tss, 0, sizeof(m_tss));
    m_tss.iomapbase = sizeof(TSS32);

    // Only IF is set when a process boots.
    m_tss.eflags = 0x0202;

    if (m_process->is_kernel_process()) {
        m_tss.cs = GDT_SELECTOR_CODE0;
        m_tss.ds = GDT_SELECTOR_DATA0;
        m_tss.es = GDT_SELECTOR_DATA0;
        m_tss.fs = GDT_SELECTOR_PROC;
        m_tss.ss = GDT_SELECTOR_DATA0;
        m_tss.gs = 0;
    } else {
        m_tss.cs = GDT_SELECTOR_CODE3 | 3;
        m_tss.ds = GDT_SELECTOR_DATA3 | 3;
        m_tss.es = GDT_SELECTOR_DATA3 | 3;
        m_tss.fs = GDT_SELECTOR_DATA3 | 3;
        m_tss.ss = GDT_SELECTOR_DATA3 | 3;
        m_tss.gs = GDT_SELECTOR_TLS | 3;
    }

    m_tss.cr3 = m_process->page_directory().cr3();

    m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid.value()), Region::Access::Read | Region::Access::Write, false, true);
    m_kernel_stack_region->set_stack(true);
    m_kernel_stack_base = m_kernel_stack_region->vaddr().get();
    m_kernel_stack_top = m_kernel_stack_region->vaddr().offset(default_kernel_stack_size).get() & 0xfffffff8u;

    if (m_process->is_kernel_process()) {
        m_tss.esp = m_tss.esp0 = m_kernel_stack_top;
    } else {
        // Ring 3 processes get a separate stack for ring 0.
        // The ring 3 stack will be assigned by exec().
        m_tss.ss0 = GDT_SELECTOR_DATA0;
        m_tss.esp0 = m_kernel_stack_top;
    }

    // We need to add another reference if we could successfully create
    // all the resources needed for this thread. The reason for this is that
    // we don't want to delete this thread after dropping the reference,
    // as it may still be running or scheduled to be run.
    // The finalizer is responsible for dropping this reference once this
    // thread is ready to be cleaned up.
    ref();

    if (m_process->pid() != 0)
        Scheduler::init_thread(*this);
}

Thread::~Thread()
{
    {
        // We need to explicitly remove ourselves from the thread list
        // here. We may get pre-empted in the middle of destructing this
        // thread, which causes problems if the thread list is iterated.
        // Specifically, if this is the last thread of a process, checking
        // block conditions would access m_process, which would be in
        // the middle of being destroyed.
        ScopedSpinLock lock(g_scheduler_lock);
        g_scheduler_data->thread_list_for_state(m_state).remove(*this);
    }

    ASSERT(!m_joiner);
}
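
// Transition out of the Blocked state. Callers must hold both g_scheduler_lock
// and m_lock (asserted below); the current thread goes straight back to Running,
// any other thread becomes Runnable.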
void Thread::unblock()
{
    ASSERT(g_scheduler_lock.own_lock());
    ASSERT(m_lock.own_lock());
    m_blocker = nullptr;
    if (Thread::current() == this) {
        set_state(Thread::Running);
        return;
    }
    ASSERT(m_state != Thread::Runnable && m_state != Thread::Running);
    set_state(Thread::Runnable);
}

void Thread::set_should_die()
{
    if (m_should_die) {
#ifdef THREAD_DEBUG
        dbg() << *this << " Should already die";
#endif
        return;
    }
    ScopedCritical critical;

    // Remember that we should die instead of returning to userspace.
    {
        ScopedSpinLock lock(g_scheduler_lock);
        m_should_die = true;

        // NOTE: Even the current thread can technically be in "Stopped"
        // state! This is the case when another thread sent a SIGSTOP to
        // it while it was running and it calls e.g. exit() before
        // the scheduler gets involved again.
        if (is_stopped()) {
            // If we were stopped, we need to briefly resume so that
            // the kernel stacks can clean up. We won't ever return back
            // to user mode, though.
            resume_from_stopped();
        } else if (state() == Queued) {
            // m_queue can only be accessed safely if g_scheduler_lock is held!
            if (m_queue) {
                m_queue->dequeue(*this);
                m_queue = nullptr;
                // Wake the thread
                wake_from_queue();
            }
        }
    }

    if (is_blocked()) {
        ScopedSpinLock lock(m_lock);
        ASSERT(m_blocker != nullptr);
        // We're blocked in the kernel.
        m_blocker->set_interrupted_by_death();
        unblock();
    }
}

void Thread::die_if_needed()
{
    ASSERT(Thread::current() == this);

    if (!m_should_die)
        return;

    unlock_process_if_locked();

    ScopedCritical critical;
    set_should_die();

    // Flag a context switch. Because we're in a critical section,
    // Scheduler::yield will actually only mark a pending context switch.
    // Simply leaving the critical section would not necessarily trigger
    // a switch.
    Scheduler::yield();

    // Now leave the critical section so that we can also trigger the
    // actual context switch.
    u32 prev_flags;
    Processor::current().clear_critical(prev_flags, false);

    dbg() << "die_if_needed returned from clear_critical!!! in irq: " << Processor::current().in_irq();

    // We should never get here, but the scoped scheduler lock
    // will be released by Scheduler::context_switch again.
    ASSERT_NOT_REACHED();
}
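
// The next three functions cooperate to temporarily drop the process "big lock"
// around a yield: unlock_process_if_locked() releases it if we hold it, and
// relock_process() re-acquires it after clearing the critical section that kept
// the pending context switch from happening.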
void Thread::yield_without_holding_big_lock()
{
    bool did_unlock = unlock_process_if_locked();
    // NOTE: Even though we call Scheduler::yield here, unless we happen
    // to be outside of a critical section, the yield will be postponed
    // until leaving it in relock_process.
    Scheduler::yield();
    relock_process(did_unlock);
}

bool Thread::unlock_process_if_locked()
{
    return process().big_lock().force_unlock_if_locked();
}

void Thread::relock_process(bool did_unlock)
{
    // Clearing the critical section may trigger the context switch
    // flagged by calling Scheduler::donate_to or Scheduler::yield
    // above. We have to do it this way because we intentionally
    // leave the critical section here to be able to switch contexts.
    u32 prev_flags;
    u32 prev_crit = Processor::current().clear_critical(prev_flags, true);

    if (did_unlock) {
        // We've unblocked, relock the process if needed and carry on.
        process().big_lock().lock();
    }

    // NOTE: We may be on a different CPU now!
    Processor::current().restore_critical(prev_crit, prev_flags);
}
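
// Both sleep variants block on a SleepBlocker until g_uptime reaches the wakeup
// time (measured in scheduler ticks). Waking up before that point means the
// sleep was interrupted, which the assertions below check.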
u64 Thread::sleep(u64 ticks)
{
    ASSERT(state() == Thread::Running);
    u64 wakeup_time = g_uptime + ticks;
    auto ret = Thread::current()->block<Thread::SleepBlocker>(nullptr, wakeup_time);
    if (wakeup_time > g_uptime) {
        ASSERT(ret.was_interrupted());
    }
    return wakeup_time;
}

u64 Thread::sleep_until(u64 wakeup_time)
{
    ASSERT(state() == Thread::Running);
    auto ret = Thread::current()->block<Thread::SleepBlocker>(nullptr, wakeup_time);
    if (wakeup_time > g_uptime)
        ASSERT(ret.was_interrupted());
    return wakeup_time;
}

const char* Thread::state_string() const
{
    switch (state()) {
    case Thread::Invalid:
        return "Invalid";
    case Thread::Runnable:
        return "Runnable";
    case Thread::Running:
        return "Running";
    case Thread::Dying:
        return "Dying";
    case Thread::Dead:
        return "Dead";
    case Thread::Stopped:
        return "Stopped";
    case Thread::Queued:
        return "Queued";
    case Thread::Blocked: {
        ScopedSpinLock lock(m_lock);
        ASSERT(m_blocker != nullptr);
        return m_blocker->state_string();
    }
    }
    klog() << "Thread::state_string(): Invalid state: " << state();
    ASSERT_NOT_REACHED();
    return nullptr;
}

void Thread::finalize()
{
    ASSERT(Thread::current() == g_finalizer);
    ASSERT(Thread::current() != this);
    ASSERT(!m_lock.own_lock());

    {
        ScopedSpinLock lock(g_scheduler_lock);
#ifdef THREAD_DEBUG
        dbg() << "Finalizing thread " << *this;
#endif
        set_state(Thread::State::Dead);

        if (auto* joiner = m_joiner.exchange(nullptr, AK::memory_order_acq_rel)) {
            // Notify joiner that we exited
            static_cast<JoinBlocker*>(joiner->m_blocker)->joinee_exited(m_exit_value);
        }
    }

    if (m_dump_backtrace_on_finalization)
        dbg() << backtrace_impl();

    kfree_aligned(m_fpu_state);

    auto thread_cnt_before = m_process->m_thread_count.fetch_sub(1, AK::MemoryOrder::memory_order_acq_rel);
    ASSERT(thread_cnt_before != 0);
    if (thread_cnt_before == 1)
        process().finalize();
}

void Thread::finalize_dying_threads()
{
    ASSERT(Thread::current() == g_finalizer);
    Vector<Thread*, 32> dying_threads;
    {
        ScopedSpinLock lock(g_scheduler_lock);
        for_each_in_state(Thread::State::Dying, [&](Thread& thread) {
            if (thread.is_finalizable())
                dying_threads.append(&thread);
            return IterationDecision::Continue;
        });
    }
    for (auto* thread : dying_threads) {
        thread->finalize();

        // This thread will never execute again, drop the running reference.
        // NOTE: This may not necessarily drop the last reference if anything
        //       else is still holding onto this thread!
        thread->unref();
    }
}
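
// Per-tick accounting: the tick is credited to user or kernel time based on the
// privilege level in the saved CS selector, and the return value reports whether
// the thread still has time slice left (m_ticks_left).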
bool Thread::tick()
{
    ++m_ticks;
    if (tss().cs & 3)
        ++m_process->m_ticks_in_user;
    else
        ++m_process->m_ticks_in_kernel;
    return --m_ticks_left;
}

bool Thread::has_pending_signal(u8 signal) const
{
    ScopedSpinLock lock(g_scheduler_lock);
    return m_pending_signals & (1 << (signal - 1));
}

u32 Thread::pending_signals() const
{
    ScopedSpinLock lock(g_scheduler_lock);
    return m_pending_signals;
}
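
// Pending signals are kept in a 32-bit mask with bit (signal - 1) set for each
// pending signal. m_have_any_unmasked_pending_signals caches whether any pending
// signal is currently unmasked, and is refreshed whenever the pending set or the
// signal mask changes.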
void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender)
{
    ASSERT(signal < 32);
    ScopedSpinLock lock(g_scheduler_lock);

    // FIXME: Figure out what to do for masked signals. Should we also ignore them here?
    if (should_ignore_signal(signal)) {
#ifdef SIGNAL_DEBUG
        dbg() << "Signal " << signal << " was ignored by " << process();
#endif
        return;
    }

#ifdef SIGNAL_DEBUG
    if (sender)
        dbg() << "Signal: " << *sender << " sent " << signal << " to " << process();
    else
        dbg() << "Signal: Kernel sent " << signal << " to " << process();
#endif

    m_pending_signals |= 1 << (signal - 1);
    m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);
}

u32 Thread::update_signal_mask(u32 signal_mask)
{
    ScopedSpinLock lock(g_scheduler_lock);
    auto previous_signal_mask = m_signal_mask;
    m_signal_mask = signal_mask;
    m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);
    return previous_signal_mask;
}

u32 Thread::signal_mask() const
{
    ScopedSpinLock lock(g_scheduler_lock);
    return m_signal_mask;
}

u32 Thread::signal_mask_block(sigset_t signal_set, bool block)
{
    ScopedSpinLock lock(g_scheduler_lock);
    auto previous_signal_mask = m_signal_mask;
    if (block)
        m_signal_mask &= ~signal_set;
    else
        m_signal_mask |= signal_set;
    m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);
    return previous_signal_mask;
}

void Thread::clear_signals()
{
    ScopedSpinLock lock(g_scheduler_lock);
    m_signal_mask = 0;
    m_pending_signals = 0;
    m_have_any_unmasked_pending_signals.store(false, AK::memory_order_release);
}

// Certain exceptions, such as SIGSEGV and SIGILL, put a
// thread into a state where the signal handler must be
// invoked immediately, otherwise it will continue to fault.
// This function should be used in an exception handler to
// ensure that when the thread resumes, it's executing in
// the appropriate signal handler.
void Thread::send_urgent_signal_to_self(u8 signal)
{
    ASSERT(Thread::current() == this);
    ScopedSpinLock lock(g_scheduler_lock);
    if (dispatch_signal(signal) == ShouldUnblockThread::No)
        Scheduler::yield();
}

ShouldUnblockThread Thread::dispatch_one_pending_signal()
{
    ASSERT(m_lock.own_lock());
    u32 signal_candidates = m_pending_signals & ~m_signal_mask;
    ASSERT(signal_candidates);

    u8 signal = 1;
    for (; signal < 32; ++signal) {
        if (signal_candidates & (1 << (signal - 1))) {
            break;
        }
    }
    return dispatch_signal(signal);
}

enum class DefaultSignalAction {
    Terminate,
    Ignore,
    DumpCore,
    Stop,
    Continue,
};

static DefaultSignalAction default_signal_action(u8 signal)
{
    ASSERT(signal && signal < NSIG);

    switch (signal) {
    case SIGHUP:
    case SIGINT:
    case SIGKILL:
    case SIGPIPE:
    case SIGALRM:
    case SIGUSR1:
    case SIGUSR2:
    case SIGVTALRM:
    case SIGSTKFLT:
    case SIGIO:
    case SIGPROF:
    case SIGTERM:
        return DefaultSignalAction::Terminate;
    case SIGCHLD:
    case SIGURG:
    case SIGWINCH:
    case SIGINFO:
        return DefaultSignalAction::Ignore;
    case SIGQUIT:
    case SIGILL:
    case SIGTRAP:
    case SIGABRT:
    case SIGBUS:
    case SIGFPE:
    case SIGSEGV:
    case SIGXCPU:
    case SIGXFSZ:
    case SIGSYS:
        return DefaultSignalAction::DumpCore;
    case SIGCONT:
        return DefaultSignalAction::Continue;
    case SIGSTOP:
    case SIGTSTP:
    case SIGTTIN:
    case SIGTTOU:
        return DefaultSignalAction::Stop;
    }
    ASSERT_NOT_REACHED();
}

bool Thread::should_ignore_signal(u8 signal) const
{
    ASSERT(signal < 32);
    auto& action = m_signal_action_data[signal];
    if (action.handler_or_sigaction.is_null())
        return default_signal_action(signal) == DefaultSignalAction::Ignore;
    if (action.handler_or_sigaction.as_ptr() == SIG_IGN)
        return true;
    return false;
}

bool Thread::has_signal_handler(u8 signal) const
{
    ASSERT(signal < 32);
    auto& action = m_signal_action_data[signal];
    return !action.handler_or_sigaction.is_null();
}

static bool push_value_on_user_stack(u32* stack, u32 data)
{
    *stack -= 4;
    return copy_to_user((u32*)*stack, &data);
}

void Thread::resume_from_stopped()
{
    ASSERT(is_stopped());
    ASSERT(m_stop_state != State::Invalid);
    ASSERT(g_scheduler_lock.own_lock());
    set_state(m_stop_state);
    m_stop_state = State::Invalid;
    // make sure SemiPermanentBlocker is unblocked
    if (m_state != Thread::Runnable && m_state != Thread::Running) {
        ScopedSpinLock lock(m_lock);
        if (m_blocker && m_blocker->is_reason_signal())
            unblock();
    }
}
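
// Delivers a single signal to this thread. Default dispositions are handled in
// the kernel (stop, continue, terminate, dump core); for a userspace handler the
// saved RegisterState is rewritten so that, on the way back to ring 3, execution
// resumes in the signal trampoline with the old register values, the previous
// signal mask, the signal number and the handler address pushed on the user stack.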
ShouldUnblockThread Thread::dispatch_signal(u8 signal)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(g_scheduler_lock.own_lock());
    ASSERT(signal > 0 && signal <= 32);
    ASSERT(process().is_user_process());

#ifdef SIGNAL_DEBUG
    klog() << "signal: dispatch signal " << signal << " to " << *this;
#endif

    if (m_state == Invalid || !is_initialized()) {
        // Thread has barely been created, we need to wait until it is
        // at least in Runnable state and is_initialized() returns true,
        // which indicates that it is fully set up and we actually have
        // a register state on the stack that we can modify.
        return ShouldUnblockThread::No;
    }

    auto& action = m_signal_action_data[signal];
    // FIXME: Implement SA_SIGINFO signal handlers.
    ASSERT(!(action.flags & SA_SIGINFO));

    // Mark this signal as handled.
    m_pending_signals &= ~(1 << (signal - 1));
    m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);

    if (signal == SIGSTOP) {
        if (!is_stopped()) {
            m_stop_signal = SIGSTOP;
            set_state(State::Stopped);
        }
        return ShouldUnblockThread::No;
    }

    if (signal == SIGCONT && is_stopped()) {
        resume_from_stopped();
    } else {
        auto* thread_tracer = tracer();
        if (thread_tracer != nullptr) {
            // When a thread is traced, it should be stopped whenever it receives a signal.
            // The tracer is notified of this by using waitpid().
            // Only "pending signals" from the tracer are sent to the tracee.
            if (!thread_tracer->has_pending_signal(signal)) {
                m_stop_signal = signal;
                // make sure SemiPermanentBlocker is unblocked
                ScopedSpinLock lock(m_lock);
                if (m_blocker && m_blocker->is_reason_signal())
                    unblock();
                set_state(Stopped);
                return ShouldUnblockThread::No;
            }
            thread_tracer->unset_signal(signal);
        }
    }

    auto handler_vaddr = action.handler_or_sigaction;
    if (handler_vaddr.is_null()) {
        switch (default_signal_action(signal)) {
        case DefaultSignalAction::Stop:
            m_stop_signal = signal;
            set_state(Stopped);
            return ShouldUnblockThread::No;
        case DefaultSignalAction::DumpCore:
            process().for_each_thread([](auto& thread) {
                thread.set_dump_backtrace_on_finalization();
                return IterationDecision::Continue;
            });
            [[fallthrough]];
        case DefaultSignalAction::Terminate:
            m_process->terminate_due_to_signal(signal);
            return ShouldUnblockThread::No;
        case DefaultSignalAction::Ignore:
            ASSERT_NOT_REACHED();
        case DefaultSignalAction::Continue:
            return ShouldUnblockThread::Yes;
        }
        ASSERT_NOT_REACHED();
    }

    if (handler_vaddr.as_ptr() == SIG_IGN) {
#ifdef SIGNAL_DEBUG
        klog() << "signal: " << *this << " ignored signal " << signal;
#endif
        return ShouldUnblockThread::Yes;
    }

    ProcessPagingScope paging_scope(m_process);

    u32 old_signal_mask = m_signal_mask;
    u32 new_signal_mask = action.mask;
    if (action.flags & SA_NODEFER)
        new_signal_mask &= ~(1 << (signal - 1));
    else
        new_signal_mask |= 1 << (signal - 1);

    m_signal_mask |= new_signal_mask;
    m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);

    auto setup_stack = [&](RegisterState& state) {
        u32* stack = &state.userspace_esp;
        u32 old_esp = *stack;
        u32 ret_eip = state.eip;
        u32 ret_eflags = state.eflags;

#ifdef SIGNAL_DEBUG
        klog() << "signal: setting up user stack to return to eip: " << String::format("%p", ret_eip) << " esp: " << String::format("%p", old_esp);
#endif

        // Align the stack to 16 bytes.
        // Note that we push 56 bytes (4 * 14) onto the stack,
        // so we need to account for this here.
        u32 stack_alignment = (*stack - 56) % 16;
        *stack -= stack_alignment;

        push_value_on_user_stack(stack, ret_eflags);

        push_value_on_user_stack(stack, ret_eip);
        push_value_on_user_stack(stack, state.eax);
        push_value_on_user_stack(stack, state.ecx);
        push_value_on_user_stack(stack, state.edx);
        push_value_on_user_stack(stack, state.ebx);
        push_value_on_user_stack(stack, old_esp);
        push_value_on_user_stack(stack, state.ebp);
        push_value_on_user_stack(stack, state.esi);
        push_value_on_user_stack(stack, state.edi);

        // PUSH old_signal_mask
        push_value_on_user_stack(stack, old_signal_mask);

        push_value_on_user_stack(stack, signal);
        push_value_on_user_stack(stack, handler_vaddr.get());
        push_value_on_user_stack(stack, 0); // push fake return address

        ASSERT((*stack % 16) == 0);
    };

    // We now place the thread state on the userspace stack.
    // Note that we use a RegisterState.
    // Conversely, when the thread isn't blocking the RegisterState may not be
    // valid (fork, exec etc) but the TSS will, so we use that instead.
    auto& regs = get_register_dump_from_stack();
    setup_stack(regs);
    regs.eip = g_return_to_ring3_from_signal_trampoline.get();

#ifdef SIGNAL_DEBUG
    klog() << "signal: Okay, " << *this << " {" << state_string() << "} has been primed with signal handler " << String::format("%w", m_tss.cs) << ":" << String::format("%x", m_tss.eip) << " to deliver " << signal;
#endif
    return ShouldUnblockThread::Yes;
}

void Thread::set_default_signal_dispositions()
{
    // FIXME: Set up all the right default actions. See signal(7).
    memset(&m_signal_action_data, 0, sizeof(m_signal_action_data));
    m_signal_action_data[SIGCHLD].handler_or_sigaction = VirtualAddress(SIG_IGN);
    m_signal_action_data[SIGWINCH].handler_or_sigaction = VirtualAddress(SIG_IGN);
}

bool Thread::push_value_on_stack(FlatPtr value)
{
    m_tss.esp -= 4;
    FlatPtr* stack_ptr = (FlatPtr*)m_tss.esp;
    return copy_to_user(stack_ptr, &value);
}

RegisterState& Thread::get_register_dump_from_stack()
{
    return *(RegisterState*)(kernel_stack_top() - sizeof(RegisterState));
}
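
// Builds the initial userspace stack for the main thread: argument, environment
// and auxiliary strings are copied out first, followed by the auxv array, a
// null-terminated envp array and a null-terminated argv array; after 16-byte
// alignment the envp and argv pointers, argc and a zero "return address" are
// pushed last, and the resulting stack pointer is returned.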
KResultOr<u32> Thread::make_userspace_stack_for_main_thread(Vector<String> arguments, Vector<String> environment, Vector<AuxiliaryValue> auxiliary_values)
{
    auto* region = m_process->allocate_region(VirtualAddress(), default_userspace_stack_size, "Stack (Main thread)", PROT_READ | PROT_WRITE, false);
    if (!region)
        return KResult(-ENOMEM);
    region->set_stack(true);

    FlatPtr new_esp = region->vaddr().offset(default_userspace_stack_size).get();

    auto push_on_new_stack = [&new_esp](u32 value) {
        new_esp -= 4;
        Userspace<u32*> stack_ptr = new_esp;
        return copy_to_user(stack_ptr, &value);
    };

    auto push_aux_value_on_new_stack = [&new_esp](auxv_t value) {
        new_esp -= sizeof(auxv_t);
        Userspace<auxv_t*> stack_ptr = new_esp;
        return copy_to_user(stack_ptr, &value);
    };

    auto push_string_on_new_stack = [&new_esp](const String& string) {
        new_esp -= round_up_to_power_of_two(string.length() + 1, 4);
        Userspace<u32*> stack_ptr = new_esp;
        return copy_to_user(stack_ptr, string.characters(), string.length() + 1);
    };

    Vector<FlatPtr> argv_entries;
    for (auto& argument : arguments) {
        push_string_on_new_stack(argument);
        argv_entries.append(new_esp);
    }

    Vector<FlatPtr> env_entries;
    for (auto& variable : environment) {
        push_string_on_new_stack(variable);
        env_entries.append(new_esp);
    }

    for (auto& value : auxiliary_values) {
        if (!value.optional_string.is_empty()) {
            push_string_on_new_stack(value.optional_string);
            value.auxv.a_un.a_ptr = (void*)new_esp;
        }
    }

    for (ssize_t i = auxiliary_values.size() - 1; i >= 0; --i) {
        auto& value = auxiliary_values[i];
        push_aux_value_on_new_stack(value.auxv);
    }

    push_on_new_stack(0);
    for (ssize_t i = env_entries.size() - 1; i >= 0; --i)
        push_on_new_stack(env_entries[i]);
    FlatPtr envp = new_esp;

    push_on_new_stack(0);
    for (ssize_t i = argv_entries.size() - 1; i >= 0; --i)
        push_on_new_stack(argv_entries[i]);
    FlatPtr argv = new_esp;

    // NOTE: The stack needs to be 16-byte aligned.
    new_esp -= new_esp % 16;

    push_on_new_stack((FlatPtr)envp);
    push_on_new_stack((FlatPtr)argv);
    push_on_new_stack((FlatPtr)argv_entries.size());
    push_on_new_stack(0);

    return new_esp;
}
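
// Creates a new Thread for the given process that inherits this thread's signal
// dispositions, signal mask, FPU state and thread-specific (TLS) bookkeeping.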
RefPtr<Thread> Thread::clone(Process& process)
{
    auto clone = adopt(*new Thread(process));
    memcpy(clone->m_signal_action_data, m_signal_action_data, sizeof(m_signal_action_data));
    clone->m_signal_mask = m_signal_mask;
    memcpy(clone->m_fpu_state, m_fpu_state, sizeof(FPUState));
    clone->m_thread_specific_data = m_thread_specific_data;
    clone->m_thread_specific_region_size = m_thread_specific_region_size;
    return clone;
}

void Thread::set_state(State new_state)
{
    ASSERT(g_scheduler_lock.own_lock());
    if (new_state == m_state)
        return;

    if (new_state == Blocked) {
        // We should always have a Blocker while blocked.
        ASSERT(m_blocker != nullptr);
    }

    auto previous_state = m_state;
    if (previous_state == Invalid) {
        // If we were *just* created, we may already have pending signals.
        ScopedSpinLock thread_lock(m_lock);
        if (has_unmasked_pending_signals()) {
            dbg() << "Dispatch pending signals to new thread " << *this;
            dispatch_one_pending_signal();
        }
    }

    if (new_state == Stopped) {
        m_stop_state = m_state;
    }

    m_state = new_state;
#ifdef THREAD_DEBUG
    dbg() << "Set Thread " << *this << " state to " << state_string();
#endif

    if (m_process->pid() != 0) {
        update_state_for_thread(previous_state);
        ASSERT(g_scheduler_data->has_thread(*this));
    }

    if (m_state == Dying) {
        ASSERT(previous_state != Queued);
        if (this != Thread::current() && is_finalizable()) {
            // Some other thread set this thread to Dying, notify the
            // finalizer right away as it can be cleaned up now.
            Scheduler::notify_finalizer();
        }
    }
}

void Thread::update_state_for_thread(Thread::State previous_state)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(g_scheduler_data);
    ASSERT(g_scheduler_lock.own_lock());
    auto& previous_list = g_scheduler_data->thread_list_for_state(previous_state);
    auto& list = g_scheduler_data->thread_list_for_state(state());

    if (&previous_list != &list) {
        previous_list.remove(*this);
    }

    if (list.contains(*this))
        return;

    list.append(*this);
}

String Thread::backtrace()
{
    return backtrace_impl();
}

struct RecognizedSymbol {
    u32 address;
    const KernelSymbol* symbol { nullptr };
};

static bool symbolicate(const RecognizedSymbol& symbol, const Process& process, StringBuilder& builder, Process::ELFBundle* elf_bundle)
{
    if (!symbol.address)
        return false;

    bool mask_kernel_addresses = !process.is_superuser();
    if (!symbol.symbol) {
        if (!is_user_address(VirtualAddress(symbol.address))) {
            builder.append("0xdeadc0de\n");
        } else {
            if (elf_bundle && elf_bundle->elf_loader->has_symbols())
                builder.appendf("%p %s\n", symbol.address, elf_bundle->elf_loader->symbolicate(symbol.address).characters());
            else
                builder.appendf("%p\n", symbol.address);
        }
        return true;
    }
    unsigned offset = symbol.address - symbol.symbol->address;
    if (symbol.symbol->address == g_highest_kernel_symbol_address && offset > 4096) {
        builder.appendf("%p\n", mask_kernel_addresses ? 0xdeadc0de : symbol.address);
    } else {
        builder.appendf("%p %s +%u\n", mask_kernel_addresses ? 0xdeadc0de : symbol.address, demangle(symbol.symbol->name).characters(), offset);
    }
    return true;
}
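
// Walks the saved frame pointer chain for this thread under g_scheduler_lock,
// using copy_from_user() for user-range frames and safe_memcpy() for everything
// else so that a bogus frame pointer cannot take down the kernel, then
// symbolicates the collected return addresses.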
String Thread::backtrace_impl()
{
    Vector<RecognizedSymbol, 128> recognized_symbols;

    auto& process = const_cast<Process&>(this->process());
    OwnPtr<Process::ELFBundle> elf_bundle;
    if (!Processor::current().in_irq()) {
        // If we're handling IRQs we can't really safely symbolicate.
        elf_bundle = process.elf_bundle();
    }
    ProcessPagingScope paging_scope(process);

    // To prevent a context switch involving this thread, which may happen
    // on another processor, we need to acquire the scheduler lock while
    // walking the stack.
    {
        ScopedSpinLock lock(g_scheduler_lock);
        FlatPtr stack_ptr, eip;
        if (Processor::get_context_frame_ptr(*this, stack_ptr, eip)) {
            recognized_symbols.append({ eip, symbolicate_kernel_address(eip) });
            while (stack_ptr) {
                FlatPtr retaddr;

                if (is_user_range(VirtualAddress(stack_ptr), sizeof(FlatPtr) * 2)) {
                    if (!copy_from_user(&retaddr, &((FlatPtr*)stack_ptr)[1]))
                        break;
                    recognized_symbols.append({ retaddr, symbolicate_kernel_address(retaddr) });
                    if (!copy_from_user(&stack_ptr, (FlatPtr*)stack_ptr))
                        break;
                } else {
                    void* fault_at;
                    if (!safe_memcpy(&retaddr, &((FlatPtr*)stack_ptr)[1], sizeof(FlatPtr), fault_at))
                        break;
                    recognized_symbols.append({ retaddr, symbolicate_kernel_address(retaddr) });
                    if (!safe_memcpy(&stack_ptr, (FlatPtr*)stack_ptr, sizeof(FlatPtr), fault_at))
                        break;
                }
            }
        }
    }

    StringBuilder builder;
    for (auto& symbol : recognized_symbols) {
        if (!symbolicate(symbol, process, builder, elf_bundle.ptr()))
            break;
    }
    return builder.to_string();
}

Vector<FlatPtr> Thread::raw_backtrace(FlatPtr ebp, FlatPtr eip) const
{
    InterruptDisabler disabler;
    auto& process = const_cast<Process&>(this->process());
    ProcessPagingScope paging_scope(process);
    Vector<FlatPtr, Profiling::max_stack_frame_count> backtrace;
    backtrace.append(eip);
    FlatPtr stack_ptr_copy;
    FlatPtr stack_ptr = (FlatPtr)ebp;
    while (stack_ptr) {
        void* fault_at;
        if (!safe_memcpy(&stack_ptr_copy, (void*)stack_ptr, sizeof(FlatPtr), fault_at))
            break;
        FlatPtr retaddr;
        if (!safe_memcpy(&retaddr, (void*)(stack_ptr + sizeof(FlatPtr)), sizeof(FlatPtr), fault_at))
            break;
        backtrace.append(retaddr);
        if (backtrace.size() == Profiling::max_stack_frame_count)
            break;
        stack_ptr = stack_ptr_copy;
    }
    return backtrace;
}
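
// Allocates this thread's TLS region: the master TLS image is copied in below a
// trailing ThreadSpecificData block whose self pointer refers to itself, and
// m_thread_specific_data records its address.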
KResult Thread::make_thread_specific_region(Badge<Process>)
{
    size_t thread_specific_region_alignment = max(process().m_master_tls_alignment, alignof(ThreadSpecificData));
    m_thread_specific_region_size = align_up_to(process().m_master_tls_size, thread_specific_region_alignment) + sizeof(ThreadSpecificData);
    auto* region = process().allocate_region({}, m_thread_specific_region_size, "Thread-specific", PROT_READ | PROT_WRITE, true);
    if (!region)
        return KResult(-ENOMEM);

    SmapDisabler disabler;
    auto* thread_specific_data = (ThreadSpecificData*)region->vaddr().offset(align_up_to(process().m_master_tls_size, thread_specific_region_alignment)).as_ptr();
    auto* thread_local_storage = (u8*)((u8*)thread_specific_data) - align_up_to(process().m_master_tls_size, process().m_master_tls_alignment);
    m_thread_specific_data = VirtualAddress(thread_specific_data);
    thread_specific_data->self = thread_specific_data;
    if (process().m_master_tls_size)
        memcpy(thread_local_storage, process().m_master_tls_region.unsafe_ptr()->vaddr().as_ptr(), process().m_master_tls_size);
    return KSuccess;
}

const LogStream& operator<<(const LogStream& stream, const Thread& value)
{
    return stream << value.process().name() << "(" << value.pid().value() << ":" << value.tid().value() << ")";
}
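
// Blocks the current thread on a WaitQueue: the thread is enqueued and set to
// Queued under g_scheduler_lock, an optional timeout timer is armed, and the CPU
// is yielded (or donated to a beneficiary). After waking, the block result is
// derived from the queue state below: still enqueued means the timeout fired,
// a cleared m_queue means we were pulled out because the thread is being killed,
// and anything else is a normal wake.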
Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeval* timeout, Atomic<bool>* lock, RefPtr<Thread> beneficiary)
{
    auto* current_thread = Thread::current();
    TimerId timer_id {};
    bool did_unlock;

    {
        ScopedCritical critical;
        // We need to be in a critical section *and* then also acquire the
        // scheduler lock. The only way acquiring the scheduler lock could
        // block us is if another core were to be holding it, in which case
        // we need to wait until the scheduler lock is released again.
        {
            ScopedSpinLock sched_lock(g_scheduler_lock);
            // m_queue can only be accessed safely if g_scheduler_lock is held!
            m_queue = &queue;
            if (!queue.enqueue(*current_thread)) {
                // The WaitQueue was already requested to wake someone when
                // nobody was waiting. So return right away as we shouldn't
                // be waiting.
                // The API contract guarantees we return with interrupts enabled,
                // regardless of how we got called.
                critical.set_interrupt_flag_on_destruction(true);

                return BlockResult::NotBlocked;
            }

            did_unlock = unlock_process_if_locked();
            if (lock)
                *lock = false;
            set_state(State::Queued);
            m_wait_reason = reason;

            if (timeout) {
                timer_id = TimerQueue::the().add_timer(*timeout, [&]() {
                    wake_from_queue();
                });
            }

            // Yield and wait for the queue to wake us up again.
            if (beneficiary)
                Scheduler::donate_to(beneficiary, reason);
            else
                Scheduler::yield();
        }

        // We've unblocked, relock the process if needed and carry on.
        relock_process(did_unlock);

        // This looks counterproductive, but we may not actually leave
        // the critical section we just restored. It depends on whether
        // we were in one while being called.
        if (current_thread->should_die()) {
            // We're being unblocked so that we can clean up. We shouldn't
            // be in Dying state until we're about to return back to user mode.
            ASSERT(current_thread->state() == Thread::Running);
#ifdef THREAD_DEBUG
            dbg() << "Dying thread " << *current_thread << " was unblocked";
#endif
        }
    }

    BlockResult result(BlockResult::WokeNormally);
    {
        // To be able to look at m_wait_queue_node we once again need the
        // scheduler lock, which is held when we insert into the queue.
        ScopedSpinLock sched_lock(g_scheduler_lock);

        if (m_queue) {
            ASSERT(m_queue == &queue);

            // If our thread was still in the queue, we timed out.
            m_queue = nullptr;
            if (queue.dequeue(*current_thread))
                result = BlockResult::InterruptedByTimeout;
        } else {
            // Our thread was already removed from the queue. The only
            // way this can happen is if someone else is trying to kill us.
            // In this case, the queue should not contain us anymore.
            result = BlockResult::InterruptedByDeath;
        }

        // Make sure we cancel the timer if we woke normally.
        if (timeout && !result.was_interrupted())
            TimerQueue::the().cancel_timer(timer_id);
    }

    // The API contract guarantees we return with interrupts enabled,
    // regardless of how we got called.
    sti();
    return result;
}

void Thread::wake_from_queue()
{
    ScopedSpinLock lock(g_scheduler_lock);
    ASSERT(state() == State::Queued);
    m_wait_reason = nullptr;
    if (this != Thread::current())
        set_state(State::Runnable);
    else
        set_state(State::Running);
}

RefPtr<Thread> Thread::from_tid(ThreadID tid)
{
    RefPtr<Thread> found_thread;
    ScopedSpinLock lock(g_scheduler_lock);
    Thread::for_each([&](auto& thread) {
        if (thread.tid() == tid) {
            found_thread = &thread;
            return IterationDecision::Break;
        }
        return IterationDecision::Continue;
    });
    return found_thread;
}

void Thread::reset_fpu_state()
{
    memcpy(m_fpu_state, &Processor::current().clean_fpu_state(), sizeof(FPUState));
}

void Thread::start_tracing_from(ProcessID tracer)
{
    m_tracer = ThreadTracer::create(tracer);
}

void Thread::stop_tracing()
{
    m_tracer = nullptr;
}

void Thread::tracer_trap(const RegisterState& regs)
{
    ASSERT(m_tracer.ptr());
    m_tracer->set_regs(regs);
    send_urgent_signal_to_self(SIGTRAP);
}

const Thread::Blocker& Thread::blocker() const
{
    ASSERT(m_lock.own_lock());
    ASSERT(m_blocker);
    return *m_blocker;
}

}