/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <AK/Demangle.h>
#include <AK/StringBuilder.h>
#include <AK/Time.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/FileSystem/FileDescription.h>
#include <Kernel/KSyms.h>
#include <Kernel/Process.h>
#include <Kernel/Profiling.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Thread.h>
#include <Kernel/ThreadTracer.h>
#include <Kernel/TimerQueue.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PageDirectory.h>
#include <Kernel/VM/ProcessPagingScope.h>
#include <LibC/signal_numbers.h>
#include <LibELF/Loader.h>

//#define SIGNAL_DEBUG
//#define THREAD_DEBUG

namespace Kernel {

Thread::Thread(NonnullRefPtr<Process> process)
    : m_process(move(process))
    , m_name(m_process->name())
{
    if (m_process->m_thread_count.fetch_add(1, AK::MemoryOrder::memory_order_relaxed) == 0) {
        // First thread gets TID == PID
        m_tid = m_process->pid().value();
    } else {
        m_tid = Process::allocate_pid().value();
    }
#ifdef THREAD_DEBUG
    dbg() << "Created new thread " << m_process->name() << "(" << m_process->pid().value() << ":" << m_tid.value() << ")";
#endif
    set_default_signal_dispositions();
    m_fpu_state = (FPUState*)kmalloc_aligned<16>(sizeof(FPUState));
    reset_fpu_state();
    memset(&m_tss, 0, sizeof(m_tss));
    m_tss.iomapbase = sizeof(TSS32);

    // Only IF is set when a process boots.
    m_tss.eflags = 0x0202;

    if (m_process->is_kernel_process()) {
        m_tss.cs = GDT_SELECTOR_CODE0;
        m_tss.ds = GDT_SELECTOR_DATA0;
        m_tss.es = GDT_SELECTOR_DATA0;
        m_tss.fs = GDT_SELECTOR_PROC;
        m_tss.ss = GDT_SELECTOR_DATA0;
        m_tss.gs = 0;
    } else {
        m_tss.cs = GDT_SELECTOR_CODE3 | 3;
        m_tss.ds = GDT_SELECTOR_DATA3 | 3;
        m_tss.es = GDT_SELECTOR_DATA3 | 3;
        m_tss.fs = GDT_SELECTOR_DATA3 | 3;
        m_tss.ss = GDT_SELECTOR_DATA3 | 3;
        m_tss.gs = GDT_SELECTOR_TLS | 3;
    }

    m_tss.cr3 = m_process->page_directory().cr3();

    m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid.value()), Region::Access::Read | Region::Access::Write, false, true);
    m_kernel_stack_region->set_stack(true);
    m_kernel_stack_base = m_kernel_stack_region->vaddr().get();
    m_kernel_stack_top = m_kernel_stack_region->vaddr().offset(default_kernel_stack_size).get() & 0xfffffff8u;

    if (m_process->is_kernel_process()) {
        m_tss.esp = m_tss.esp0 = m_kernel_stack_top;
    } else {
        // Ring 3 processes get a separate stack for ring 0.
        // The ring 3 stack will be assigned by exec().
        m_tss.ss0 = GDT_SELECTOR_DATA0;
        m_tss.esp0 = m_kernel_stack_top;
    }

    // We need to add another reference if we could successfully create
    // all the resources needed for this thread. The reason for this is that
    // we don't want to delete this thread after dropping the reference,
    // as it may still be running or scheduled to be run.
    // The finalizer is responsible for dropping this reference once this
    // thread is ready to be cleaned up.
    ref();

    if (m_process->pid() != 0)
        Scheduler::init_thread(*this);
}

Thread::~Thread()
{
    {
        // We need to explicitly remove ourselves from the thread list
        // here. We may get pre-empted in the middle of destructing this
        // thread, which causes problems if the thread list is iterated.
        // Specifically, if this is the last thread of a process, checking
        // block conditions would access m_process, which would be in
        // the middle of being destroyed.
        ScopedSpinLock lock(g_scheduler_lock);
        g_scheduler_data->thread_list_for_state(m_state).remove(*this);
    }
}

void Thread::unblock_from_blocker(Blocker& blocker)
{
    ScopedSpinLock scheduler_lock(g_scheduler_lock);
    ScopedSpinLock lock(m_lock);
    if (m_blocker != &blocker)
        return;
    if (!is_stopped())
        unblock();
}

void Thread::unblock(u8 signal)
{
    ASSERT(g_scheduler_lock.own_lock());
    ASSERT(m_lock.own_lock());
    if (m_state != Thread::Blocked)
        return;
    ASSERT(m_blocker);
    if (signal != 0)
        m_blocker->set_interrupted_by_signal(signal);
    m_blocker = nullptr;
    if (Thread::current() == this) {
        set_state(Thread::Running);
        return;
    }
    ASSERT(m_state != Thread::Runnable && m_state != Thread::Running);
    set_state(Thread::Runnable);
}

void Thread::set_should_die()
{
    if (m_should_die) {
#ifdef THREAD_DEBUG
        dbg() << *this << " Should already die";
#endif
        return;
    }
    ScopedCritical critical;

    // Remember that we should die instead of returning to userspace.
    {
        ScopedSpinLock lock(g_scheduler_lock);
        m_should_die = true;

        // NOTE: Even the current thread can technically be in "Stopped"
        // state! This is the case when another thread sent a SIGSTOP to
        // it while it was running and it calls e.g. exit() before
        // the scheduler gets involved again.
        if (is_stopped()) {
            // If we were stopped, we need to briefly resume so that
            // the kernel stacks can clean up. We won't ever return
            // to user mode, though.
            resume_from_stopped();
        } else if (state() == Queued) {
            // m_queue can only be accessed safely if g_scheduler_lock is held!
            if (m_queue) {
                m_queue->dequeue(*this);
                m_queue = nullptr;
                // Wake the thread
                wake_from_queue();
            }
        }
    }

    if (is_blocked()) {
        ScopedSpinLock lock(m_lock);
        ASSERT(m_blocker != nullptr);
        // We're blocked in the kernel.
        m_blocker->set_interrupted_by_death();
        unblock();
    }
}

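// Runs in the context of the dying thread itself. If set_should_die() has been
// requested, this yields to the scheduler from within a critical section and
// does not return; the finalizer takes care of cleanup afterwards.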
void Thread::die_if_needed()
{
    ASSERT(Thread::current() == this);

    if (!m_should_die)
        return;

    unlock_process_if_locked();

    ScopedCritical critical;
    set_should_die();

    // Flag a context switch. Because we're in a critical section,
    // Scheduler::yield will actually only mark a pending context switch.
    // Simply leaving the critical section would not necessarily trigger
    // a switch.
    Scheduler::yield();

    // Now leave the critical section so that we can also trigger the
    // actual context switch
    u32 prev_flags;
    Processor::current().clear_critical(prev_flags, false);
    dbg() << "die_if_needed returned from clear_critical!!! in irq: " << Processor::current().in_irq();

    // We should never get here, but the scoped scheduler lock
    // will be released by Scheduler::context_switch again
    ASSERT_NOT_REACHED();
}

void Thread::exit(void* exit_value)
{
    ASSERT(Thread::current() == this);
    m_join_condition.thread_did_exit(exit_value);
    set_should_die();
    unlock_process_if_locked();
    die_if_needed();
}

void Thread::yield_without_holding_big_lock()
{
    ASSERT(!g_scheduler_lock.own_lock());
    bool did_unlock = unlock_process_if_locked();
    // NOTE: Even though we call Scheduler::yield here, unless we happen
    // to be outside of a critical section, the yield will be postponed
    // until leaving it in relock_process.
    Scheduler::yield();
    relock_process(did_unlock);
}

bool Thread::unlock_process_if_locked()
{
    return process().big_lock().force_unlock_if_locked();
}

void Thread::relock_process(bool did_unlock)
{
    // Clearing the critical section may trigger the context switch
    // flagged by calling Scheduler::donate_to or Scheduler::yield
    // above. We have to do it this way because we intentionally
    // leave the critical section here to be able to switch contexts.
    u32 prev_flags;
    u32 prev_crit = Processor::current().clear_critical(prev_flags, true);

    if (did_unlock) {
        // We've unblocked, relock the process if needed and carry on.
        process().big_lock().lock();
    }

    // NOTE: We may be on a different CPU now!
    Processor::current().restore_critical(prev_crit, prev_flags);
}

auto Thread::sleep(const timespec& duration, timespec* remaining_time) -> BlockResult
{
    ASSERT(state() == Thread::Running);
    return Thread::current()->block<Thread::SleepBlocker>(nullptr, Thread::BlockTimeout(false, &duration), remaining_time);
}

auto Thread::sleep_until(const timespec& deadline) -> BlockResult
{
    ASSERT(state() == Thread::Running);
    return Thread::current()->block<Thread::SleepBlocker>(nullptr, Thread::BlockTimeout(true, &deadline));
}

const char* Thread::state_string() const
{
    switch (state()) {
    case Thread::Invalid:
        return "Invalid";
    case Thread::Runnable:
        return "Runnable";
    case Thread::Running:
        return "Running";
    case Thread::Dying:
        return "Dying";
    case Thread::Dead:
        return "Dead";
    case Thread::Stopped:
        return "Stopped";
    case Thread::Queued:
        return "Queued";
    case Thread::Blocked: {
        ScopedSpinLock lock(m_lock);
        ASSERT(m_blocker != nullptr);
        return m_blocker->state_string();
    }
    }
    klog() << "Thread::state_string(): Invalid state: " << state();
    ASSERT_NOT_REACHED();
    return nullptr;
}

void Thread::finalize()
{
    ASSERT(Thread::current() == g_finalizer);
    ASSERT(Thread::current() != this);

#ifdef LOCK_DEBUG
    ASSERT(!m_lock.own_lock());
    if (lock_count() > 0) {
        dbg() << "Thread " << *this << " leaking " << lock_count() << " Locks!";
        ScopedSpinLock list_lock(m_holding_locks_lock);
        for (auto& info : m_holding_locks_list)
            dbg() << " - " << info.lock->name() << " @ " << info.lock << " locked at " << info.file << ":" << info.line << " count: " << info.count;
        ASSERT_NOT_REACHED();
    }
#endif

    {
        ScopedSpinLock lock(g_scheduler_lock);
#ifdef THREAD_DEBUG
        dbg() << "Finalizing thread " << *this;
#endif
        set_state(Thread::State::Dead);
        m_join_condition.thread_finalizing();
    }

    if (m_dump_backtrace_on_finalization)
        dbg() << backtrace_impl();

    kfree_aligned(m_fpu_state);

    auto thread_cnt_before = m_process->m_thread_count.fetch_sub(1, AK::MemoryOrder::memory_order_acq_rel);
    ASSERT(thread_cnt_before != 0);
    if (thread_cnt_before == 1)
        process().finalize(*this);
    else
        process().unblock_waiters(*this, Thread::WaitBlocker::UnblockFlags::Terminated);
}

void Thread::finalize_dying_threads()
{
    ASSERT(Thread::current() == g_finalizer);
    Vector<Thread*, 32> dying_threads;
    {
        ScopedSpinLock lock(g_scheduler_lock);
        for_each_in_state(Thread::State::Dying, [&](Thread& thread) {
            if (thread.is_finalizable())
                dying_threads.append(&thread);
            return IterationDecision::Continue;
        });
    }
    for (auto* thread : dying_threads) {
        thread->finalize();

        // This thread will never execute again, drop the running reference
        // NOTE: This may not necessarily drop the last reference if anything
        //       else is still holding onto this thread!
        thread->unref();
    }
}

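// Accounts one scheduler tick to this thread (user or kernel time, depending
// on the saved CS selector) and returns whether the thread's time slice still
// has ticks remaining.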
bool Thread::tick()
{
    ++m_ticks;
    if (tss().cs & 3)
        ++m_process->m_ticks_in_user;
    else
        ++m_process->m_ticks_in_kernel;
    return --m_ticks_left;
}

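// Pending signals are tracked as a bitmask where bit (signal - 1) is set for
// each pending signal. While a thread is Stopped, only SIGCONT, SIGKILL and
// SIGTRAP are considered deliverable.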
bool Thread::has_pending_signal(u8 signal) const
{
    ScopedSpinLock lock(g_scheduler_lock);
    return pending_signals_for_state() & (1 << (signal - 1));
}

u32 Thread::pending_signals() const
{
    ScopedSpinLock lock(g_scheduler_lock);
    return pending_signals_for_state();
}

u32 Thread::pending_signals_for_state() const
{
    ASSERT(g_scheduler_lock.own_lock());
    constexpr u32 stopped_signal_mask = (1 << (SIGCONT - 1)) | (1 << (SIGKILL - 1)) | (1 << (SIGTRAP - 1));
    return m_state != Stopped ? m_pending_signals : m_pending_signals & stopped_signal_mask;
}

void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender)
{
    ASSERT(signal < 32);
    ScopedSpinLock scheduler_lock(g_scheduler_lock);

    // FIXME: Figure out what to do for masked signals. Should we also ignore them here?
    if (should_ignore_signal(signal)) {
#ifdef SIGNAL_DEBUG
        dbg() << "Signal " << signal << " was ignored by " << process();
#endif
        return;
    }

#ifdef SIGNAL_DEBUG
    if (sender)
        dbg() << "Signal: " << *sender << " sent " << signal << " to " << process();
    else
        dbg() << "Signal: Kernel sent " << signal << " to " << process();
#endif

    m_pending_signals |= 1 << (signal - 1);
    m_have_any_unmasked_pending_signals.store(pending_signals_for_state() & ~m_signal_mask, AK::memory_order_release);

    ScopedSpinLock lock(m_lock);
    if (m_state == Stopped) {
        if (pending_signals_for_state())
            resume_from_stopped();
    } else {
        unblock(signal);
    }
}

u32 Thread::update_signal_mask(u32 signal_mask)
{
    ScopedSpinLock lock(g_scheduler_lock);
    auto previous_signal_mask = m_signal_mask;
    m_signal_mask = signal_mask;
    m_have_any_unmasked_pending_signals.store(pending_signals_for_state() & ~m_signal_mask, AK::memory_order_release);
    return previous_signal_mask;
}

u32 Thread::signal_mask() const
{
    ScopedSpinLock lock(g_scheduler_lock);
    return m_signal_mask;
}

u32 Thread::signal_mask_block(sigset_t signal_set, bool block)
{
    ScopedSpinLock lock(g_scheduler_lock);
    auto previous_signal_mask = m_signal_mask;
    if (block)
        m_signal_mask &= ~signal_set;
    else
        m_signal_mask |= signal_set;
    m_have_any_unmasked_pending_signals.store(pending_signals_for_state() & ~m_signal_mask, AK::memory_order_release);
    return previous_signal_mask;
}

void Thread::clear_signals()
{
    ScopedSpinLock lock(g_scheduler_lock);
    m_signal_mask = 0;
    m_pending_signals = 0;
    m_have_any_unmasked_pending_signals.store(false, AK::memory_order_release);
}

// Certain exceptions, such as SIGSEGV and SIGILL, put a
// thread into a state where the signal handler must be
// invoked immediately, otherwise it will continue to fault.
// This function should be used in an exception handler to
// ensure that when the thread resumes, it's executing in
// the appropriate signal handler.
void Thread::send_urgent_signal_to_self(u8 signal)
{
    ASSERT(Thread::current() == this);
    DispatchSignalResult result;
    {
        ScopedSpinLock lock(g_scheduler_lock);
        result = dispatch_signal(signal);
    }
    if (result == DispatchSignalResult::Yield)
        yield_without_holding_big_lock();
}

DispatchSignalResult Thread::dispatch_one_pending_signal()
{
    ASSERT(m_lock.own_lock());
    u32 signal_candidates = pending_signals_for_state() & ~m_signal_mask;
    ASSERT(signal_candidates);

    u8 signal = 1;
    for (; signal < 32; ++signal) {
        if (signal_candidates & (1 << (signal - 1))) {
            break;
        }
    }
    return dispatch_signal(signal);
}

DispatchSignalResult Thread::try_dispatch_one_pending_signal(u8 signal)
{
    ASSERT(signal != 0);
    ScopedSpinLock scheduler_lock(g_scheduler_lock);
    ScopedSpinLock lock(m_lock);
    u32 signal_candidates = pending_signals_for_state() & ~m_signal_mask;
    if (!(signal_candidates & (1 << (signal - 1))))
        return DispatchSignalResult::Continue;
    return dispatch_signal(signal);
}

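// Default dispositions used when the process has not installed a handler,
// roughly following POSIX signal(7): terminate, ignore, dump core, stop,
// or continue.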
enum class DefaultSignalAction {
    Terminate,
    Ignore,
    DumpCore,
    Stop,
    Continue,
};

static DefaultSignalAction default_signal_action(u8 signal)
{
    ASSERT(signal && signal < NSIG);

    switch (signal) {
    case SIGHUP:
    case SIGINT:
    case SIGKILL:
    case SIGPIPE:
    case SIGALRM:
    case SIGUSR1:
    case SIGUSR2:
    case SIGVTALRM:
    case SIGSTKFLT:
    case SIGIO:
    case SIGPROF:
    case SIGTERM:
        return DefaultSignalAction::Terminate;
    case SIGCHLD:
    case SIGURG:
    case SIGWINCH:
    case SIGINFO:
        return DefaultSignalAction::Ignore;
    case SIGQUIT:
    case SIGILL:
    case SIGTRAP:
    case SIGABRT:
    case SIGBUS:
    case SIGFPE:
    case SIGSEGV:
    case SIGXCPU:
    case SIGXFSZ:
    case SIGSYS:
        return DefaultSignalAction::DumpCore;
    case SIGCONT:
        return DefaultSignalAction::Continue;
    case SIGSTOP:
    case SIGTSTP:
    case SIGTTIN:
    case SIGTTOU:
        return DefaultSignalAction::Stop;
    }
    ASSERT_NOT_REACHED();
}

bool Thread::should_ignore_signal(u8 signal) const
{
    ASSERT(signal < 32);
    auto& action = m_signal_action_data[signal];
    if (action.handler_or_sigaction.is_null())
        return default_signal_action(signal) == DefaultSignalAction::Ignore;
    if (action.handler_or_sigaction.as_ptr() == SIG_IGN)
        return true;
    return false;
}

bool Thread::has_signal_handler(u8 signal) const
{
    ASSERT(signal < 32);
    auto& action = m_signal_action_data[signal];
    return !action.handler_or_sigaction.is_null();
}

static bool push_value_on_user_stack(u32* stack, u32 data)
{
    *stack -= 4;
    return copy_to_user((u32*)*stack, &data);
}

void Thread::resume_from_stopped()
{
    ASSERT(is_stopped());
    ASSERT(m_stop_state != State::Invalid);
    ASSERT(g_scheduler_lock.own_lock());
    set_state(m_stop_state != Blocked ? m_stop_state : Runnable);
}

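// Delivers one signal to this thread while holding g_scheduler_lock: either
// performs the default action (terminate, dump core, stop, continue, ignore)
// or rewrites the saved userspace register state so the thread returns to its
// signal handler via the signal trampoline.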
DispatchSignalResult Thread::dispatch_signal(u8 signal)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(g_scheduler_lock.own_lock());
    ASSERT(signal > 0 && signal <= 32);
    ASSERT(process().is_user_process());
    ASSERT(this == Thread::current());

#ifdef SIGNAL_DEBUG
    klog() << "signal: dispatch signal " << signal << " to " << *this;
#endif

    if (m_state == Invalid || !is_initialized()) {
        // Thread has barely been created, we need to wait until it is
        // at least in Runnable state and is_initialized() returns true,
        // which indicates that it is fully set up and we actually have
        // a register state on the stack that we can modify
        return DispatchSignalResult::Deferred;
    }

    if (is_stopped() && signal != SIGCONT && signal != SIGKILL && signal != SIGTRAP) {
#ifdef SIGNAL_DEBUG
        klog() << "signal: " << *this << " is stopped, will handle signal " << signal << " when resumed";
#endif
        return DispatchSignalResult::Deferred;
    }

    auto& action = m_signal_action_data[signal];
    // FIXME: Implement SA_SIGINFO signal handlers.
    ASSERT(!(action.flags & SA_SIGINFO));

    // Mark this signal as handled.
    m_pending_signals &= ~(1 << (signal - 1));
    m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);

    auto* thread_tracer = tracer();
    if (signal == SIGSTOP || (thread_tracer && default_signal_action(signal) == DefaultSignalAction::DumpCore)) {
        if (!is_stopped()) {
#ifdef SIGNAL_DEBUG
            dbg() << "signal: signal " << signal << " stopping thread " << *this;
#endif
            m_stop_signal = signal;
            set_state(State::Stopped);
        }
        return DispatchSignalResult::Yield;
    }

    if (signal == SIGCONT && is_stopped()) {
#ifdef SIGNAL_DEBUG
        dbg() << "signal: SIGCONT resuming " << *this << " from stopped";
#endif
        resume_from_stopped();
    } else {
        if (thread_tracer != nullptr) {
            // When a thread is traced, it should be stopped whenever it receives a signal.
            // The tracer is notified of this by using waitpid().
            // Only "pending signals" from the tracer are sent to the tracee.
            if (!thread_tracer->has_pending_signal(signal)) {
                m_stop_signal = signal;
#ifdef SIGNAL_DEBUG
                dbg() << "signal: " << signal << " stopping " << *this << " for tracer";
#endif
                set_state(Stopped);
                return DispatchSignalResult::Yield;
            }
            thread_tracer->unset_signal(signal);
        }
    }

    auto handler_vaddr = action.handler_or_sigaction;
    if (handler_vaddr.is_null()) {
        switch (default_signal_action(signal)) {
        case DefaultSignalAction::Stop:
            m_stop_signal = signal;
            set_state(Stopped);
            return DispatchSignalResult::Yield;
        case DefaultSignalAction::DumpCore:
            process().for_each_thread([](auto& thread) {
                thread.set_dump_backtrace_on_finalization();
                return IterationDecision::Continue;
            });
            [[fallthrough]];
        case DefaultSignalAction::Terminate:
            m_process->terminate_due_to_signal(signal);
            return DispatchSignalResult::Terminate;
        case DefaultSignalAction::Ignore:
            ASSERT_NOT_REACHED();
        case DefaultSignalAction::Continue:
            return DispatchSignalResult::Continue;
        }
        ASSERT_NOT_REACHED();
    }

    if (handler_vaddr.as_ptr() == SIG_IGN) {
#ifdef SIGNAL_DEBUG
        klog() << "signal: " << *this << " ignored signal " << signal;
#endif
        return DispatchSignalResult::Continue;
    }

    ProcessPagingScope paging_scope(m_process);

    u32 old_signal_mask = m_signal_mask;
    u32 new_signal_mask = action.mask;
    if (action.flags & SA_NODEFER)
        new_signal_mask &= ~(1 << (signal - 1));
    else
        new_signal_mask |= 1 << (signal - 1);

    m_signal_mask |= new_signal_mask;
    m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);

    auto setup_stack = [&](RegisterState& state) {
        u32* stack = &state.userspace_esp;
        u32 old_esp = *stack;
        u32 ret_eip = state.eip;
        u32 ret_eflags = state.eflags;

#ifdef SIGNAL_DEBUG
        klog() << "signal: setting up user stack to return to eip: " << String::format("%p", ret_eip) << " esp: " << String::format("%p", old_esp);
#endif

        // Align the stack to 16 bytes.
        // Note that we push 56 bytes (4 * 14) on to the stack,
        // so we need to account for this here.
        u32 stack_alignment = (*stack - 56) % 16;
        *stack -= stack_alignment;

        push_value_on_user_stack(stack, ret_eflags);

        push_value_on_user_stack(stack, ret_eip);
        push_value_on_user_stack(stack, state.eax);
        push_value_on_user_stack(stack, state.ecx);
        push_value_on_user_stack(stack, state.edx);
        push_value_on_user_stack(stack, state.ebx);
        push_value_on_user_stack(stack, old_esp);
        push_value_on_user_stack(stack, state.ebp);
        push_value_on_user_stack(stack, state.esi);
        push_value_on_user_stack(stack, state.edi);

        // PUSH old_signal_mask
        push_value_on_user_stack(stack, old_signal_mask);

        push_value_on_user_stack(stack, signal);
        push_value_on_user_stack(stack, handler_vaddr.get());
        push_value_on_user_stack(stack, 0); // Push fake return address

        ASSERT((*stack % 16) == 0);
    };

    // We now place the thread state on the userspace stack.
    // Note that we use a RegisterState.
    // Conversely, when the thread isn't blocking the RegisterState may not be
    // valid (fork, exec etc.) but the tss will, so we use that instead.
    auto& regs = get_register_dump_from_stack();
    setup_stack(regs);
    regs.eip = g_return_to_ring3_from_signal_trampoline.get();

#ifdef SIGNAL_DEBUG
    klog() << "signal: Okay, " << *this << " {" << state_string() << "} has been primed with signal handler " << String::format("%w", m_tss.cs) << ":" << String::format("%x", m_tss.eip) << " to deliver " << signal;
#endif
    return DispatchSignalResult::Continue;
}

void Thread::set_default_signal_dispositions()
{
    // FIXME: Set up all the right default actions. See signal(7).
    memset(&m_signal_action_data, 0, sizeof(m_signal_action_data));
    m_signal_action_data[SIGCHLD].handler_or_sigaction = VirtualAddress(SIG_IGN);
    m_signal_action_data[SIGWINCH].handler_or_sigaction = VirtualAddress(SIG_IGN);
}

bool Thread::push_value_on_stack(FlatPtr value)
{
    m_tss.esp -= 4;
    FlatPtr* stack_ptr = (FlatPtr*)m_tss.esp;
    return copy_to_user(stack_ptr, &value);
}

RegisterState& Thread::get_register_dump_from_stack()
{
    return *(RegisterState*)(kernel_stack_top() - sizeof(RegisterState));
}

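// Builds the initial userspace stack for the main thread: the argument and
// environment strings and the auxiliary vector are copied out first, followed
// by null-terminated argv[] and envp[] arrays, and finally envp, argv, argc
// and a zero return address, leaving the stack 16-byte aligned.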
KResultOr<u32> Thread::make_userspace_stack_for_main_thread(Vector<String> arguments, Vector<String> environment, Vector<AuxiliaryValue> auxiliary_values)
{
    auto* region = m_process->allocate_region(VirtualAddress(), default_userspace_stack_size, "Stack (Main thread)", PROT_READ | PROT_WRITE, false);
    if (!region)
        return KResult(-ENOMEM);
    region->set_stack(true);

    FlatPtr new_esp = region->vaddr().offset(default_userspace_stack_size).get();

    auto push_on_new_stack = [&new_esp](u32 value) {
        new_esp -= 4;
        Userspace<u32*> stack_ptr = new_esp;
        return copy_to_user(stack_ptr, &value);
    };

    auto push_aux_value_on_new_stack = [&new_esp](auxv_t value) {
        new_esp -= sizeof(auxv_t);
        Userspace<auxv_t*> stack_ptr = new_esp;
        return copy_to_user(stack_ptr, &value);
    };

    auto push_string_on_new_stack = [&new_esp](const String& string) {
        new_esp -= round_up_to_power_of_two(string.length() + 1, 4);
        Userspace<u32*> stack_ptr = new_esp;
        return copy_to_user(stack_ptr, string.characters(), string.length() + 1);
    };

    Vector<FlatPtr> argv_entries;
    for (auto& argument : arguments) {
        push_string_on_new_stack(argument);
        argv_entries.append(new_esp);
    }

    Vector<FlatPtr> env_entries;
    for (auto& variable : environment) {
        push_string_on_new_stack(variable);
        env_entries.append(new_esp);
    }

    for (auto& value : auxiliary_values) {
        if (!value.optional_string.is_empty()) {
            push_string_on_new_stack(value.optional_string);
            value.auxv.a_un.a_ptr = (void*)new_esp;
        }
    }

    for (ssize_t i = auxiliary_values.size() - 1; i >= 0; --i) {
        auto& value = auxiliary_values[i];
        push_aux_value_on_new_stack(value.auxv);
    }

    push_on_new_stack(0);
    for (ssize_t i = env_entries.size() - 1; i >= 0; --i)
        push_on_new_stack(env_entries[i]);
    FlatPtr envp = new_esp;

    push_on_new_stack(0);
    for (ssize_t i = argv_entries.size() - 1; i >= 0; --i)
        push_on_new_stack(argv_entries[i]);
    FlatPtr argv = new_esp;

    // NOTE: The stack needs to be 16-byte aligned.
    new_esp -= new_esp % 16;

    push_on_new_stack((FlatPtr)envp);
    push_on_new_stack((FlatPtr)argv);
    push_on_new_stack((FlatPtr)argv_entries.size());
    push_on_new_stack(0);

    return new_esp;
}

RefPtr<Thread> Thread::clone(Process& process)
{
    auto clone = adopt(*new Thread(process));
    memcpy(clone->m_signal_action_data, m_signal_action_data, sizeof(m_signal_action_data));
    clone->m_signal_mask = m_signal_mask;
    memcpy(clone->m_fpu_state, m_fpu_state, sizeof(FPUState));
    clone->m_thread_specific_data = m_thread_specific_data;
    clone->m_thread_specific_region_size = m_thread_specific_region_size;
    return clone;
}

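// Transitions this thread to a new state. Must be called with g_scheduler_lock
// held; it moves the thread between the scheduler's per-state lists and
// notifies waiters (and the finalizer) when the thread stops, continues, or
// starts dying.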
void Thread::set_state(State new_state)
{
    ASSERT(g_scheduler_lock.own_lock());
    if (new_state == m_state)
        return;

    if (new_state == Blocked) {
        // We should always have a Blocker while blocked
        ASSERT(m_blocker != nullptr);
    }

    auto previous_state = m_state;
    if (previous_state == Invalid) {
        // If we were *just* created, we may already have pending signals
        ScopedSpinLock thread_lock(m_lock);
        if (has_unmasked_pending_signals()) {
            dbg() << "Dispatch pending signals to new thread " << *this;
            dispatch_one_pending_signal();
        }
    }

    m_state = new_state;
#ifdef THREAD_DEBUG
    dbg() << "Set Thread " << *this << " state to " << state_string();
#endif

    if (m_process->pid() != 0) {
        update_state_for_thread(previous_state);
        ASSERT(g_scheduler_data->has_thread(*this));
    }

    if (previous_state == Stopped) {
        m_stop_state = State::Invalid;
        process().unblock_waiters(*this, Thread::WaitBlocker::UnblockFlags::Continued);
    }

    if (m_state == Stopped) {
        // We don't want to restore to Running state, only Runnable!
        m_stop_state = previous_state != Running ? previous_state : Runnable;
        process().unblock_waiters(*this, Thread::WaitBlocker::UnblockFlags::Stopped, m_stop_signal);
    } else if (m_state == Dying) {
        ASSERT(previous_state != Queued);
        if (this != Thread::current() && is_finalizable()) {
            // Some other thread set this thread to Dying, notify the
            // finalizer right away as it can be cleaned up now
            Scheduler::notify_finalizer();
        }
    }
}

void Thread::update_state_for_thread(Thread::State previous_state)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(g_scheduler_data);
    ASSERT(g_scheduler_lock.own_lock());
    auto& previous_list = g_scheduler_data->thread_list_for_state(previous_state);
    auto& list = g_scheduler_data->thread_list_for_state(state());

    if (&previous_list != &list) {
        previous_list.remove(*this);
    }

    if (list.contains(*this))
        return;

    list.append(*this);
}

String Thread::backtrace()
{
    return backtrace_impl();
}

struct RecognizedSymbol {
    u32 address;
    const KernelSymbol* symbol { nullptr };
};

static bool symbolicate(const RecognizedSymbol& symbol, const Process& process, StringBuilder& builder, Process::ELFBundle* elf_bundle)
{
    if (!symbol.address)
        return false;

    bool mask_kernel_addresses = !process.is_superuser();
    if (!symbol.symbol) {
        if (!is_user_address(VirtualAddress(symbol.address))) {
            builder.append("0xdeadc0de\n");
        } else {
            if (elf_bundle && elf_bundle->elf_loader->has_symbols())
                builder.appendf("%p %s\n", symbol.address, elf_bundle->elf_loader->symbolicate(symbol.address).characters());
            else
                builder.appendf("%p\n", symbol.address);
        }
        return true;
    }
    unsigned offset = symbol.address - symbol.symbol->address;
    if (symbol.symbol->address == g_highest_kernel_symbol_address && offset > 4096) {
        builder.appendf("%p\n", mask_kernel_addresses ? 0xdeadc0de : symbol.address);
    } else {
        builder.appendf("%p %s +%u\n", mask_kernel_addresses ? 0xdeadc0de : symbol.address, demangle(symbol.symbol->name).characters(), offset);
    }
    return true;
}

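// Walks this thread's stack frames starting from its saved frame pointer,
// copying each return address with the appropriate safe-copy primitive for
// user or kernel memory, and symbolicates the addresses it finds.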
String Thread::backtrace_impl()
{
    Vector<RecognizedSymbol, 128> recognized_symbols;

    auto& process = const_cast<Process&>(this->process());
    OwnPtr<Process::ELFBundle> elf_bundle;
    if (!Processor::current().in_irq()) {
        // If we're handling IRQs we can't really safely symbolicate
        elf_bundle = process.elf_bundle();
    }
    ProcessPagingScope paging_scope(process);

    // To prevent a context switch involving this thread, which may happen
    // on another processor, we need to acquire the scheduler lock while
    // walking the stack
    {
        ScopedSpinLock lock(g_scheduler_lock);
        FlatPtr stack_ptr, eip;
        if (Processor::get_context_frame_ptr(*this, stack_ptr, eip)) {
            recognized_symbols.append({ eip, symbolicate_kernel_address(eip) });
            while (stack_ptr) {
                FlatPtr retaddr;

                if (is_user_range(VirtualAddress(stack_ptr), sizeof(FlatPtr) * 2)) {
                    if (!copy_from_user(&retaddr, &((FlatPtr*)stack_ptr)[1]))
                        break;
                    recognized_symbols.append({ retaddr, symbolicate_kernel_address(retaddr) });
                    if (!copy_from_user(&stack_ptr, (FlatPtr*)stack_ptr))
                        break;
                } else {
                    void* fault_at;
                    if (!safe_memcpy(&retaddr, &((FlatPtr*)stack_ptr)[1], sizeof(FlatPtr), fault_at))
                        break;
                    recognized_symbols.append({ retaddr, symbolicate_kernel_address(retaddr) });
                    if (!safe_memcpy(&stack_ptr, (FlatPtr*)stack_ptr, sizeof(FlatPtr), fault_at))
                        break;
                }
            }
        }
    }

    StringBuilder builder;
    for (auto& symbol : recognized_symbols) {
        if (!symbolicate(symbol, process, builder, elf_bundle.ptr()))
            break;
    }
    return builder.to_string();
}

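// Collects raw return addresses (no symbolication) for the profiler, starting
// from the given frame pointer and capped at Profiling::max_stack_frame_count
// entries.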
Vector<FlatPtr> Thread::raw_backtrace(FlatPtr ebp, FlatPtr eip) const
{
    InterruptDisabler disabler;
    auto& process = const_cast<Process&>(this->process());
    ProcessPagingScope paging_scope(process);
    Vector<FlatPtr, Profiling::max_stack_frame_count> backtrace;
    backtrace.append(eip);
    FlatPtr stack_ptr_copy;
    FlatPtr stack_ptr = (FlatPtr)ebp;
    while (stack_ptr) {
        void* fault_at;
        if (!safe_memcpy(&stack_ptr_copy, (void*)stack_ptr, sizeof(FlatPtr), fault_at))
            break;
        FlatPtr retaddr;
        if (!safe_memcpy(&retaddr, (void*)(stack_ptr + sizeof(FlatPtr)), sizeof(FlatPtr), fault_at))
            break;
        backtrace.append(retaddr);
        if (backtrace.size() == Profiling::max_stack_frame_count)
            break;
        stack_ptr = stack_ptr_copy;
    }
    return backtrace;
}

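// Allocates this thread's TLS region: the process's master TLS image is laid
// out first, followed by a ThreadSpecificData block whose self pointer points
// at itself so userspace can locate its thread-specific data.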
KResult Thread::make_thread_specific_region(Badge<Process>)
{
    size_t thread_specific_region_alignment = max(process().m_master_tls_alignment, alignof(ThreadSpecificData));
    m_thread_specific_region_size = align_up_to(process().m_master_tls_size, thread_specific_region_alignment) + sizeof(ThreadSpecificData);
    auto* region = process().allocate_region({}, m_thread_specific_region_size, "Thread-specific", PROT_READ | PROT_WRITE, true);
    if (!region)
        return KResult(-ENOMEM);

    SmapDisabler disabler;
    auto* thread_specific_data = (ThreadSpecificData*)region->vaddr().offset(align_up_to(process().m_master_tls_size, thread_specific_region_alignment)).as_ptr();
    auto* thread_local_storage = (u8*)((u8*)thread_specific_data) - align_up_to(process().m_master_tls_size, process().m_master_tls_alignment);
    m_thread_specific_data = VirtualAddress(thread_specific_data);
    thread_specific_data->self = thread_specific_data;
    if (process().m_master_tls_size)
        memcpy(thread_local_storage, process().m_master_tls_region.unsafe_ptr()->vaddr().as_ptr(), process().m_master_tls_size);
    return KSuccess;
}

const LogStream& operator<<(const LogStream& stream, const Thread& value)
{
    return stream << value.process().name() << "(" << value.pid().value() << ":" << value.tid().value() << ")";
}

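// Blocks the current thread on the given WaitQueue, optionally with a timeout
// and an optional beneficiary to donate the remaining time slice to. Returns
// whether the thread woke normally, timed out, was never actually blocked, or
// was interrupted by death.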
Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, const BlockTimeout& timeout, Atomic<bool>* lock, RefPtr<Thread> beneficiary)
{
    auto* current_thread = Thread::current();
    RefPtr<Timer> timer;
    bool block_finished = false;
    bool did_timeout = false;
    bool did_unlock;

    {
        ScopedCritical critical;
        // We need to be in a critical section *and* then also acquire the
        // scheduler lock. The only way acquiring the scheduler lock could
        // block us is if another core were to be holding it, in which case
        // we need to wait until the scheduler lock is released again
        {
            ScopedSpinLock sched_lock(g_scheduler_lock);
            if (!timeout.is_infinite()) {
                timer = TimerQueue::the().add_timer_without_id(timeout.absolute_time(), [&]() {
                    // NOTE: this may execute on the same or any other processor!
                    ScopedSpinLock lock(g_scheduler_lock);
                    if (!block_finished) {
                        did_timeout = true;
                        wake_from_queue();
                    }
                });
                if (!timer) {
                    if (lock)
                        *lock = false;
                    // We timed out already, don't block
                    return BlockResult::InterruptedByTimeout;
                }
            }

            // m_queue can only be accessed safely if g_scheduler_lock is held!
            m_queue = &queue;
            if (!queue.enqueue(*current_thread)) {
                // The WaitQueue was already requested to wake someone when
                // nobody was waiting. So return right away as we shouldn't
                // be waiting
                // NOTE: Do not set lock to false in this case!
                return BlockResult::NotBlocked;
            }

            if (lock)
                *lock = false;
            did_unlock = unlock_process_if_locked();
            set_state(State::Queued);
            m_wait_reason = reason;

            // Yield and wait for the queue to wake us up again.
            if (beneficiary)
                Scheduler::donate_to(beneficiary, reason);
            else
                Scheduler::yield();
        }

        // We've unblocked, relock the process if needed and carry on.
        relock_process(did_unlock);

        // This looks counterproductive, but we may not actually leave
        // the critical section we just restored. It depends on whether
        // we were in one while being called.
        if (current_thread->should_die()) {
            // We're being unblocked so that we can clean up. We shouldn't
            // be in Dying state until we're about to return to user mode
            ASSERT(current_thread->state() == Thread::Running);
#ifdef THREAD_DEBUG
            dbg() << "Dying thread " << *current_thread << " was unblocked";
#endif
        }
    }

    BlockResult result(BlockResult::WokeNormally);
    {
        // To be able to look at m_wait_queue_node we once again need the
        // scheduler lock, which is held when we insert into the queue
        ScopedSpinLock sched_lock(g_scheduler_lock);
        block_finished = true;

        if (m_queue) {
            ASSERT(m_queue == &queue);
            // If our thread was still in the queue, we timed out
            m_queue = nullptr;
            if (queue.dequeue(*current_thread))
                result = BlockResult::InterruptedByTimeout;
        } else {
            // Our thread was already removed from the queue. The only
            // way this can happen is if someone else is trying to kill us.
            // In this case, the queue should not contain us anymore.
            result = BlockResult::InterruptedByDeath;
        }
    }

    if (timer && !did_timeout) {
        // Cancel the timer while not holding any locks. This allows
        // the timer function to complete before we remove it
        // (e.g. if it's on another processor)
        TimerQueue::the().cancel_timer(timer.release_nonnull());
    }

    return result;
}

void Thread::wake_from_queue()
{
    ScopedSpinLock lock(g_scheduler_lock);
    ASSERT(state() == State::Queued);
    m_wait_reason = nullptr;
    if (this != Thread::current())
        set_state(State::Runnable);
    else
        set_state(State::Running);
}

RefPtr<Thread> Thread::from_tid(ThreadID tid)
{
    RefPtr<Thread> found_thread;
    ScopedSpinLock lock(g_scheduler_lock);
    Thread::for_each([&](auto& thread) {
        if (thread.tid() == tid) {
            found_thread = &thread;
            return IterationDecision::Break;
        }
        return IterationDecision::Continue;
    });
    return found_thread;
}

void Thread::reset_fpu_state()
{
    memcpy(m_fpu_state, &Processor::current().clean_fpu_state(), sizeof(FPUState));
}

void Thread::start_tracing_from(ProcessID tracer)
{
    m_tracer = ThreadTracer::create(tracer);
}

void Thread::stop_tracing()
{
    m_tracer = nullptr;
}

void Thread::tracer_trap(const RegisterState& regs)
{
    ASSERT(m_tracer.ptr());
    m_tracer->set_regs(regs);
    send_urgent_signal_to_self(SIGTRAP);
}

const Thread::Blocker& Thread::blocker() const
{
    ASSERT(m_lock.own_lock());
    ASSERT(m_blocker);
    return *m_blocker;
}

}