Thread.cpp

/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/ScopeGuard.h>
#include <AK/Singleton.h>
#include <AK/StringBuilder.h>
#include <AK/Time.h>
#include <Kernel/Arch/x86/SmapDisabler.h>
#include <Kernel/Arch/x86/TrapFrame.h>
#include <Kernel/Debug.h>
#include <Kernel/Devices/KCOVDevice.h>
#include <Kernel/FileSystem/FileDescription.h>
#include <Kernel/KSyms.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/PageDirectory.h>
#include <Kernel/Memory/ProcessPagingScope.h>
#include <Kernel/Panic.h>
#include <Kernel/PerformanceEventBuffer.h>
#include <Kernel/Process.h>
#include <Kernel/ProcessExposed.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Sections.h>
#include <Kernel/Thread.h>
#include <Kernel/ThreadTracer.h>
#include <Kernel/TimerQueue.h>
#include <LibC/signal_numbers.h>

namespace Kernel {

static Singleton<SpinlockProtected<Thread::GlobalList>> s_list;

SpinlockProtected<Thread::GlobalList>& Thread::all_instances()
{
    return *s_list;
}

KResultOr<NonnullRefPtr<Thread>> Thread::try_create(NonnullRefPtr<Process> process)
{
    auto kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, {}, Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow);
    if (!kernel_stack_region)
        return ENOMEM;
    kernel_stack_region->set_stack(true);

    auto block_timer = try_make_ref_counted<Timer>();
    if (!block_timer)
        return ENOMEM;

    auto name = KString::try_create(process->name());

    return adopt_nonnull_ref_or_enomem(new (nothrow) Thread(move(process), kernel_stack_region.release_nonnull(), block_timer.release_nonnull(), move(name)));
}
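// Illustrative sketch (not part of this file): callers are expected to
// propagate the ENOMEM from try_create() rather than asserting, just as
// try_clone() further down in this file does, e.g.
//
//     auto thread = TRY(Thread::try_create(*this));
//
// The exact call sites live in Process code; the snippet above is an assumption.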
Thread::Thread(NonnullRefPtr<Process> process, NonnullOwnPtr<Memory::Region> kernel_stack_region, NonnullRefPtr<Timer> block_timer, OwnPtr<KString> name)
    : m_process(move(process))
    , m_kernel_stack_region(move(kernel_stack_region))
    , m_name(move(name))
    , m_block_timer(move(block_timer))
{
    bool is_first_thread = m_process->add_thread(*this);
    if (is_first_thread) {
        // First thread gets TID == PID
        m_tid = m_process->pid().value();
    } else {
        m_tid = Process::allocate_pid().value();
    }

    {
        // FIXME: Go directly to KString
        auto string = String::formatted("Kernel stack (thread {})", m_tid.value());
        m_kernel_stack_region->set_name(KString::try_create(string));
    }

    Thread::all_instances().with([&](auto& list) {
        list.append(*this);
    });

    if constexpr (THREAD_DEBUG)
        dbgln("Created new thread {}({}:{})", m_process->name(), m_process->pid().value(), m_tid.value());

    reset_fpu_state();

    // Only IF is set when a process boots.
    m_regs.set_flags(0x0202);

#if ARCH(I386)
    if (m_process->is_kernel_process()) {
        m_regs.cs = GDT_SELECTOR_CODE0;
        m_regs.ds = GDT_SELECTOR_DATA0;
        m_regs.es = GDT_SELECTOR_DATA0;
        m_regs.fs = 0;
        m_regs.ss = GDT_SELECTOR_DATA0;
        m_regs.gs = GDT_SELECTOR_PROC;
    } else {
        m_regs.cs = GDT_SELECTOR_CODE3 | 3;
        m_regs.ds = GDT_SELECTOR_DATA3 | 3;
        m_regs.es = GDT_SELECTOR_DATA3 | 3;
        m_regs.fs = GDT_SELECTOR_DATA3 | 3;
        m_regs.ss = GDT_SELECTOR_DATA3 | 3;
        m_regs.gs = GDT_SELECTOR_TLS | 3;
    }
#else
    if (m_process->is_kernel_process())
        m_regs.cs = GDT_SELECTOR_CODE0;
    else
        m_regs.cs = GDT_SELECTOR_CODE3 | 3;
#endif

    m_regs.cr3 = m_process->address_space().page_directory().cr3();

    m_kernel_stack_base = m_kernel_stack_region->vaddr().get();
    m_kernel_stack_top = m_kernel_stack_region->vaddr().offset(default_kernel_stack_size).get() & ~(FlatPtr)0x7u;

    if (m_process->is_kernel_process()) {
        m_regs.set_sp(m_kernel_stack_top);
        m_regs.set_sp0(m_kernel_stack_top);
    } else {
        // Ring 3 processes get a separate stack for ring 0.
        // The ring 3 stack will be assigned by exec().
#if ARCH(I386)
        m_regs.ss0 = GDT_SELECTOR_DATA0;
#endif
        m_regs.set_sp0(m_kernel_stack_top);
    }

    // We need to add another reference if we could successfully create
    // all the resources needed for this thread. The reason for this is that
    // we don't want to delete this thread after dropping the reference,
    // it may still be running or scheduled to be run.
    // The finalizer is responsible for dropping this reference once this
    // thread is ready to be cleaned up.
    ref();
}
Thread::~Thread()
{
    {
        // We need to explicitly remove ourselves from the thread list
        // here. We may get preempted in the middle of destructing this
        // thread, which causes problems if the thread list is iterated.
        // Specifically, if this is the last thread of a process, checking
        // block conditions would access m_process, which would be in
        // the middle of being destroyed.
        SpinlockLocker lock(g_scheduler_lock);
        VERIFY(!m_process_thread_list_node.is_in_list());

        // We shouldn't be queued
        VERIFY(m_runnable_priority < 0);
    }
}

void Thread::block(Kernel::Mutex& lock, SpinlockLocker<Spinlock<u8>>& lock_lock, u32 lock_count)
{
    VERIFY(!Processor::current_in_irq());
    VERIFY(this == Thread::current());
    ScopedCritical critical;
    VERIFY(!Memory::s_mm_lock.is_locked_by_current_processor());

    SpinlockLocker block_lock(m_block_lock);
    SpinlockLocker scheduler_lock(g_scheduler_lock);

    switch (state()) {
    case Thread::Stopped:
        // It's possible that we were requested to be stopped!
        break;
    case Thread::Running:
        VERIFY(m_blocker == nullptr);
        break;
    default:
        VERIFY_NOT_REACHED();
    }

    // If we're blocking on the big-lock we may actually be in the process
    // of unblocking from another lock. If that's the case m_blocking_lock
    // is already set
    auto& big_lock = process().big_lock();
    VERIFY((&lock == &big_lock && m_blocking_lock != &big_lock) || !m_blocking_lock);

    auto previous_blocking_lock = m_blocking_lock;
    m_blocking_lock = &lock;
    m_lock_requested_count = lock_count;

    set_state(Thread::Blocked);

    scheduler_lock.unlock();
    block_lock.unlock();
    lock_lock.unlock();

    dbgln_if(THREAD_DEBUG, "Thread {} blocking on Mutex {}", *this, &lock);

    for (;;) {
        // Yield to the scheduler, and wait for us to resume unblocked.
        VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
        VERIFY(Processor::in_critical());
        if (&lock != &big_lock && big_lock.is_locked_by_current_thread()) {
            // We're locking another lock and already hold the big lock...
            // We need to release the big lock
            yield_and_release_relock_big_lock();
        } else {
            // By the time we've reached this another thread might have
            // marked us as holding the big lock, so this call must not
            // verify that we're not holding it.
            yield_without_releasing_big_lock(VerifyLockNotHeld::No);
        }
        VERIFY(Processor::in_critical());

        SpinlockLocker block_lock2(m_block_lock);
        if (should_be_stopped() || state() == Stopped) {
            dbgln("Thread should be stopped, current state: {}", state_string());
            set_state(Thread::Blocked);
            continue;
        }

        VERIFY(!m_blocking_lock);
        m_blocking_lock = previous_blocking_lock;
        break;
    }

    lock_lock.lock();
}
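// Illustrative sketch (not part of this file): Mutex::lock() is the expected
// caller of the overload above when it has to wait. Under contention it does
// something roughly like the following (the member names are assumptions; the
// real code lives in the Mutex implementation):
//
//     Thread::current()->block(*this, lock /* locker of the Mutex's spinlock */, m_times_locked);
//
// and the wake-up path hands the saved count back to the thread via
// unblock_from_lock() below.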
u32 Thread::unblock_from_lock(Kernel::Mutex& lock)
{
    SpinlockLocker block_lock(m_block_lock);
    VERIFY(m_blocking_lock == &lock);
    auto requested_count = m_lock_requested_count;
    block_lock.unlock();

    auto do_unblock = [&]() {
        SpinlockLocker scheduler_lock(g_scheduler_lock);
        SpinlockLocker block_lock(m_block_lock);
        VERIFY(m_blocking_lock == &lock);
        VERIFY(!Processor::current_in_irq());
        VERIFY(g_scheduler_lock.is_locked_by_current_processor());
        VERIFY(m_block_lock.is_locked_by_current_processor());
        VERIFY(m_blocking_lock == &lock);
        dbgln_if(THREAD_DEBUG, "Thread {} unblocked from Mutex {}", *this, &lock);
        m_blocking_lock = nullptr;
        if (Thread::current() == this) {
            set_state(Thread::Running);
            return;
        }
        VERIFY(m_state != Thread::Runnable && m_state != Thread::Running);
        set_state(Thread::Runnable);
    };
    if (Processor::current_in_irq()) {
        Processor::deferred_call_queue([do_unblock = move(do_unblock), self = make_weak_ptr()]() {
            if (auto this_thread = self.strong_ref())
                do_unblock();
        });
    } else {
        do_unblock();
    }
    return requested_count;
}

void Thread::unblock_from_blocker(Blocker& blocker)
{
    auto do_unblock = [&]() {
        SpinlockLocker scheduler_lock(g_scheduler_lock);
        SpinlockLocker block_lock(m_block_lock);
        if (m_blocker != &blocker)
            return;
        if (!should_be_stopped() && !is_stopped())
            unblock();
    };
    if (Processor::current_in_irq()) {
        Processor::deferred_call_queue([do_unblock = move(do_unblock), self = make_weak_ptr()]() {
            if (auto this_thread = self.strong_ref())
                do_unblock();
        });
    } else {
        do_unblock();
    }
}

void Thread::unblock(u8 signal)
{
    VERIFY(!Processor::current_in_irq());
    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
    VERIFY(m_block_lock.is_locked_by_current_processor());
    if (m_state != Thread::Blocked)
        return;
    if (m_blocking_lock)
        return;
    VERIFY(m_blocker);
    if (signal != 0) {
        if (is_handling_page_fault()) {
            // Don't let signals unblock threads that are blocked inside a page fault handler.
            // This prevents threads from EINTR'ing the inode read in an inode page fault.
            // FIXME: There's probably a better way to solve this.
            return;
        }
        if (!m_blocker->can_be_interrupted() && !m_should_die)
            return;
        m_blocker->set_interrupted_by_signal(signal);
    }
    m_blocker = nullptr;
    if (Thread::current() == this) {
        set_state(Thread::Running);
        return;
    }
    VERIFY(m_state != Thread::Runnable && m_state != Thread::Running);
    set_state(Thread::Runnable);
}

void Thread::set_should_die()
{
    if (m_should_die) {
        dbgln("{} Should already die", *this);
        return;
    }
    ScopedCritical critical;

    // Remember that we should die instead of returning to
    // the userspace.
    SpinlockLocker lock(g_scheduler_lock);
    m_should_die = true;

    // NOTE: Even the current thread can technically be in "Stopped"
    // state! This is the case when another thread sent a SIGSTOP to
    // it while it was running and it calls e.g. exit() before
    // the scheduler gets involved again.
    if (is_stopped()) {
        // If we were stopped, we need to briefly resume so that
        // the kernel stacks can clean up. We won't ever return back
        // to user mode, though
        VERIFY(!process().is_stopped());
        resume_from_stopped();
    }
    if (is_blocked()) {
        SpinlockLocker block_lock(m_block_lock);
        if (m_blocker) {
            // We're blocked in the kernel.
            m_blocker->set_interrupted_by_death();
            unblock();
        }
    }
}
void Thread::die_if_needed()
{
    VERIFY(Thread::current() == this);

    if (!m_should_die)
        return;

    u32 unlock_count;
    [[maybe_unused]] auto rc = unlock_process_if_locked(unlock_count);

    dbgln_if(THREAD_DEBUG, "Thread {} is dying", *this);

    {
        SpinlockLocker lock(g_scheduler_lock);
        // It's possible that we don't reach the code after this block if the
        // scheduler is invoked and FinalizerTask cleans up this thread, however
        // that doesn't matter because we're trying to invoke the scheduler anyway
        set_state(Thread::Dying);
    }

    ScopedCritical critical;

    // Flag a context switch. Because we're in a critical section,
    // Scheduler::yield will actually only mark a pending context switch
    // Simply leaving the critical section would not necessarily trigger
    // a switch.
    Scheduler::yield();

    // Now leave the critical section so that we can also trigger the
    // actual context switch
    Processor::clear_critical();
    dbgln("die_if_needed returned from clear_critical!!! in irq: {}", Processor::current_in_irq());
    // We should never get here, but the scoped scheduler lock
    // will be released by Scheduler::context_switch again
    VERIFY_NOT_REACHED();
}

void Thread::exit(void* exit_value)
{
    VERIFY(Thread::current() == this);
    m_join_blocker_set.thread_did_exit(exit_value);
    set_should_die();
    u32 unlock_count;
    [[maybe_unused]] auto rc = unlock_process_if_locked(unlock_count);
    if (m_thread_specific_range.has_value()) {
        auto* region = process().address_space().find_region_from_range(m_thread_specific_range.value());
        process().address_space().deallocate_region(*region);
    }
#ifdef ENABLE_KERNEL_COVERAGE_COLLECTION
    KCOVDevice::free_thread();
#endif
    die_if_needed();
}

void Thread::yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_held)
{
    VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
    VERIFY(verify_lock_not_held == VerifyLockNotHeld::No || !process().big_lock().is_locked_by_current_thread());
    // Disable interrupts here. This ensures we don't accidentally switch contexts twice
    InterruptDisabler disable;
    Scheduler::yield(); // flag a switch
    u32 prev_critical = Processor::clear_critical();
    // NOTE: We may be on a different CPU now!
    Processor::restore_critical(prev_critical);
}

void Thread::yield_and_release_relock_big_lock()
{
    VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
    // Disable interrupts here. This ensures we don't accidentally switch contexts twice
    InterruptDisabler disable;
    Scheduler::yield(); // flag a switch
    u32 lock_count_to_restore = 0;
    auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
    // NOTE: Even though we call Scheduler::yield here, unless we happen
    // to be outside of a critical section, the yield will be postponed
    // until leaving it in relock_process.
    relock_process(previous_locked, lock_count_to_restore);
}

LockMode Thread::unlock_process_if_locked(u32& lock_count_to_restore)
{
    return process().big_lock().force_unlock_if_locked(lock_count_to_restore);
}

void Thread::relock_process(LockMode previous_locked, u32 lock_count_to_restore)
{
    // Clearing the critical section may trigger the context switch
    // flagged by calling Scheduler::yield above.
    // We have to do it this way because we intentionally
    // leave the critical section here to be able to switch contexts.
    u32 prev_critical = Processor::clear_critical();

    // CONTEXT SWITCH HAPPENS HERE!

    // NOTE: We may be on a different CPU now!
    Processor::restore_critical(prev_critical);

    if (previous_locked != LockMode::Unlocked) {
        // We've unblocked, relock the process if needed and carry on.
        process().big_lock().restore_lock(previous_locked, lock_count_to_restore);
    }
}

auto Thread::sleep(clockid_t clock_id, const Time& duration, Time* remaining_time) -> BlockResult
{
    VERIFY(state() == Thread::Running);
    return Thread::current()->block<Thread::SleepBlocker>({}, Thread::BlockTimeout(false, &duration, nullptr, clock_id), remaining_time);
}

auto Thread::sleep_until(clockid_t clock_id, const Time& deadline) -> BlockResult
{
    VERIFY(state() == Thread::Running);
    return Thread::current()->block<Thread::SleepBlocker>({}, Thread::BlockTimeout(true, &deadline, nullptr, clock_id));
}
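// Illustrative sketch (not part of this file): a nanosleep-style syscall would
// use the helpers above roughly like this. The local names, the clock id and
// the exact BlockResult API used here are assumptions about the caller:
//
//     Time remaining_time;
//     auto result = Thread::sleep(CLOCK_MONOTONIC_COARSE, requested_sleep, &remaining_time);
//     if (result.was_interrupted())
//         return EINTR; // 'remaining_time' reports how much sleep was left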
StringView Thread::state_string() const
{
    switch (state()) {
    case Thread::Invalid:
        return "Invalid"sv;
    case Thread::Runnable:
        return "Runnable"sv;
    case Thread::Running:
        return "Running"sv;
    case Thread::Dying:
        return "Dying"sv;
    case Thread::Dead:
        return "Dead"sv;
    case Thread::Stopped:
        return "Stopped"sv;
    case Thread::Blocked: {
        SpinlockLocker block_lock(m_block_lock);
        if (m_blocking_lock)
            return "Mutex"sv;
        if (m_blocker)
            return m_blocker->state_string();
        VERIFY_NOT_REACHED();
    }
    }
    PANIC("Thread::state_string(): Invalid state: {}", (int)state());
}

void Thread::finalize()
{
    VERIFY(Thread::current() == g_finalizer);
    VERIFY(Thread::current() != this);

#if LOCK_DEBUG
    VERIFY(!m_lock.is_locked_by_current_processor());
    if (lock_count() > 0) {
        dbgln("Thread {} leaking {} Locks!", *this, lock_count());
        SpinlockLocker list_lock(m_holding_locks_lock);
        for (auto& info : m_holding_locks_list) {
            const auto& location = info.lock_location;
            dbgln(" - Mutex: \"{}\" @ {} locked in function \"{}\" at \"{}:{}\" with a count of: {}", info.lock->name(), info.lock, location.function_name(), location.filename(), location.line_number(), info.count);
        }
        VERIFY_NOT_REACHED();
    }
#endif

    {
        SpinlockLocker lock(g_scheduler_lock);
        dbgln_if(THREAD_DEBUG, "Finalizing thread {}", *this);
        set_state(Thread::State::Dead);
        m_join_blocker_set.thread_finalizing();
    }

    if (m_dump_backtrace_on_finalization)
        dbgln("{}", backtrace());

    drop_thread_count(false);
}

void Thread::drop_thread_count(bool initializing_first_thread)
{
    bool is_last = process().remove_thread(*this);
    if (!initializing_first_thread && is_last)
        process().finalize();
}

void Thread::finalize_dying_threads()
{
    VERIFY(Thread::current() == g_finalizer);
    Vector<Thread*, 32> dying_threads;
    {
        SpinlockLocker lock(g_scheduler_lock);
        for_each_in_state(Thread::State::Dying, [&](Thread& thread) {
            if (thread.is_finalizable())
                dying_threads.append(&thread);
        });
    }
    for (auto* thread : dying_threads) {
        RefPtr<Process> process = thread->process();
        dbgln_if(PROCESS_DEBUG, "Before finalization, {} has {} refs and its process has {}",
            *thread, thread->ref_count(), thread->process().ref_count());
        thread->finalize();
        dbgln_if(PROCESS_DEBUG, "After finalization, {} has {} refs and its process has {}",
            *thread, thread->ref_count(), thread->process().ref_count());
        // This thread will never execute again, drop the running reference
        // NOTE: This may not necessarily drop the last reference if anything
        //       else is still holding onto this thread!
        thread->unref();
    }
}

void Thread::update_time_scheduled(u64 current_scheduler_time, bool is_kernel, bool no_longer_running)
{
    if (m_last_time_scheduled.has_value()) {
        u64 delta;
        if (current_scheduler_time >= m_last_time_scheduled.value())
            delta = current_scheduler_time - m_last_time_scheduled.value();
        else
            delta = m_last_time_scheduled.value() - current_scheduler_time; // the unlikely event that the clock wrapped
        if (delta != 0) {
            // Add it to the global total *before* updating the thread's value!
            Scheduler::add_time_scheduled(delta, is_kernel);

            auto& total_time = is_kernel ? m_total_time_scheduled_kernel : m_total_time_scheduled_user;
            SpinlockLocker scheduler_lock(g_scheduler_lock);
            total_time += delta;
        }
    }
    if (no_longer_running)
        m_last_time_scheduled = {};
    else
        m_last_time_scheduled = current_scheduler_time;
}
bool Thread::tick()
{
    if (previous_mode() == PreviousMode::KernelMode) {
        ++m_process->m_ticks_in_kernel;
        ++m_ticks_in_kernel;
    } else {
        ++m_process->m_ticks_in_user;
        ++m_ticks_in_user;
    }
    return --m_ticks_left;
}

void Thread::check_dispatch_pending_signal()
{
    auto result = DispatchSignalResult::Continue;
    {
        SpinlockLocker scheduler_lock(g_scheduler_lock);
        if (pending_signals_for_state()) {
            SpinlockLocker lock(m_lock);
            result = dispatch_one_pending_signal();
        }
    }

    if (result == DispatchSignalResult::Yield) {
        yield_without_releasing_big_lock();
    }
}

u32 Thread::pending_signals() const
{
    SpinlockLocker lock(g_scheduler_lock);
    return pending_signals_for_state();
}

u32 Thread::pending_signals_for_state() const
{
    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
    constexpr u32 stopped_signal_mask = (1 << (SIGCONT - 1)) | (1 << (SIGKILL - 1)) | (1 << (SIGTRAP - 1));
    if (is_handling_page_fault())
        return 0;
    return m_state != Stopped ? m_pending_signals : m_pending_signals & stopped_signal_mask;
}

void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender)
{
    VERIFY(signal < 32);
    SpinlockLocker scheduler_lock(g_scheduler_lock);

    // FIXME: Figure out what to do for masked signals. Should we also ignore them here?
    if (should_ignore_signal(signal)) {
        dbgln_if(SIGNAL_DEBUG, "Signal {} was ignored by {}", signal, process());
        return;
    }

    if constexpr (SIGNAL_DEBUG) {
        if (sender)
            dbgln("Signal: {} sent {} to {}", *sender, signal, process());
        else
            dbgln("Signal: Kernel send {} to {}", signal, process());
    }

    m_pending_signals |= 1 << (signal - 1);
    m_have_any_unmasked_pending_signals.store(pending_signals_for_state() & ~m_signal_mask, AK::memory_order_release);

    if (m_state == Stopped) {
        SpinlockLocker lock(m_lock);
        if (pending_signals_for_state()) {
            dbgln_if(SIGNAL_DEBUG, "Signal: Resuming stopped {} to deliver signal {}", *this, signal);
            resume_from_stopped();
        }
    } else {
        SpinlockLocker block_lock(m_block_lock);
        dbgln_if(SIGNAL_DEBUG, "Signal: Unblocking {} to deliver signal {}", *this, signal);
        unblock(signal);
    }
}

u32 Thread::update_signal_mask(u32 signal_mask)
{
    SpinlockLocker lock(g_scheduler_lock);
    auto previous_signal_mask = m_signal_mask;
    m_signal_mask = signal_mask;
    m_have_any_unmasked_pending_signals.store(pending_signals_for_state() & ~m_signal_mask, AK::memory_order_release);
    return previous_signal_mask;
}

u32 Thread::signal_mask() const
{
    SpinlockLocker lock(g_scheduler_lock);
    return m_signal_mask;
}
u32 Thread::signal_mask_block(sigset_t signal_set, bool block)
{
    SpinlockLocker lock(g_scheduler_lock);
    auto previous_signal_mask = m_signal_mask;
    // m_signal_mask holds the *blocked* signals (see the `& ~m_signal_mask`
    // checks above), so blocking adds bits and unblocking clears them.
    if (block)
        m_signal_mask |= signal_set;
    else
        m_signal_mask &= ~signal_set;
    m_have_any_unmasked_pending_signals.store(pending_signals_for_state() & ~m_signal_mask, AK::memory_order_release);
    return previous_signal_mask;
}
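// Illustrative sketch (not part of this file): sys$sigprocmask is the expected
// caller of signal_mask_block() and update_signal_mask(). The mapping below is
// an assumption about the call site, not a quote of it:
//
//     case SIG_BLOCK:
//         previous_signal_mask = current_thread->signal_mask_block(set_value, true);
//         break;
//     case SIG_UNBLOCK:
//         previous_signal_mask = current_thread->signal_mask_block(set_value, false);
//         break;
//     case SIG_SETMASK:
//         previous_signal_mask = current_thread->update_signal_mask(set_value);
//         break;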
void Thread::clear_signals()
{
    SpinlockLocker lock(g_scheduler_lock);
    m_signal_mask = 0;
    m_pending_signals = 0;
    m_have_any_unmasked_pending_signals.store(false, AK::memory_order_release);
    m_signal_action_data.fill({});
}

// Certain exceptions, such as SIGSEGV and SIGILL, put a
// thread into a state where the signal handler must be
// invoked immediately, otherwise it will continue to fault.
// This function should be used in an exception handler to
// ensure that when the thread resumes, it's executing in
// the appropriate signal handler.
void Thread::send_urgent_signal_to_self(u8 signal)
{
    VERIFY(Thread::current() == this);
    DispatchSignalResult result;
    {
        SpinlockLocker lock(g_scheduler_lock);
        result = dispatch_signal(signal);
    }
    if (result == DispatchSignalResult::Yield)
        yield_and_release_relock_big_lock();
}

DispatchSignalResult Thread::dispatch_one_pending_signal()
{
    VERIFY(m_lock.is_locked_by_current_processor());
    u32 signal_candidates = pending_signals_for_state() & ~m_signal_mask;
    if (signal_candidates == 0)
        return DispatchSignalResult::Continue;

    u8 signal = 1;
    for (; signal < 32; ++signal) {
        if (signal_candidates & (1 << (signal - 1))) {
            break;
        }
    }
    return dispatch_signal(signal);
}

DispatchSignalResult Thread::try_dispatch_one_pending_signal(u8 signal)
{
    VERIFY(signal != 0);
    SpinlockLocker scheduler_lock(g_scheduler_lock);
    SpinlockLocker lock(m_lock);
    u32 signal_candidates = pending_signals_for_state() & ~m_signal_mask;
    if (!(signal_candidates & (1 << (signal - 1))))
        return DispatchSignalResult::Continue;
    return dispatch_signal(signal);
}

enum class DefaultSignalAction {
    Terminate,
    Ignore,
    DumpCore,
    Stop,
    Continue,
};

static DefaultSignalAction default_signal_action(u8 signal)
{
    VERIFY(signal && signal < NSIG);

    switch (signal) {
    case SIGHUP:
    case SIGINT:
    case SIGKILL:
    case SIGPIPE:
    case SIGALRM:
    case SIGUSR1:
    case SIGUSR2:
    case SIGVTALRM:
    case SIGSTKFLT:
    case SIGIO:
    case SIGPROF:
    case SIGTERM:
        return DefaultSignalAction::Terminate;
    case SIGCHLD:
    case SIGURG:
    case SIGWINCH:
    case SIGINFO:
        return DefaultSignalAction::Ignore;
    case SIGQUIT:
    case SIGILL:
    case SIGTRAP:
    case SIGABRT:
    case SIGBUS:
    case SIGFPE:
    case SIGSEGV:
    case SIGXCPU:
    case SIGXFSZ:
    case SIGSYS:
        return DefaultSignalAction::DumpCore;
    case SIGCONT:
        return DefaultSignalAction::Continue;
    case SIGSTOP:
    case SIGTSTP:
    case SIGTTIN:
    case SIGTTOU:
        return DefaultSignalAction::Stop;
    default:
        VERIFY_NOT_REACHED();
    }
}

bool Thread::should_ignore_signal(u8 signal) const
{
    VERIFY(signal < 32);
    auto& action = m_signal_action_data[signal];
    if (action.handler_or_sigaction.is_null())
        return default_signal_action(signal) == DefaultSignalAction::Ignore;
    if ((sighandler_t)action.handler_or_sigaction.get() == SIG_IGN)
        return true;
    return false;
}

bool Thread::has_signal_handler(u8 signal) const
{
    VERIFY(signal < 32);
    auto& action = m_signal_action_data[signal];
    return !action.handler_or_sigaction.is_null();
}

static bool push_value_on_user_stack(FlatPtr& stack, FlatPtr data)
{
    stack -= sizeof(FlatPtr);
    return copy_to_user((FlatPtr*)stack, &data);
}

void Thread::resume_from_stopped()
{
    VERIFY(is_stopped());
    VERIFY(m_stop_state != State::Invalid);
    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
    if (m_stop_state == Blocked) {
        SpinlockLocker block_lock(m_block_lock);
        if (m_blocker || m_blocking_lock) {
            // Hasn't been unblocked yet
            set_state(Blocked, 0);
        } else {
            // Was unblocked while stopped
            set_state(Runnable);
        }
    } else {
        set_state(m_stop_state, 0);
    }
}
DispatchSignalResult Thread::dispatch_signal(u8 signal)
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
    VERIFY(signal > 0 && signal <= 32);
    VERIFY(process().is_user_process());
    VERIFY(this == Thread::current());

    dbgln_if(SIGNAL_DEBUG, "Dispatch signal {} to {}, state: {}", signal, *this, state_string());

    if (m_state == Invalid || !is_initialized()) {
        // Thread has barely been created, we need to wait until it is
        // at least in Runnable state and is_initialized() returns true,
        // which indicates that it is fully set up and we actually have
        // a register state on the stack that we can modify
        return DispatchSignalResult::Deferred;
    }

    VERIFY(previous_mode() == PreviousMode::UserMode);

    auto& action = m_signal_action_data[signal];
    // FIXME: Implement SA_SIGINFO signal handlers.
    VERIFY(!(action.flags & SA_SIGINFO));

    // Mark this signal as handled.
    m_pending_signals &= ~(1 << (signal - 1));
    m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);

    auto& process = this->process();
    auto tracer = process.tracer();
    if (signal == SIGSTOP || (tracer && default_signal_action(signal) == DefaultSignalAction::DumpCore)) {
        dbgln_if(SIGNAL_DEBUG, "Signal {} stopping this thread", signal);
        set_state(State::Stopped, signal);
        return DispatchSignalResult::Yield;
    }

    if (signal == SIGCONT) {
        dbgln("signal: SIGCONT resuming {}", *this);
    } else {
        if (tracer) {
            // When a thread is traced, it should be stopped whenever it receives a signal.
            // The tracer is notified of this by using waitpid().
            // Only "pending signals" from the tracer are sent to the tracee.
            if (!tracer->has_pending_signal(signal)) {
                dbgln("signal: {} stopping {} for tracer", signal, *this);
                set_state(Stopped, signal);
                return DispatchSignalResult::Yield;
            }
            tracer->unset_signal(signal);
        }
    }

    auto handler_vaddr = action.handler_or_sigaction;
    if (handler_vaddr.is_null()) {
        switch (default_signal_action(signal)) {
        case DefaultSignalAction::Stop:
            set_state(Stopped, signal);
            return DispatchSignalResult::Yield;
        case DefaultSignalAction::DumpCore:
            process.set_should_generate_coredump(true);
            process.for_each_thread([](auto& thread) {
                thread.set_dump_backtrace_on_finalization();
            });
            [[fallthrough]];
        case DefaultSignalAction::Terminate:
            m_process->terminate_due_to_signal(signal);
            return DispatchSignalResult::Terminate;
        case DefaultSignalAction::Ignore:
            VERIFY_NOT_REACHED();
        case DefaultSignalAction::Continue:
            return DispatchSignalResult::Continue;
        }
        VERIFY_NOT_REACHED();
    }

    if ((sighandler_t)handler_vaddr.as_ptr() == SIG_IGN) {
        dbgln_if(SIGNAL_DEBUG, "Ignored signal {}", signal);
        return DispatchSignalResult::Continue;
    }

    VERIFY(previous_mode() == PreviousMode::UserMode);
    VERIFY(current_trap());

    ProcessPagingScope paging_scope(m_process);

    u32 old_signal_mask = m_signal_mask;
    u32 new_signal_mask = action.mask;
    if (action.flags & SA_NODEFER)
        new_signal_mask &= ~(1 << (signal - 1));
    else
        new_signal_mask |= 1 << (signal - 1);

    m_signal_mask |= new_signal_mask;
    m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);

    auto setup_stack = [&](RegisterState& state) {
        FlatPtr stack = state.userspace_sp();
        FlatPtr old_sp = stack;
        FlatPtr ret_ip = state.ip();
        FlatPtr ret_flags = state.flags();

        dbgln_if(SIGNAL_DEBUG, "Setting up user stack to return to IP {:p}, SP {:p}", ret_ip, old_sp);

#if ARCH(I386)
        // Align the stack to 16 bytes.
        // Note that we push 56 bytes (4 * 14) on to the stack,
        // so we need to account for this here.
        // 56 % 16 = 8, so we only need to take 8 bytes into consideration for
        // the stack alignment.
        FlatPtr stack_alignment = (stack - 8) % 16;
        stack -= stack_alignment;

        push_value_on_user_stack(stack, ret_flags);

        push_value_on_user_stack(stack, ret_ip);
        push_value_on_user_stack(stack, state.eax);
        push_value_on_user_stack(stack, state.ecx);
        push_value_on_user_stack(stack, state.edx);
        push_value_on_user_stack(stack, state.ebx);
        push_value_on_user_stack(stack, old_sp);
        push_value_on_user_stack(stack, state.ebp);
        push_value_on_user_stack(stack, state.esi);
        push_value_on_user_stack(stack, state.edi);
#else
        // Align the stack to 16 bytes.
        // Note that we push 176 bytes (8 * 22) on to the stack,
        // so we need to account for this here.
        // 22 % 2 = 0 (that is, 176 % 16 = 0), so we don't need to take anything
        // into consideration for the alignment.
        // We also are not allowed to touch the thread's red-zone of 128 bytes.
        FlatPtr stack_alignment = stack % 16;
        stack -= 128 + stack_alignment;

        push_value_on_user_stack(stack, ret_flags);

        push_value_on_user_stack(stack, ret_ip);
        push_value_on_user_stack(stack, state.r15);
        push_value_on_user_stack(stack, state.r14);
        push_value_on_user_stack(stack, state.r13);
        push_value_on_user_stack(stack, state.r12);
        push_value_on_user_stack(stack, state.r11);
        push_value_on_user_stack(stack, state.r10);
        push_value_on_user_stack(stack, state.r9);
        push_value_on_user_stack(stack, state.r8);
        push_value_on_user_stack(stack, state.rax);
        push_value_on_user_stack(stack, state.rcx);
        push_value_on_user_stack(stack, state.rdx);
        push_value_on_user_stack(stack, state.rbx);
        push_value_on_user_stack(stack, old_sp);
        push_value_on_user_stack(stack, state.rbp);
        push_value_on_user_stack(stack, state.rsi);
        push_value_on_user_stack(stack, state.rdi);
#endif

        // PUSH old_signal_mask
        push_value_on_user_stack(stack, old_signal_mask);

        push_value_on_user_stack(stack, signal);
        push_value_on_user_stack(stack, handler_vaddr.get());
        push_value_on_user_stack(stack, 0); // push fake return address

        // We write back the adjusted stack value into the register state.
        // We have to do this because we can't just pass around a reference to a packed field, as it's UB.
        state.set_userspace_sp(stack);

        VERIFY((stack % 16) == 0);
    };

    // We now place the thread state on the userspace stack.
    // Note that we use a RegisterState.
    // Conversely, when the thread isn't blocking the RegisterState may not be
    // valid (fork, exec etc) but the tss will, so we use that instead.
    auto& regs = get_register_dump_from_stack();
    setup_stack(regs);
    auto signal_trampoline_addr = process.signal_trampoline().get();
    regs.set_ip(signal_trampoline_addr);

    dbgln_if(SIGNAL_DEBUG, "Thread in state '{}' has been primed with signal handler {:#04x}:{:p} to deliver {}", state_string(), m_regs.cs, m_regs.ip(), signal);
    return DispatchSignalResult::Continue;
}
RegisterState& Thread::get_register_dump_from_stack()
{
    auto* trap = current_trap();

    // We should *always* have a trap. If we don't we're probably a kernel
    // thread that hasn't been preempted. If we want to support this, we
    // need to capture the registers probably into m_regs and return it
    VERIFY(trap);

    while (trap) {
        if (!trap->next_trap)
            break;
        trap = trap->next_trap;
    }
    return *trap->regs;
}

KResultOr<NonnullRefPtr<Thread>> Thread::try_clone(Process& process)
{
    auto clone = TRY(Thread::try_create(process));
    auto signal_action_data_span = m_signal_action_data.span();
    signal_action_data_span.copy_to(clone->m_signal_action_data.span());
    clone->m_signal_mask = m_signal_mask;
    clone->m_fpu_state = m_fpu_state;
    clone->m_thread_specific_data = m_thread_specific_data;
    return clone;
}

void Thread::set_state(State new_state, u8 stop_signal)
{
    State previous_state;
    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
    if (new_state == m_state)
        return;

    {
        SpinlockLocker thread_lock(m_lock);
        previous_state = m_state;
        if (previous_state == Invalid) {
            // If we were *just* created, we may have already pending signals
            if (has_unmasked_pending_signals()) {
                dbgln_if(THREAD_DEBUG, "Dispatch pending signals to new thread {}", *this);
                dispatch_one_pending_signal();
            }
        }

        m_state = new_state;
        dbgln_if(THREAD_DEBUG, "Set thread {} state to {}", *this, state_string());
    }

    if (previous_state == Runnable) {
        Scheduler::dequeue_runnable_thread(*this);
    } else if (previous_state == Stopped) {
        m_stop_state = State::Invalid;
        auto& process = this->process();
        if (process.set_stopped(false) == true) {
            process.for_each_thread([&](auto& thread) {
                if (&thread == this)
                    return;
                if (!thread.is_stopped())
                    return;
                dbgln_if(THREAD_DEBUG, "Resuming peer thread {}", thread);
                thread.resume_from_stopped();
            });
            process.unblock_waiters(Thread::WaitBlocker::UnblockFlags::Continued);
            // Tell the parent process (if any) about this change.
            if (auto parent = Process::from_pid(process.ppid())) {
                [[maybe_unused]] auto result = parent->send_signal(SIGCHLD, &process);
            }
        }
    }

    if (m_state == Runnable) {
        Scheduler::enqueue_runnable_thread(*this);
        Processor::smp_wake_n_idle_processors(1);
    } else if (m_state == Stopped) {
        // We don't want to restore to Running state, only Runnable!
        m_stop_state = previous_state != Running ? previous_state : Runnable;
        auto& process = this->process();
        if (process.set_stopped(true) == false) {
            process.for_each_thread([&](auto& thread) {
                if (&thread == this)
                    return;
                if (thread.is_stopped())
                    return;
                dbgln_if(THREAD_DEBUG, "Stopping peer thread {}", thread);
                thread.set_state(Stopped, stop_signal);
            });
            process.unblock_waiters(Thread::WaitBlocker::UnblockFlags::Stopped, stop_signal);
            // Tell the parent process (if any) about this change.
            if (auto parent = Process::from_pid(process.ppid())) {
                [[maybe_unused]] auto result = parent->send_signal(SIGCHLD, &process);
            }
        }
    } else if (m_state == Dying) {
        VERIFY(previous_state != Blocked);
        if (this != Thread::current() && is_finalizable()) {
            // Some other thread set this thread to Dying, notify the
            // finalizer right away as it can be cleaned up now
            Scheduler::notify_finalizer();
        }
    }
}

struct RecognizedSymbol {
    FlatPtr address;
    const KernelSymbol* symbol { nullptr };
};

static bool symbolicate(RecognizedSymbol const& symbol, Process& process, StringBuilder& builder)
{
    if (!symbol.address)
        return false;
    bool mask_kernel_addresses = !process.is_superuser();
    if (!symbol.symbol) {
        if (!Memory::is_user_address(VirtualAddress(symbol.address))) {
            builder.append("0xdeadc0de\n");
        } else {
            if (auto* region = process.address_space().find_region_containing({ VirtualAddress(symbol.address), sizeof(FlatPtr) })) {
                size_t offset = symbol.address - region->vaddr().get();
                if (auto region_name = region->name(); !region_name.is_null() && !region_name.is_empty())
                    builder.appendff("{:p} {} + {:#x}\n", (void*)symbol.address, region_name, offset);
                else
                    builder.appendff("{:p} {:p} + {:#x}\n", (void*)symbol.address, region->vaddr().as_ptr(), offset);
            } else {
                builder.appendff("{:p}\n", symbol.address);
            }
        }
        return true;
    }
    unsigned offset = symbol.address - symbol.symbol->address;
    if (symbol.symbol->address == g_highest_kernel_symbol_address && offset > 4096) {
        builder.appendff("{:p}\n", (void*)(mask_kernel_addresses ? 0xdeadc0de : symbol.address));
    } else {
        builder.appendff("{:p} {} + {:#x}\n", (void*)(mask_kernel_addresses ? 0xdeadc0de : symbol.address), symbol.symbol->name, offset);
    }
    return true;
}
String Thread::backtrace()
{
    Vector<RecognizedSymbol, 128> recognized_symbols;

    auto& process = const_cast<Process&>(this->process());
    auto stack_trace = Processor::capture_stack_trace(*this);
    VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
    ProcessPagingScope paging_scope(process);
    for (auto& frame : stack_trace) {
        if (Memory::is_user_range(VirtualAddress(frame), sizeof(FlatPtr) * 2)) {
            recognized_symbols.append({ frame });
        } else {
            recognized_symbols.append({ frame, symbolicate_kernel_address(frame) });
        }
    }

    StringBuilder builder;
    for (auto& symbol : recognized_symbols) {
        if (!symbolicate(symbol, process, builder))
            break;
    }
    return builder.to_string();
}

size_t Thread::thread_specific_region_alignment() const
{
    return max(process().m_master_tls_alignment, alignof(ThreadSpecificData));
}

size_t Thread::thread_specific_region_size() const
{
    return align_up_to(process().m_master_tls_size, thread_specific_region_alignment()) + sizeof(ThreadSpecificData);
}

KResult Thread::make_thread_specific_region(Badge<Process>)
{
    // The process may not require a TLS region, or allocate TLS later with sys$allocate_tls (which is what dynamically loaded programs do)
    if (!process().m_master_tls_region)
        return KSuccess;

    auto range = process().address_space().allocate_range({}, thread_specific_region_size());
    if (!range.has_value())
        return ENOMEM;

    auto* region = TRY(process().address_space().allocate_region(range.value(), "Thread-specific", PROT_READ | PROT_WRITE));

    m_thread_specific_range = range.value();

    SmapDisabler disabler;
    auto* thread_specific_data = (ThreadSpecificData*)region->vaddr().offset(align_up_to(process().m_master_tls_size, thread_specific_region_alignment())).as_ptr();
    auto* thread_local_storage = (u8*)((u8*)thread_specific_data) - align_up_to(process().m_master_tls_size, process().m_master_tls_alignment);
    m_thread_specific_data = VirtualAddress(thread_specific_data);
    thread_specific_data->self = thread_specific_data;

    if (process().m_master_tls_size)
        memcpy(thread_local_storage, process().m_master_tls_region.unsafe_ptr()->vaddr().as_ptr(), process().m_master_tls_size);

    return KSuccess;
}
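// Illustrative layout of the region set up above, derived from the code (the
// exact amount of padding depends on the process's master TLS size and the
// two alignments involved):
//
//     region->vaddr()      thread_local_storage          thread_specific_data
//     |                    |                             |
//     v                    v                             v
//     [ (alignment pad) ][ copy of master TLS image    ][ ThreadSpecificData ]
//
// The self pointer written above mirrors the usual TLS ABI, where userspace
// reaches its thread-specific data through a segment register.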
RefPtr<Thread> Thread::from_tid(ThreadID tid)
{
    return Thread::all_instances().with([&](auto& list) -> RefPtr<Thread> {
        for (Thread& thread : list) {
            if (thread.tid() == tid)
                return thread;
        }
        return nullptr;
    });
}

void Thread::reset_fpu_state()
{
    memcpy(&m_fpu_state, &Processor::clean_fpu_state(), sizeof(FPUState));
}

bool Thread::should_be_stopped() const
{
    return process().is_stopped();
}

}

void AK::Formatter<Kernel::Thread>::format(FormatBuilder& builder, const Kernel::Thread& value)
{
    return AK::Formatter<FormatString>::format(
        builder,
        "{}({}:{})", value.process().name(), value.pid().value(), value.tid().value());
}