  1. /*
  2. * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
  3. * All rights reserved.
  4. *
  5. * Redistribution and use in source and binary forms, with or without
  6. * modification, are permitted provided that the following conditions are met:
  7. *
  8. * 1. Redistributions of source code must retain the above copyright notice, this
  9. * list of conditions and the following disclaimer.
  10. *
  11. * 2. Redistributions in binary form must reproduce the above copyright notice,
  12. * this list of conditions and the following disclaimer in the documentation
  13. * and/or other materials provided with the distribution.
  14. *
  15. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  16. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  17. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  18. * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
  19. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  20. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  21. * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  22. * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  23. * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  24. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  25. */
  26. #pragma once
  27. #include <AK/Function.h>
  28. #include <AK/IntrusiveList.h>
  29. #include <AK/Optional.h>
  30. #include <AK/OwnPtr.h>
  31. #include <AK/String.h>
  32. #include <AK/Vector.h>
  33. #include <Kernel/Arch/i386/CPU.h>
  34. #include <Kernel/Forward.h>
  35. #include <Kernel/KResult.h>
  36. #include <Kernel/Scheduler.h>
  37. #include <Kernel/ThreadTracer.h>
  38. #include <Kernel/UnixTypes.h>
  39. #include <LibC/fd_set.h>
  40. namespace Kernel {
// Result of signal dispatch: tells the caller whether the target thread
// should be taken out of its blocked state as a consequence of the signal.
enum class ShouldUnblockThread {
    No = 0,
    Yes
};
// Per-signal disposition record (one per signal in Thread::m_signal_action_data).
// Mirrors the userspace sigaction(2) fields -- confirm exact semantics in
// Thread's signal dispatch code.
struct SignalActionData {
    VirtualAddress handler_or_sigaction; // handler address, or a special SIG_DFL/SIG_IGN value -- presumably; verify against dispatch_signal()
    u32 mask { 0 };                      // signal mask to apply while the handler runs
    int flags { 0 };                     // SA_* style flags
};
// Header of the per-thread (TLS) region created by make_thread_specific_region().
// `self` points back at the structure itself -- presumably so userspace can
// locate its thread-specific data via the TLS segment; confirm in Thread.cpp.
struct ThreadSpecificData {
    ThreadSpecificData* self;
};
// Scheduling priority bounds and common presets for Thread::m_priority.
// Larger values mean higher priority (MIN=1 .. MAX=99); see effective_priority().
#define THREAD_PRIORITY_MIN 1
#define THREAD_PRIORITY_LOW 10
#define THREAD_PRIORITY_NORMAL 30
#define THREAD_PRIORITY_HIGH 50
#define THREAD_PRIORITY_MAX 99
// A Thread is a single schedulable flow of execution within a Process.
// It owns the x86 TSS used for context switching, a kernel stack region,
// per-thread signal state, the currently active Blocker (if any), and a
// collection of per-thread statistics counters. The Scheduler moves
// threads between its runnable and non-runnable lists based on m_state.
class Thread {
    AK_MAKE_NONCOPYABLE(Thread);
    AK_MAKE_NONMOVABLE(Thread);

    friend class Process;
    friend class Scheduler;

public:
    // The thread currently executing on the CPU.
    static Thread* current;

    explicit Thread(Process&);
    ~Thread();

    static Thread* from_tid(int);
    static void initialize();
    static void finalize_dying_threads();
    static Vector<Thread*> all_threads();
    static bool is_thread(void*);

    int tid() const { return m_tid; }
    int pid() const;

    void set_priority(u32 p) { m_priority = p; }
    u32 priority() const { return m_priority; }

    // A temporary bump applied on top of the base priority; factored into
    // effective_priority().
    void set_priority_boost(u32 boost) { m_priority_boost = boost; }
    u32 priority_boost() const { return m_priority_boost; }

    // The priority actually used for scheduling decisions.
    u32 effective_priority() const;

    // A joinable thread keeps its exit value around for a joiner (see
    // JoinBlocker / m_joiner / m_joinee below).
    void set_joinable(bool j) { m_is_joinable = j; }
    bool is_joinable() const { return m_is_joinable; }

    Process& process() { return m_process; }
    const Process& process() const { return m_process; }

    String backtrace(ProcessInspectionHandle&) const;
    Vector<FlatPtr> raw_backtrace(FlatPtr ebp, FlatPtr eip) const;

    const String& name() const { return m_name; }
    void set_name(const StringView& s) { m_name = s; }

    // Releases the resources of a dying thread (see finalize_dying_threads()).
    void finalize();

    // Lifecycle / scheduling states. Running and Runnable threads live on
    // the Scheduler's runnable list (see is_runnable_state()); all other
    // states are non-runnable.
    enum State : u8 {
        Invalid = 0,
        Runnable,
        Running,
        Skip1SchedulerPass,
        Skip0SchedulerPasses,
        Dying,
        Dead,
        Stopped,
        Blocked,
        Queued,
    };

    // Base class for everything a thread can block on. While a thread is
    // Blocked, m_blocker points at one of these; should_unblock() is polled
    // (with the current time) to decide when the thread may wake up.
    class Blocker {
    public:
        virtual ~Blocker() {}
        virtual bool should_unblock(Thread&, time_t now_s, long us) = 0;
        virtual const char* state_string() const = 0;
        virtual bool is_reason_signal() const { return false; }
        // Interruption flags are latched by whoever interrupts the block and
        // inspected by Thread::block() after wakeup to pick a BlockResult.
        void set_interrupted_by_death() { m_was_interrupted_by_death = true; }
        bool was_interrupted_by_death() const { return m_was_interrupted_by_death; }
        void set_interrupted_by_signal() { m_was_interrupted_while_blocked = true; }
        bool was_interrupted_by_signal() const { return m_was_interrupted_while_blocked; }

    private:
        bool m_was_interrupted_while_blocked { false };
        bool m_was_interrupted_by_death { false };

        friend class Thread;
    };

    // Blocks until the joinee thread exits; the joinee's exit value is
    // delivered through the m_joinee_exit_value reference.
    class JoinBlocker final : public Blocker {
    public:
        explicit JoinBlocker(Thread& joinee, void*& joinee_exit_value);
        virtual bool should_unblock(Thread&, time_t now_s, long us) override;
        virtual const char* state_string() const override { return "Joining"; }
        void set_joinee_exit_value(void* value) { m_joinee_exit_value = value; }

    private:
        Thread& m_joinee;
        void*& m_joinee_exit_value;
    };

    // Common base for blockers tied to a FileDescription; holds a strong
    // reference so the description stays alive for the duration of the block.
    class FileDescriptionBlocker : public Blocker {
    public:
        const FileDescription& blocked_description() const;

    protected:
        explicit FileDescriptionBlocker(const FileDescription&);

    private:
        NonnullRefPtr<FileDescription> m_blocked_description;
    };

    // Blocks until an incoming connection is ready to accept(2).
    class AcceptBlocker final : public FileDescriptionBlocker {
    public:
        explicit AcceptBlocker(const FileDescription&);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Accepting"; }
    };

    // Blocks until a connect(2) attempt completes.
    class ConnectBlocker final : public FileDescriptionBlocker {
    public:
        explicit ConnectBlocker(const FileDescription&);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Connecting"; }
    };

    // Blocks until the description becomes writable.
    class WriteBlocker final : public FileDescriptionBlocker {
    public:
        explicit WriteBlocker(const FileDescription&);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Writing"; }

    private:
        // Optional absolute deadline -- presumably a send timeout; confirm in Thread.cpp.
        Optional<timeval> m_deadline;
    };

    // Blocks until the description becomes readable.
    class ReadBlocker final : public FileDescriptionBlocker {
    public:
        explicit ReadBlocker(const FileDescription&);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Reading"; }

    private:
        // Optional absolute deadline -- presumably a receive timeout; confirm in Thread.cpp.
        Optional<timeval> m_deadline;
    };

    // Blocks until an arbitrary caller-supplied condition returns true.
    class ConditionBlocker final : public Blocker {
    public:
        ConditionBlocker(const char* state_string, Function<bool()>&& condition);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return m_state_string; }

    private:
        Function<bool()> m_block_until_condition;
        const char* m_state_string { nullptr };
    };

    // Blocks until the given wakeup time is reached.
    class SleepBlocker final : public Blocker {
    public:
        explicit SleepBlocker(u64 wakeup_time);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Sleeping"; }

    private:
        u64 m_wakeup_time { 0 };
    };

    // select(2)-style blocking on sets of file descriptors.
    class SelectBlocker final : public Blocker {
    public:
        typedef Vector<int, FD_SETSIZE> FDVector;
        SelectBlocker(const timeval& tv, bool select_has_timeout, const FDVector& read_fds, const FDVector& write_fds, const FDVector& except_fds);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Selecting"; }

    private:
        timeval m_select_timeout;
        bool m_select_has_timeout { false };
        // NOTE: These are references; the caller's fd vectors must outlive the block.
        const FDVector& m_select_read_fds;
        const FDVector& m_select_write_fds;
        const FDVector& m_select_exceptional_fds;
    };

    // waitpid(2)-style blocking; the waitee's pid is reported back through
    // the m_waitee_pid reference.
    class WaitBlocker final : public Blocker {
    public:
        WaitBlocker(int wait_options, pid_t& waitee_pid);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Waiting"; }

    private:
        int m_wait_options { 0 };
        pid_t& m_waitee_pid;
    };

    // A block with no wake-up condition of its own; lifted only externally.
    // The only current reason is waiting while stopped by a signal.
    class SemiPermanentBlocker final : public Blocker {
    public:
        enum class Reason {
            Signal,
        };

        SemiPermanentBlocker(Reason reason);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override
        {
            switch (m_reason) {
            case Reason::Signal:
                return "Signal";
            }
            ASSERT_NOT_REACHED();
        }
        virtual bool is_reason_signal() const override { return m_reason == Reason::Signal; }

    private:
        Reason m_reason;
    };

    void did_schedule() { ++m_times_scheduled; }
    u32 times_scheduled() const { return m_times_scheduled; }

    bool is_stopped() const { return m_state == Stopped; }
    bool is_blocked() const { return m_state == Blocked; }
    bool has_blocker() const { return m_blocker != nullptr; }
    const Blocker& blocker() const;

    // True when the saved code segment selector has ring-0 privilege
    // (low two bits of CS are the requested privilege level).
    bool in_kernel() const { return (m_tss.cs & 0x03) == 0; }

    u32 frame_ptr() const { return m_tss.ebp; }
    u32 stack_ptr() const { return m_tss.esp; }

    RegisterState& get_register_dump_from_stack();

    u16 selector() const { return m_far_ptr.selector; }
    TSS32& tss() { return m_tss; }
    const TSS32& tss() const { return m_tss; }
    State state() const { return m_state; }
    const char* state_string() const;
    u32 ticks() const { return m_ticks; }

    VirtualAddress thread_specific_data() const { return m_thread_specific_data; }

    u64 sleep(u32 ticks);
    u64 sleep_until(u64 wakeup_time);

    // How a call to block() ended.
    enum class BlockResult {
        WokeNormally,
        InterruptedBySignal,
        InterruptedByDeath,
        InterruptedByTimeout,
    };

    // Blocks the current thread on a stack-allocated Blocker of type T,
    // constructed from `args`. Returns after the scheduler has resumed us,
    // reporting whether the block ended normally or was interrupted.
    template<typename T, class... Args>
    [[nodiscard]] BlockResult block(Args&&... args)
    {
        // We should never be blocking a blocked (or otherwise non-active) thread.
        ASSERT(state() == Thread::Running);
        ASSERT(m_blocker == nullptr);

        // The blocker lives on this kernel stack for the duration of the block.
        T t(forward<Args>(args)...);
        m_blocker = &t;
        set_state(Thread::Blocked);

        // Yield to the scheduler, and wait for us to resume unblocked.
        yield_without_holding_big_lock();

        // We should no longer be blocked once we woke up
        ASSERT(state() != Thread::Blocked);

        // Remove ourselves...
        m_blocker = nullptr;

        if (t.was_interrupted_by_signal())
            return BlockResult::InterruptedBySignal;

        if (t.was_interrupted_by_death())
            return BlockResult::InterruptedByDeath;

        return BlockResult::WokeNormally;
    }

    // Convenience wrapper: block until an arbitrary condition becomes true.
    [[nodiscard]] BlockResult block_until(const char* state_string, Function<bool()>&& condition)
    {
        return block<ConditionBlocker>(state_string, move(condition));
    }

    BlockResult wait_on(WaitQueue& queue, timeval* timeout = nullptr, Atomic<bool>* lock = nullptr, Thread* beneficiary = nullptr, const char* reason = nullptr);
    void wake_from_queue();

    void unblock();

    // Tell this thread to unblock if needed,
    // gracefully unwind the stack and die.
    void set_should_die();
    void die_if_needed();

    const FarPtr& far_ptr() const { return m_far_ptr; }

    bool tick();
    void set_ticks_left(u32 t) { m_ticks_left = t; }
    u32 ticks_left() const { return m_ticks_left; }

    u32 kernel_stack_base() const { return m_kernel_stack_base; }
    u32 kernel_stack_top() const { return m_kernel_stack_top; }

    void set_selector(u16 s) { m_far_ptr.selector = s; }
    void set_state(State);

    void send_urgent_signal_to_self(u8 signal);
    void send_signal(u8 signal, Process* sender);
    void consider_unblock(time_t now_sec, long now_usec);

    void set_dump_backtrace_on_finalization() { m_dump_backtrace_on_finalization = true; }

    ShouldUnblockThread dispatch_one_pending_signal();
    ShouldUnblockThread dispatch_signal(u8 signal);
    bool has_unmasked_pending_signals() const { return m_pending_signals & ~m_signal_mask; }
    void terminate_due_to_signal(u8 signal);
    bool should_ignore_signal(u8 signal) const;
    bool has_signal_handler(u8 signal) const;
    // Signals are numbered from 1; bit (signal - 1) of m_pending_signals
    // tracks the pending state.
    bool has_pending_signal(u8 signal) const { return m_pending_signals & (1 << (signal - 1)); }

    FPUState& fpu_state() { return *m_fpu_state; }

    void set_default_signal_dispositions();

    void push_value_on_stack(FlatPtr);
    u32 make_userspace_stack_for_main_thread(Vector<String> arguments, Vector<String> environment);

    void make_thread_specific_region(Badge<Process>);

    // --- Per-thread statistics, bumped by the relevant subsystems ---
    unsigned syscall_count() const { return m_syscall_count; }
    void did_syscall() { ++m_syscall_count; }
    unsigned inode_faults() const { return m_inode_faults; }
    void did_inode_fault() { ++m_inode_faults; }
    unsigned zero_faults() const { return m_zero_faults; }
    void did_zero_fault() { ++m_zero_faults; }
    unsigned cow_faults() const { return m_cow_faults; }
    void did_cow_fault() { ++m_cow_faults; }

    unsigned file_read_bytes() const { return m_file_read_bytes; }
    unsigned file_write_bytes() const { return m_file_write_bytes; }

    void did_file_read(unsigned bytes)
    {
        m_file_read_bytes += bytes;
    }

    void did_file_write(unsigned bytes)
    {
        m_file_write_bytes += bytes;
    }

    unsigned unix_socket_read_bytes() const { return m_unix_socket_read_bytes; }
    unsigned unix_socket_write_bytes() const { return m_unix_socket_write_bytes; }

    void did_unix_socket_read(unsigned bytes)
    {
        m_unix_socket_read_bytes += bytes;
    }

    void did_unix_socket_write(unsigned bytes)
    {
        m_unix_socket_write_bytes += bytes;
    }

    unsigned ipv4_socket_read_bytes() const { return m_ipv4_socket_read_bytes; }
    unsigned ipv4_socket_write_bytes() const { return m_ipv4_socket_write_bytes; }

    void did_ipv4_socket_read(unsigned bytes)
    {
        m_ipv4_socket_read_bytes += bytes;
    }

    void did_ipv4_socket_write(unsigned bytes)
    {
        m_ipv4_socket_write_bytes += bytes;
    }

    Thread* clone(Process&);

    // Iteration helpers over all threads; defined below the class.
    template<typename Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<typename Callback>
    static IterationDecision for_each_living(Callback);
    template<typename Callback>
    static IterationDecision for_each(Callback);

    // Whether a thread in `state` belongs on the Scheduler's runnable list.
    static bool is_runnable_state(Thread::State state)
    {
        return state == Thread::State::Running || state == Thread::State::Runnable;
    }

    static constexpr u32 default_kernel_stack_size = 65536;
    static constexpr u32 default_userspace_stack_size = 4 * MB;

    // Non-null while this thread is being traced (ptrace-style).
    ThreadTracer* tracer() { return m_tracer.ptr(); }
    void start_tracing_from(pid_t tracer);
    void stop_tracing();
    void tracer_trap(const RegisterState&);

private:
    // Intrusive list hooks: one for the Scheduler's thread lists, one for
    // WaitQueue membership.
    IntrusiveListNode m_runnable_list_node;
    IntrusiveListNode m_wait_queue_node;

private:
    friend class SchedulerData;
    friend class WaitQueue;
    bool unlock_process_if_locked();
    void relock_process();
    String backtrace_impl() const;
    void reset_fpu_state();

    Process& m_process;
    int m_tid { -1 };
    TSS32 m_tss;
    FarPtr m_far_ptr;
    u32 m_ticks { 0 };
    u32 m_ticks_left { 0 };
    u32 m_times_scheduled { 0 };
    u32 m_pending_signals { 0 };
    u32 m_signal_mask { 0 };
    u32 m_kernel_stack_base { 0 };
    u32 m_kernel_stack_top { 0 };
    OwnPtr<Region> m_kernel_stack_region;
    VirtualAddress m_thread_specific_data;
    SignalActionData m_signal_action_data[32]; // one entry per signal number
    Blocker* m_blocker { nullptr };            // non-null only while Blocked; points into the blocked thread's stack

    // Join bookkeeping (see JoinBlocker).
    bool m_is_joinable { true };
    Thread* m_joiner { nullptr };
    Thread* m_joinee { nullptr };
    void* m_exit_value { nullptr };

    unsigned m_syscall_count { 0 };
    unsigned m_inode_faults { 0 };
    unsigned m_zero_faults { 0 };
    unsigned m_cow_faults { 0 };

    unsigned m_file_read_bytes { 0 };
    unsigned m_file_write_bytes { 0 };

    unsigned m_unix_socket_read_bytes { 0 };
    unsigned m_unix_socket_write_bytes { 0 };

    unsigned m_ipv4_socket_read_bytes { 0 };
    unsigned m_ipv4_socket_write_bytes { 0 };

    FPUState* m_fpu_state { nullptr };
    State m_state { Invalid };
    String m_name;
    u32 m_priority { THREAD_PRIORITY_NORMAL };
    u32 m_extra_priority { 0 };
    u32 m_priority_boost { 0 };

    // Signal that stopped this thread, and the state to restore on continue.
    u8 m_stop_signal { 0 };
    State m_stop_state { Invalid };

    bool m_dump_backtrace_on_finalization { false };
    bool m_should_die { false };

    OwnPtr<ThreadTracer> m_tracer;

    void yield_without_holding_big_lock();
};
// Global table of Thread pointers -- presumably all live threads, backing
// all_threads()/from_tid(); confirm in Thread.cpp.
HashTable<Thread*>& thread_table();
  408. template<typename Callback>
  409. inline IterationDecision Thread::for_each_living(Callback callback)
  410. {
  411. ASSERT_INTERRUPTS_DISABLED();
  412. return Thread::for_each([callback](Thread& thread) -> IterationDecision {
  413. if (thread.state() != Thread::State::Dead && thread.state() != Thread::State::Dying)
  414. return callback(thread);
  415. return IterationDecision::Continue;
  416. });
  417. }
  418. template<typename Callback>
  419. inline IterationDecision Thread::for_each(Callback callback)
  420. {
  421. ASSERT_INTERRUPTS_DISABLED();
  422. auto ret = Scheduler::for_each_runnable(callback);
  423. if (ret == IterationDecision::Break)
  424. return ret;
  425. return Scheduler::for_each_nonrunnable(callback);
  426. }
  427. template<typename Callback>
  428. inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
  429. {
  430. ASSERT_INTERRUPTS_DISABLED();
  431. auto new_callback = [=](Thread& thread) -> IterationDecision {
  432. if (thread.state() == state)
  433. return callback(thread);
  434. return IterationDecision::Continue;
  435. };
  436. if (is_runnable_state(state))
  437. return Scheduler::for_each_runnable(new_callback);
  438. return Scheduler::for_each_nonrunnable(new_callback);
  439. }
// Formats a Thread for the kernel debug log stream.
const LogStream& operator<<(const LogStream&, const Thread&);
  441. struct SchedulerData {
  442. typedef IntrusiveList<Thread, &Thread::m_runnable_list_node> ThreadList;
  443. ThreadList m_runnable_threads;
  444. ThreadList m_nonrunnable_threads;
  445. ThreadList& thread_list_for_state(Thread::State state)
  446. {
  447. if (Thread::is_runnable_state(state))
  448. return m_runnable_threads;
  449. return m_nonrunnable_threads;
  450. }
  451. };
  452. template<typename Callback>
  453. inline IterationDecision Scheduler::for_each_runnable(Callback callback)
  454. {
  455. ASSERT_INTERRUPTS_DISABLED();
  456. auto& tl = g_scheduler_data->m_runnable_threads;
  457. for (auto it = tl.begin(); it != tl.end();) {
  458. auto& thread = *it;
  459. it = ++it;
  460. if (callback(thread) == IterationDecision::Break)
  461. return IterationDecision::Break;
  462. }
  463. return IterationDecision::Continue;
  464. }
  465. template<typename Callback>
  466. inline IterationDecision Scheduler::for_each_nonrunnable(Callback callback)
  467. {
  468. ASSERT_INTERRUPTS_DISABLED();
  469. auto& tl = g_scheduler_data->m_nonrunnable_threads;
  470. for (auto it = tl.begin(); it != tl.end();) {
  471. auto& thread = *it;
  472. it = ++it;
  473. if (callback(thread) == IterationDecision::Break)
  474. return IterationDecision::Break;
  475. }
  476. return IterationDecision::Continue;
  477. }
// GDT selector and descriptor backing the userspace thread-specific (TLS)
// segment; see Thread::make_thread_specific_region().
u16 thread_specific_selector();
Descriptor& thread_specific_descriptor();
  480. }