// Kernel/Thread.cpp
  1. #include <Kernel/Thread.h>
  2. #include <Kernel/Scheduler.h>
  3. #include <Kernel/Process.h>
  4. #include <Kernel/FileSystem/FileDescriptor.h>
  5. #include <Kernel/VM/MemoryManager.h>
  6. #include <LibC/signal_numbers.h>
  7. InlineLinkedList<Thread>* g_threads;
  8. static const dword default_kernel_stack_size = 16384;
  9. static const dword default_userspace_stack_size = 65536;
// Construct a new thread belonging to `process`, assigning it the next TID
// and building an initial TSS so the scheduler can switch to it.
Thread::Thread(Process& process)
    : m_process(process)
    , m_tid(process.m_next_tid++)
{
    dbgprintf("Thread{%p}: New thread TID=%u in %s(%u)\n", this, m_tid, process.name().characters(), process.pid());
    set_default_signal_dispositions();
    // 16-byte aligned FPU save area — presumably for FXSAVE/FXRSTOR; confirm.
    m_fpu_state = (FPUState*)kmalloc_aligned(sizeof(FPUState), 16);
    memset(&m_tss, 0, sizeof(m_tss));
    // Only IF is set when a process boots.
    m_tss.eflags = 0x0202;
    // Ring 0 threads get the kernel code/data selectors; ring 3 threads get
    // the userspace selectors (low two bits of the selector = RPL 3).
    word cs, ds, ss;
    if (m_process.is_ring0()) {
        cs = 0x08;
        ds = 0x10;
        ss = 0x10;
    } else {
        cs = 0x1b;
        ds = 0x23;
        ss = 0x23;
    }
    m_tss.ds = ds;
    m_tss.es = ds;
    m_tss.fs = ds;
    m_tss.gs = ds;
    m_tss.ss = ss;
    m_tss.cs = cs;
    // The thread shares its process's address space.
    m_tss.cr3 = m_process.page_directory().cr3();
    if (m_process.is_ring0()) {
        // FIXME: This memory is leaked.
        // But uh, there's also no kernel process termination, so I guess it's not technically leaked...
        dword stack_bottom = (dword)kmalloc_eternal(default_kernel_stack_size);
        // Stack grows down: start ESP at the top, rounded down to 8 bytes.
        m_tss.esp = (stack_bottom + default_kernel_stack_size) & 0xfffffff8u;
    } else {
        // Ring3 processes need a separate stack for Ring0.
        m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid));
        m_kernel_stack_region->commit();
        m_tss.ss0 = 0x10;
        m_tss.esp0 = m_kernel_stack_region->laddr().offset(default_kernel_stack_size).get() & 0xfffffff8u;
    }
    // HACK: Ring2 SS in the TSS is the current PID.
    m_tss.ss2 = m_process.pid();
    // Placeholder offset — presumably overwritten before first switch; confirm.
    m_far_ptr.offset = 0x98765432;
    // PID 0 is kept off the global thread list — NOTE(review): presumably the
    // idle/colonel process; verify against Scheduler.
    if (m_process.pid() != 0) {
        InterruptDisabler disabler;
        g_threads->prepend(this);
    }
}
// Tear down a thread: release its FPU state, unlink it from the global
// thread list, free its GDT selector, and free the lazily-allocated
// signal-handler kernel stack if any.
Thread::~Thread()
{
    dbgprintf("~Thread{%p}\n", this);
    kfree_aligned(m_fpu_state);
    {
        // The global list is also walked from interrupt-off contexts, so
        // removal must happen with interrupts disabled.
        InterruptDisabler disabler;
        g_threads->remove(this);
    }
    // Don't leave the scheduler's "last FPU user" pointing at a dead thread.
    if (g_last_fpu_thread == this)
        g_last_fpu_thread = nullptr;
    if (selector())
        gdt_free_entry(selector());
    // Allocated on demand by dispatch_signal().
    if (m_kernel_stack_for_signal_handler) {
        kfree(m_kernel_stack_for_signal_handler);
        m_kernel_stack_for_signal_handler = nullptr;
    }
}
  74. void Thread::unblock()
  75. {
  76. m_blocked_descriptor = nullptr;
  77. if (current == this) {
  78. m_state = Thread::Running;
  79. return;
  80. }
  81. ASSERT(m_state != Thread::Runnable && m_state != Thread::Running);
  82. m_state = Thread::Runnable;
  83. }
// Block this thread until `alarm` fires.
void Thread::snooze_until(Alarm& alarm)
{
    // NOTE(review): presumably the scheduler checks m_snoozing_alarm to decide
    // when to wake us — confirm against Scheduler.
    m_snoozing_alarm = &alarm;
    block(Thread::BlockedSnoozing);
    Scheduler::yield();
}
// Put the running thread into `new_state` and yield the CPU until it is
// unblocked. Temporarily releases the process big lock (if held) so other
// threads in the same process can run while we're blocked.
void Thread::block(Thread::State new_state)
{
    bool did_unlock = process().big_lock().unlock_if_locked();
    if (state() != Thread::Running) {
        kprintf("Thread::block: %s(%u) block(%u/%s) with state=%u/%s\n", process().name().characters(), process().pid(), new_state, to_string(new_state), state(), to_string(state()));
    }
    // Only the currently running thread may block itself.
    ASSERT(state() == Thread::Running);
    m_was_interrupted_while_blocked = false;
    set_state(new_state);
    // Give up the CPU; execution resumes here once we're unblocked.
    Scheduler::yield();
    // Re-acquire the big lock only if we were the ones holding it.
    if (did_unlock)
        process().big_lock().lock();
}
// Block in `new_state` while remembering which descriptor we're waiting on;
// unblock() clears m_blocked_descriptor again.
void Thread::block(Thread::State new_state, FileDescriptor& descriptor)
{
    m_blocked_descriptor = &descriptor;
    block(new_state);
}
// Put the current thread to sleep for `ticks` timer ticks.
void Thread::sleep(dword ticks)
{
    ASSERT(state() == Thread::Running);
    // Wake when the global uptime counter reaches the deadline.
    current->set_wakeup_time(g_uptime + ticks);
    current->block(Thread::BlockedSleep);
}
  114. const char* to_string(Thread::State state)
  115. {
  116. switch (state) {
  117. case Thread::Invalid: return "Invalid";
  118. case Thread::Runnable: return "Runnable";
  119. case Thread::Running: return "Running";
  120. case Thread::Dying: return "Dying";
  121. case Thread::Dead: return "Dead";
  122. case Thread::Stopped: return "Stopped";
  123. case Thread::Skip1SchedulerPass: return "Skip1";
  124. case Thread::Skip0SchedulerPasses: return "Skip0";
  125. case Thread::BlockedSleep: return "Sleep";
  126. case Thread::BlockedWait: return "Wait";
  127. case Thread::BlockedRead: return "Read";
  128. case Thread::BlockedWrite: return "Write";
  129. case Thread::BlockedSignal: return "Signal";
  130. case Thread::BlockedSelect: return "Select";
  131. case Thread::BlockedLurking: return "Lurking";
  132. case Thread::BlockedConnect: return "Connect";
  133. case Thread::BlockedReceive: return "Receive";
  134. case Thread::BlockedSnoozing: return "Snoozing";
  135. }
  136. kprintf("to_string(Thread::State): Invalid state: %u\n", state);
  137. ASSERT_NOT_REACHED();
  138. return nullptr;
  139. }
// Final teardown of a dying thread; runs outside interrupt context
// (see finalize_dying_threads).
void Thread::finalize()
{
    dbgprintf("Finalizing Thread %u in %s(%u)\n", tid(), m_process.name().characters(), pid());
    set_state(Thread::State::Dead);
    m_blocked_descriptor = nullptr;
    // When the main thread dies, the whole process is finalized with it.
    if (this == &m_process.main_thread())
        m_process.finalize();
}
  148. void Thread::finalize_dying_threads()
  149. {
  150. Vector<Thread*, 32> dying_threads;
  151. {
  152. InterruptDisabler disabler;
  153. for_each_in_state(Thread::State::Dying, [&] (Thread& thread) {
  154. dying_threads.append(&thread);
  155. });
  156. }
  157. for (auto* thread : dying_threads)
  158. thread->finalize();
  159. }
  160. bool Thread::tick()
  161. {
  162. ++m_ticks;
  163. if (tss().cs & 3)
  164. ++m_process.m_ticks_in_user;
  165. else
  166. ++m_process.m_ticks_in_kernel;
  167. return --m_ticks_left;
  168. }
  169. void Thread::send_signal(byte signal, Process* sender)
  170. {
  171. ASSERT(signal < 32);
  172. if (sender)
  173. dbgprintf("signal: %s(%u) sent %d to %s(%u)\n", sender->name().characters(), sender->pid(), signal, process().name().characters(), pid());
  174. else
  175. dbgprintf("signal: kernel sent %d to %s(%u)\n", signal, process().name().characters(), pid());
  176. InterruptDisabler disabler;
  177. m_pending_signals |= 1 << signal;
  178. }
  179. bool Thread::has_unmasked_pending_signals() const
  180. {
  181. return m_pending_signals & ~m_signal_mask;
  182. }
  183. ShouldUnblockThread Thread::dispatch_one_pending_signal()
  184. {
  185. ASSERT_INTERRUPTS_DISABLED();
  186. dword signal_candidates = m_pending_signals & ~m_signal_mask;
  187. ASSERT(signal_candidates);
  188. byte signal = 0;
  189. for (; signal < 32; ++signal) {
  190. if (signal_candidates & (1 << signal)) {
  191. break;
  192. }
  193. }
  194. return dispatch_signal(signal);
  195. }
  196. enum class DefaultSignalAction {
  197. Terminate,
  198. Ignore,
  199. DumpCore,
  200. Stop,
  201. Continue,
  202. };
  203. DefaultSignalAction default_signal_action(byte signal)
  204. {
  205. ASSERT(signal && signal < NSIG);
  206. switch (signal) {
  207. case SIGHUP:
  208. case SIGINT:
  209. case SIGKILL:
  210. case SIGPIPE:
  211. case SIGALRM:
  212. case SIGUSR1:
  213. case SIGUSR2:
  214. case SIGVTALRM:
  215. case SIGSTKFLT:
  216. case SIGIO:
  217. case SIGPROF:
  218. case SIGTERM:
  219. case SIGPWR:
  220. return DefaultSignalAction::Terminate;
  221. case SIGCHLD:
  222. case SIGURG:
  223. case SIGWINCH:
  224. return DefaultSignalAction::Ignore;
  225. case SIGQUIT:
  226. case SIGILL:
  227. case SIGTRAP:
  228. case SIGABRT:
  229. case SIGBUS:
  230. case SIGFPE:
  231. case SIGSEGV:
  232. case SIGXCPU:
  233. case SIGXFSZ:
  234. case SIGSYS:
  235. return DefaultSignalAction::DumpCore;
  236. case SIGCONT:
  237. return DefaultSignalAction::Continue;
  238. case SIGSTOP:
  239. case SIGTSTP:
  240. case SIGTTIN:
  241. case SIGTTOU:
  242. return DefaultSignalAction::Stop;
  243. }
  244. ASSERT_NOT_REACHED();
  245. }
  246. ShouldUnblockThread Thread::dispatch_signal(byte signal)
  247. {
  248. ASSERT_INTERRUPTS_DISABLED();
  249. ASSERT(signal < 32);
  250. #ifdef SIGNAL_DEBUG
  251. kprintf("dispatch_signal %s(%u) <- %u\n", name().characters(), pid(), signal);
  252. #endif
  253. auto& action = m_signal_action_data[signal];
  254. // FIXME: Implement SA_SIGINFO signal handlers.
  255. ASSERT(!(action.flags & SA_SIGINFO));
  256. // Mark this signal as handled.
  257. m_pending_signals &= ~(1 << signal);
  258. if (signal == SIGSTOP) {
  259. set_state(Stopped);
  260. return ShouldUnblockThread::No;
  261. }
  262. if (signal == SIGCONT && state() == Stopped)
  263. set_state(Runnable);
  264. auto handler_laddr = action.handler_or_sigaction;
  265. if (handler_laddr.is_null()) {
  266. switch (default_signal_action(signal)) {
  267. case DefaultSignalAction::Stop:
  268. set_state(Stopped);
  269. return ShouldUnblockThread::No;
  270. case DefaultSignalAction::DumpCore:
  271. case DefaultSignalAction::Terminate:
  272. m_process.terminate_due_to_signal(signal);
  273. return ShouldUnblockThread::No;
  274. case DefaultSignalAction::Ignore:
  275. return ShouldUnblockThread::No;
  276. case DefaultSignalAction::Continue:
  277. return ShouldUnblockThread::Yes;
  278. }
  279. ASSERT_NOT_REACHED();
  280. }
  281. if (handler_laddr.as_ptr() == SIG_IGN) {
  282. #ifdef SIGNAL_DEBUG
  283. kprintf("%s(%u) ignored signal %u\n", name().characters(), pid(), signal);
  284. #endif
  285. return ShouldUnblockThread::Yes;
  286. }
  287. dword old_signal_mask = m_signal_mask;
  288. dword new_signal_mask = action.mask;
  289. if (action.flags & SA_NODEFER)
  290. new_signal_mask &= ~(1 << signal);
  291. else
  292. new_signal_mask |= 1 << signal;
  293. m_signal_mask |= new_signal_mask;
  294. Scheduler::prepare_to_modify_tss(*this);
  295. word ret_cs = m_tss.cs;
  296. dword ret_eip = m_tss.eip;
  297. dword ret_eflags = m_tss.eflags;
  298. bool interrupting_in_kernel = (ret_cs & 3) == 0;
  299. ProcessPagingScope paging_scope(m_process);
  300. m_process.create_signal_trampolines_if_needed();
  301. if (interrupting_in_kernel) {
  302. #ifdef SIGNAL_DEBUG
  303. kprintf("dispatch_signal to %s(%u) in state=%s with return to %w:%x\n", name().characters(), pid(), to_string(state()), ret_cs, ret_eip);
  304. #endif
  305. ASSERT(is_blocked());
  306. m_tss_to_resume_kernel = make<TSS32>(m_tss);
  307. #ifdef SIGNAL_DEBUG
  308. kprintf("resume tss pc: %w:%x stack: %w:%x flags: %x cr3: %x\n", m_tss_to_resume_kernel.cs, m_tss_to_resume_kernel->eip, m_tss_to_resume_kernel->ss, m_tss_to_resume_kernel->esp, m_tss_to_resume_kernel->eflags, m_tss_to_resume_kernel->cr3);
  309. #endif
  310. if (!m_signal_stack_user_region) {
  311. m_signal_stack_user_region = m_process.allocate_region(LinearAddress(), default_userspace_stack_size, "Signal stack (user)");
  312. ASSERT(m_signal_stack_user_region);
  313. }
  314. if (!m_kernel_stack_for_signal_handler) {
  315. m_kernel_stack_for_signal_handler = kmalloc(default_kernel_stack_size);
  316. ASSERT(m_kernel_stack_for_signal_handler);
  317. }
  318. m_tss.ss = 0x23;
  319. m_tss.esp = m_signal_stack_user_region->laddr().offset(default_userspace_stack_size).get();
  320. m_tss.ss0 = 0x10;
  321. m_tss.esp0 = (dword)m_kernel_stack_for_signal_handler + default_kernel_stack_size;
  322. push_value_on_stack(0);
  323. } else {
  324. push_value_on_stack(ret_eip);
  325. push_value_on_stack(ret_eflags);
  326. // PUSHA
  327. dword old_esp = m_tss.esp;
  328. push_value_on_stack(m_tss.eax);
  329. push_value_on_stack(m_tss.ecx);
  330. push_value_on_stack(m_tss.edx);
  331. push_value_on_stack(m_tss.ebx);
  332. push_value_on_stack(old_esp);
  333. push_value_on_stack(m_tss.ebp);
  334. push_value_on_stack(m_tss.esi);
  335. push_value_on_stack(m_tss.edi);
  336. // Align the stack.
  337. m_tss.esp -= 12;
  338. }
  339. // PUSH old_signal_mask
  340. push_value_on_stack(old_signal_mask);
  341. m_tss.cs = 0x1b;
  342. m_tss.ds = 0x23;
  343. m_tss.es = 0x23;
  344. m_tss.fs = 0x23;
  345. m_tss.gs = 0x23;
  346. m_tss.eip = handler_laddr.get();
  347. // FIXME: Should we worry about the stack being 16 byte aligned when entering a signal handler?
  348. push_value_on_stack(signal);
  349. if (interrupting_in_kernel)
  350. push_value_on_stack(m_process.m_return_to_ring0_from_signal_trampoline.get());
  351. else
  352. push_value_on_stack(m_process.m_return_to_ring3_from_signal_trampoline.get());
  353. ASSERT((m_tss.esp % 16) == 0);
  354. // FIXME: This state is such a hack. It avoids trouble if 'current' is the process receiving a signal.
  355. set_state(Skip1SchedulerPass);
  356. #ifdef SIGNAL_DEBUG
  357. kprintf("signal: Okay, %s(%u) {%s} has been primed with signal handler %w:%x\n", name().characters(), pid(), to_string(state()), m_tss.cs, m_tss.eip);
  358. #endif
  359. return ShouldUnblockThread::Yes;
  360. }
// Reset all signal actions to SIG_DFL (zeroed), then mark the signals that
// default to "ignore" explicitly.
void Thread::set_default_signal_dispositions()
{
    // FIXME: Set up all the right default actions. See signal(7).
    memset(&m_signal_action_data, 0, sizeof(m_signal_action_data));
    m_signal_action_data[SIGCHLD].handler_or_sigaction = LinearAddress((dword)SIG_IGN);
    m_signal_action_data[SIGWINCH].handler_or_sigaction = LinearAddress((dword)SIG_IGN);
}
  368. void Thread::push_value_on_stack(dword value)
  369. {
  370. m_tss.esp -= 4;
  371. dword* stack_ptr = (dword*)m_tss.esp;
  372. *stack_ptr = value;
  373. }
// Build the main thread's userspace stack and populate it with argc/argv/envp
// for the program's entry point.
void Thread::make_userspace_stack_for_main_thread(Vector<String> arguments, Vector<String> environment)
{
    auto* region = m_process.allocate_region(LinearAddress(), default_userspace_stack_size, "Stack (Main thread)");
    ASSERT(region);
    // ESP starts at the top of the region; the stack grows down.
    m_tss.esp = region->laddr().offset(default_userspace_stack_size).get();
    // Layout at the bottom of the region:
    //   argv[0..argc]  null-terminated pointer table
    //   env[0..n]      null-terminated pointer table
    //   the string bytes themselves, NUL-terminated, packed back to back.
    char* stack_base = (char*)region->laddr().get();
    int argc = arguments.size();
    char** argv = (char**)stack_base;
    char** env = argv + arguments.size() + 1;
    char* bufptr = stack_base + (sizeof(char*) * (arguments.size() + 1)) + (sizeof(char*) * (environment.size() + 1));
    // Make sure the pointer tables plus the string blobs actually fit.
    size_t total_blob_size = 0;
    for (auto& a : arguments)
        total_blob_size += a.length() + 1;
    for (auto& e : environment)
        total_blob_size += e.length() + 1;
    size_t total_meta_size = sizeof(char*) * (arguments.size() + 1) + sizeof(char*) * (environment.size() + 1);
    // FIXME: It would be better if this didn't make us panic.
    ASSERT((total_blob_size + total_meta_size) < default_userspace_stack_size);
    // Copy each argument string into the blob area and record its pointer.
    for (int i = 0; i < arguments.size(); ++i) {
        argv[i] = bufptr;
        memcpy(bufptr, arguments[i].characters(), arguments[i].length());
        bufptr += arguments[i].length();
        *(bufptr++) = '\0';
    }
    argv[arguments.size()] = nullptr;
    // Same for the environment strings.
    for (int i = 0; i < environment.size(); ++i) {
        env[i] = bufptr;
        memcpy(bufptr, environment[i].characters(), environment[i].length());
        bufptr += environment[i].length();
        *(bufptr++) = '\0';
    }
    env[environment.size()] = nullptr;
    // NOTE: The stack needs to be 16-byte aligned.
    // Entry-point arguments (pushed right-to-left): envp, argv, argc,
    // then a fake return address.
    push_value_on_stack((dword)env);
    push_value_on_stack((dword)argv);
    push_value_on_stack((dword)argc);
    push_value_on_stack(0);
}
  412. void Thread::make_userspace_stack_for_secondary_thread(void *argument)
  413. {
  414. auto* region = m_process.allocate_region(LinearAddress(), default_userspace_stack_size, String::format("Stack (Thread %d)", tid()));
  415. ASSERT(region);
  416. m_tss.esp = region->laddr().offset(default_userspace_stack_size).get();
  417. // NOTE: The stack needs to be 16-byte aligned.
  418. push_value_on_stack((dword)argument);
  419. push_value_on_stack(0);
  420. }
  421. Thread* Thread::clone(Process& process)
  422. {
  423. auto* clone = new Thread(process);
  424. memcpy(clone->m_signal_action_data, m_signal_action_data, sizeof(m_signal_action_data));
  425. clone->m_signal_mask = m_signal_mask;
  426. clone->m_fpu_state = (FPUState*)kmalloc_aligned(sizeof(FPUState), 16);
  427. memcpy(clone->m_fpu_state, m_fpu_state, sizeof(FPUState));
  428. clone->m_has_used_fpu = m_has_used_fpu;
  429. return clone;
  430. }
// Block until the socket behind `descriptor` finishes connecting.
// Returns KSuccess on connection, -ECONNREFUSED if we woke up unconnected.
KResult Thread::wait_for_connect(FileDescriptor& descriptor)
{
    ASSERT(descriptor.is_socket());
    auto& socket = *descriptor.socket();
    // Already connected: nothing to wait for.
    if (socket.is_connected())
        return KSuccess;
    block(Thread::State::BlockedConnect, descriptor);
    Scheduler::yield();
    // Woke up but still not connected: the connection attempt failed.
    if (!socket.is_connected())
        return KResult(-ECONNREFUSED);
    return KSuccess;
}
// One-time boot initialization: create the global thread list before the
// scheduler starts using it.
void Thread::initialize()
{
    g_threads = new InlineLinkedList<Thread>;
    Scheduler::initialize();
}
  448. Vector<Thread*> Thread::all_threads()
  449. {
  450. Vector<Thread*> threads;
  451. InterruptDisabler disabler;
  452. for (auto* thread = g_threads->head(); thread; thread = thread->next())
  453. threads.append(thread);
  454. return threads;
  455. }
  456. bool Thread::is_thread(void* ptr)
  457. {
  458. ASSERT_INTERRUPTS_DISABLED();
  459. for (auto* thread = g_threads->head(); thread; thread = thread->next()) {
  460. if (thread == ptr)
  461. return true;
  462. }
  463. return false;
  464. }