// Kernel/Thread.cpp
  1. #include <Kernel/Thread.h>
  2. #include <Kernel/Scheduler.h>
  3. #include <Kernel/Process.h>
  4. #include <Kernel/FileSystem/FileDescriptor.h>
  5. #include <Kernel/VM/MemoryManager.h>
  6. #include <LibC/signal_numbers.h>
// Global list of all live threads (every thread except PID 0's — see the
// Thread constructor). Guarded by disabling interrupts.
InlineLinkedList<Thread>* g_threads;

// Default stack sizes for newly created threads.
static const dword default_kernel_stack_size = 16384;
static const dword default_userspace_stack_size = 65536;
  10. Thread::Thread(Process& process)
  11. : m_process(process)
  12. , m_tid(process.m_next_tid++)
  13. {
  14. dbgprintf("Thread{%p}: New thread TID=%u in %s(%u)\n", this, m_tid, process.name().characters(), process.pid());
  15. set_default_signal_dispositions();
  16. m_fpu_state = (FPUState*)kmalloc_aligned(sizeof(FPUState), 16);
  17. memset(&m_tss, 0, sizeof(m_tss));
  18. // Only IF is set when a process boots.
  19. m_tss.eflags = 0x0202;
  20. word cs, ds, ss;
  21. if (m_process.is_ring0()) {
  22. cs = 0x08;
  23. ds = 0x10;
  24. ss = 0x10;
  25. } else {
  26. cs = 0x1b;
  27. ds = 0x23;
  28. ss = 0x23;
  29. }
  30. m_tss.ds = ds;
  31. m_tss.es = ds;
  32. m_tss.fs = ds;
  33. m_tss.gs = ds;
  34. m_tss.ss = ss;
  35. m_tss.cs = cs;
  36. m_tss.cr3 = m_process.page_directory().cr3();
  37. if (m_process.is_ring0()) {
  38. // FIXME: This memory is leaked.
  39. // But uh, there's also no kernel process termination, so I guess it's not technically leaked...
  40. dword stack_bottom = (dword)kmalloc_eternal(default_kernel_stack_size);
  41. m_tss.esp = (stack_bottom + default_kernel_stack_size) & 0xffffff8;
  42. } else {
  43. // Ring3 processes need a separate stack for Ring0.
  44. m_kernel_stack = kmalloc(default_kernel_stack_size);
  45. m_tss.ss0 = 0x10;
  46. m_tss.esp0 = ((dword)m_kernel_stack + default_kernel_stack_size) & 0xffffff8;
  47. }
  48. // HACK: Ring2 SS in the TSS is the current PID.
  49. m_tss.ss2 = m_process.pid();
  50. m_far_ptr.offset = 0x98765432;
  51. if (m_process.pid() != 0) {
  52. InterruptDisabler disabler;
  53. g_threads->prepend(this);
  54. }
  55. }
// Tear down a thread: release its FPU state buffer, unlink it from the global
// thread list, drop cached references to it, and free its GDT entry and any
// kernel stacks it owned.
Thread::~Thread()
{
    dbgprintf("~Thread{%p}\n", this);
    kfree_aligned(m_fpu_state);
    {
        // Unlink from g_threads with interrupts disabled.
        InterruptDisabler disabler;
        g_threads->remove(this);
    }
    // Clear the lazy-FPU owner cache if it points at us.
    if (g_last_fpu_thread == this)
        g_last_fpu_thread = nullptr;
    // Release our GDT entry, if one was allocated.
    if (selector())
        gdt_free_entry(selector());
    if (m_kernel_stack) {
        kfree(m_kernel_stack);
        m_kernel_stack = nullptr;
    }
    if (m_kernel_stack_for_signal_handler) {
        kfree(m_kernel_stack_for_signal_handler);
        m_kernel_stack_for_signal_handler = nullptr;
    }
}
  77. void Thread::unblock()
  78. {
  79. m_blocked_descriptor = nullptr;
  80. if (current == this) {
  81. m_state = Thread::Running;
  82. return;
  83. }
  84. ASSERT(m_state != Thread::Runnable && m_state != Thread::Running);
  85. m_state = Thread::Runnable;
  86. }
// Block this thread until the given alarm fires, then yield to the scheduler.
void Thread::snooze_until(Alarm& alarm)
{
    m_snoozing_alarm = &alarm;
    block(Thread::BlockedSnoozing);
    Scheduler::yield();
}
// Transition this (currently Running) thread into 'new_state' and yield to
// the scheduler. The process big lock is dropped for the duration of the
// block (if held) and reacquired once we are scheduled again.
void Thread::block(Thread::State new_state)
{
    bool did_unlock = process().big_lock().unlock_if_locked();
    if (state() != Thread::Running) {
        // Diagnostic before the assert below fires.
        kprintf("Thread::block: %s(%u) block(%u/%s) with state=%u/%s\n", process().name().characters(), process().pid(), new_state, to_string(new_state), state(), to_string(state()));
    }
    ASSERT(state() == Thread::Running);
    m_was_interrupted_while_blocked = false;
    set_state(new_state);
    Scheduler::yield();
    // We only get here once unblocked and rescheduled.
    if (did_unlock)
        process().big_lock().lock();
}
// Block on a file descriptor (e.g. BlockedRead/BlockedWrite/BlockedConnect).
// The descriptor is remembered in m_blocked_descriptor; unblock() clears it.
void Thread::block(Thread::State new_state, FileDescriptor& descriptor)
{
    m_blocked_descriptor = &descriptor;
    block(new_state);
}
// Put the current thread to sleep for 'ticks' scheduler ticks.
// NOTE(review): the assert checks `this` but the body operates on `current`;
// presumably callers always invoke this as current->sleep(...) — verify.
void Thread::sleep(dword ticks)
{
    ASSERT(state() == Thread::Running);
    current->set_wakeup_time(g_uptime + ticks);
    current->block(Thread::BlockedSleep);
}
  117. const char* to_string(Thread::State state)
  118. {
  119. switch (state) {
  120. case Thread::Invalid: return "Invalid";
  121. case Thread::Runnable: return "Runnable";
  122. case Thread::Running: return "Running";
  123. case Thread::Dying: return "Dying";
  124. case Thread::Dead: return "Dead";
  125. case Thread::Stopped: return "Stopped";
  126. case Thread::Skip1SchedulerPass: return "Skip1";
  127. case Thread::Skip0SchedulerPasses: return "Skip0";
  128. case Thread::BlockedSleep: return "Sleep";
  129. case Thread::BlockedWait: return "Wait";
  130. case Thread::BlockedRead: return "Read";
  131. case Thread::BlockedWrite: return "Write";
  132. case Thread::BlockedSignal: return "Signal";
  133. case Thread::BlockedSelect: return "Select";
  134. case Thread::BlockedLurking: return "Lurking";
  135. case Thread::BlockedConnect: return "Connect";
  136. case Thread::BlockedReceive: return "Receive";
  137. case Thread::BlockedSnoozing: return "Snoozing";
  138. }
  139. kprintf("to_string(Thread::State): Invalid state: %u\n", state);
  140. ASSERT_NOT_REACHED();
  141. return nullptr;
  142. }
// Final teardown of a dying thread: mark it Dead and, if it is the process's
// main thread, finalize the whole process.
void Thread::finalize()
{
    dbgprintf("Finalizing Thread %u in %s(%u)\n", tid(), m_process.name().characters(), pid());
    set_state(Thread::State::Dead);
    m_blocked_descriptor = nullptr;
    if (this == &m_process.main_thread())
        m_process.finalize();
}
  151. void Thread::finalize_dying_threads()
  152. {
  153. Vector<Thread*, 32> dying_threads;
  154. {
  155. InterruptDisabler disabler;
  156. for_each_in_state(Thread::State::Dying, [&] (Thread& thread) {
  157. dying_threads.append(&thread);
  158. });
  159. }
  160. for (auto* thread : dying_threads)
  161. thread->finalize();
  162. }
  163. bool Thread::tick()
  164. {
  165. ++m_ticks;
  166. if (tss().cs & 3)
  167. ++m_process.m_ticks_in_user;
  168. else
  169. ++m_process.m_ticks_in_kernel;
  170. return --m_ticks_left;
  171. }
// Mark 'signal' as pending for this thread. 'sender' may be null for
// kernel-originated signals; it is used only for the debug log line here.
void Thread::send_signal(byte signal, Process* sender)
{
    ASSERT(signal < 32);
    if (sender)
        dbgprintf("signal: %s(%u) sent %d to %s(%u)\n", sender->name().characters(), sender->pid(), signal, process().name().characters(), pid());
    else
        dbgprintf("signal: kernel sent %d to %s(%u)\n", signal, process().name().characters(), pid());
    InterruptDisabler disabler;
    m_pending_signals |= 1 << signal;
}
  182. bool Thread::has_unmasked_pending_signals() const
  183. {
  184. return m_pending_signals & ~m_signal_mask;
  185. }
  186. ShouldUnblockThread Thread::dispatch_one_pending_signal()
  187. {
  188. ASSERT_INTERRUPTS_DISABLED();
  189. dword signal_candidates = m_pending_signals & ~m_signal_mask;
  190. ASSERT(signal_candidates);
  191. byte signal = 0;
  192. for (; signal < 32; ++signal) {
  193. if (signal_candidates & (1 << signal)) {
  194. break;
  195. }
  196. }
  197. return dispatch_signal(signal);
  198. }
// What the kernel does with a signal when no userspace handler is installed.
enum class DefaultSignalAction {
    Terminate,
    Ignore,
    DumpCore,
    Stop,
    Continue,
};
// Return the default disposition for 'signal' when no handler is installed.
// Groupings follow the traditional defaults described in signal(7).
DefaultSignalAction default_signal_action(byte signal)
{
    ASSERT(signal && signal < NSIG);
    switch (signal) {
    // Signals that terminate the process by default.
    case SIGHUP:
    case SIGINT:
    case SIGKILL:
    case SIGPIPE:
    case SIGALRM:
    case SIGUSR1:
    case SIGUSR2:
    case SIGVTALRM:
    case SIGSTKFLT:
    case SIGIO:
    case SIGPROF:
    case SIGTERM:
    case SIGPWR:
        return DefaultSignalAction::Terminate;
    // Signals that are ignored by default.
    case SIGCHLD:
    case SIGURG:
    case SIGWINCH:
        return DefaultSignalAction::Ignore;
    // Signals that terminate with a core dump by default.
    case SIGQUIT:
    case SIGILL:
    case SIGTRAP:
    case SIGABRT:
    case SIGBUS:
    case SIGFPE:
    case SIGSEGV:
    case SIGXCPU:
    case SIGXFSZ:
    case SIGSYS:
        return DefaultSignalAction::DumpCore;
    // Resumes a stopped process.
    case SIGCONT:
        return DefaultSignalAction::Continue;
    // Signals that stop the process.
    case SIGSTOP:
    case SIGTSTP:
    case SIGTTIN:
    case SIGTTOU:
        return DefaultSignalAction::Stop;
    }
    ASSERT_NOT_REACHED();
}
  249. ShouldUnblockThread Thread::dispatch_signal(byte signal)
  250. {
  251. ASSERT_INTERRUPTS_DISABLED();
  252. ASSERT(signal < 32);
  253. #ifdef SIGNAL_DEBUG
  254. kprintf("dispatch_signal %s(%u) <- %u\n", name().characters(), pid(), signal);
  255. #endif
  256. auto& action = m_signal_action_data[signal];
  257. // FIXME: Implement SA_SIGINFO signal handlers.
  258. ASSERT(!(action.flags & SA_SIGINFO));
  259. // Mark this signal as handled.
  260. m_pending_signals &= ~(1 << signal);
  261. if (signal == SIGSTOP) {
  262. set_state(Stopped);
  263. return ShouldUnblockThread::No;
  264. }
  265. if (signal == SIGCONT && state() == Stopped)
  266. set_state(Runnable);
  267. auto handler_laddr = action.handler_or_sigaction;
  268. if (handler_laddr.is_null()) {
  269. switch (default_signal_action(signal)) {
  270. case DefaultSignalAction::Stop:
  271. set_state(Stopped);
  272. return ShouldUnblockThread::No;
  273. case DefaultSignalAction::DumpCore:
  274. case DefaultSignalAction::Terminate:
  275. m_process.terminate_due_to_signal(signal);
  276. return ShouldUnblockThread::No;
  277. case DefaultSignalAction::Ignore:
  278. return ShouldUnblockThread::No;
  279. case DefaultSignalAction::Continue:
  280. return ShouldUnblockThread::Yes;
  281. }
  282. ASSERT_NOT_REACHED();
  283. }
  284. if (handler_laddr.as_ptr() == SIG_IGN) {
  285. #ifdef SIGNAL_DEBUG
  286. kprintf("%s(%u) ignored signal %u\n", name().characters(), pid(), signal);
  287. #endif
  288. return ShouldUnblockThread::Yes;
  289. }
  290. dword old_signal_mask = m_signal_mask;
  291. dword new_signal_mask = action.mask;
  292. if (action.flags & SA_NODEFER)
  293. new_signal_mask &= ~(1 << signal);
  294. else
  295. new_signal_mask |= 1 << signal;
  296. m_signal_mask |= new_signal_mask;
  297. Scheduler::prepare_to_modify_tss(*this);
  298. word ret_cs = m_tss.cs;
  299. dword ret_eip = m_tss.eip;
  300. dword ret_eflags = m_tss.eflags;
  301. bool interrupting_in_kernel = (ret_cs & 3) == 0;
  302. ProcessPagingScope paging_scope(m_process);
  303. m_process.create_signal_trampolines_if_needed();
  304. if (interrupting_in_kernel) {
  305. #ifdef SIGNAL_DEBUG
  306. kprintf("dispatch_signal to %s(%u) in state=%s with return to %w:%x\n", name().characters(), pid(), to_string(state()), ret_cs, ret_eip);
  307. #endif
  308. ASSERT(is_blocked());
  309. m_tss_to_resume_kernel = make<TSS32>(m_tss);
  310. #ifdef SIGNAL_DEBUG
  311. kprintf("resume tss pc: %w:%x stack: %w:%x flags: %x cr3: %x\n", m_tss_to_resume_kernel.cs, m_tss_to_resume_kernel->eip, m_tss_to_resume_kernel->ss, m_tss_to_resume_kernel->esp, m_tss_to_resume_kernel->eflags, m_tss_to_resume_kernel->cr3);
  312. #endif
  313. if (!m_signal_stack_user_region) {
  314. m_signal_stack_user_region = m_process.allocate_region(LinearAddress(), default_userspace_stack_size, "Signal stack (user)");
  315. ASSERT(m_signal_stack_user_region);
  316. }
  317. if (!m_kernel_stack_for_signal_handler) {
  318. m_kernel_stack_for_signal_handler = kmalloc(default_kernel_stack_size);
  319. ASSERT(m_kernel_stack_for_signal_handler);
  320. }
  321. m_tss.ss = 0x23;
  322. m_tss.esp = m_signal_stack_user_region->laddr().offset(default_userspace_stack_size).get();
  323. m_tss.ss0 = 0x10;
  324. m_tss.esp0 = (dword)m_kernel_stack_for_signal_handler + default_kernel_stack_size;
  325. push_value_on_stack(0);
  326. } else {
  327. push_value_on_stack(ret_eip);
  328. push_value_on_stack(ret_eflags);
  329. // PUSHA
  330. dword old_esp = m_tss.esp;
  331. push_value_on_stack(m_tss.eax);
  332. push_value_on_stack(m_tss.ecx);
  333. push_value_on_stack(m_tss.edx);
  334. push_value_on_stack(m_tss.ebx);
  335. push_value_on_stack(old_esp);
  336. push_value_on_stack(m_tss.ebp);
  337. push_value_on_stack(m_tss.esi);
  338. push_value_on_stack(m_tss.edi);
  339. // Align the stack.
  340. m_tss.esp -= 12;
  341. }
  342. // PUSH old_signal_mask
  343. push_value_on_stack(old_signal_mask);
  344. m_tss.cs = 0x1b;
  345. m_tss.ds = 0x23;
  346. m_tss.es = 0x23;
  347. m_tss.fs = 0x23;
  348. m_tss.gs = 0x23;
  349. m_tss.eip = handler_laddr.get();
  350. // FIXME: Should we worry about the stack being 16 byte aligned when entering a signal handler?
  351. push_value_on_stack(signal);
  352. if (interrupting_in_kernel)
  353. push_value_on_stack(m_process.m_return_to_ring0_from_signal_trampoline.get());
  354. else
  355. push_value_on_stack(m_process.m_return_to_ring3_from_signal_trampoline.get());
  356. ASSERT((m_tss.esp % 16) == 0);
  357. // FIXME: This state is such a hack. It avoids trouble if 'current' is the process receiving a signal.
  358. set_state(Skip1SchedulerPass);
  359. #ifdef SIGNAL_DEBUG
  360. kprintf("signal: Okay, %s(%u) {%s} has been primed with signal handler %w:%x\n", name().characters(), pid(), to_string(state()), m_tss.cs, m_tss.eip);
  361. #endif
  362. return ShouldUnblockThread::Yes;
  363. }
// Reset all signal actions to null handlers (i.e. default behavior), then
// explicitly mark SIGCHLD and SIGWINCH as ignored.
void Thread::set_default_signal_dispositions()
{
    // FIXME: Set up all the right default actions. See signal(7).
    memset(&m_signal_action_data, 0, sizeof(m_signal_action_data));
    m_signal_action_data[SIGCHLD].handler_or_sigaction = LinearAddress((dword)SIG_IGN);
    m_signal_action_data[SIGWINCH].handler_or_sigaction = LinearAddress((dword)SIG_IGN);
}
  371. void Thread::push_value_on_stack(dword value)
  372. {
  373. m_tss.esp -= 4;
  374. dword* stack_ptr = (dword*)m_tss.esp;
  375. *stack_ptr = value;
  376. }
// Build the initial userspace stack for the process's main thread:
// allocates a stack region, lays out argv[]/envp[] pointer arrays followed by
// their string data at the region's base, and pushes (env, argv, argc, 0) so
// the entry point sees a conventional C runtime frame.
void Thread::make_userspace_stack_for_main_thread(Vector<String> arguments, Vector<String> environment)
{
    auto* region = m_process.allocate_region(LinearAddress(), default_userspace_stack_size, "stack");
    ASSERT(region);
    // Stack grows down from the top of the region.
    m_tss.esp = region->laddr().offset(default_userspace_stack_size).get();
    char* stack_base = (char*)region->laddr().get();
    int argc = arguments.size();
    // Layout at the region base: argv pointers, env pointers, then the
    // NUL-terminated string blobs they point into.
    char** argv = (char**)stack_base;
    char** env = argv + arguments.size() + 1;
    char* bufptr = stack_base + (sizeof(char*) * (arguments.size() + 1)) + (sizeof(char*) * (environment.size() + 1));
    size_t total_blob_size = 0;
    for (auto& a : arguments)
        total_blob_size += a.length() + 1;
    for (auto& e : environment)
        total_blob_size += e.length() + 1;
    size_t total_meta_size = sizeof(char*) * (arguments.size() + 1) + sizeof(char*) * (environment.size() + 1);
    // FIXME: It would be better if this didn't make us panic.
    ASSERT((total_blob_size + total_meta_size) < default_userspace_stack_size);
    // Copy each argument string and record its address in argv.
    for (int i = 0; i < arguments.size(); ++i) {
        argv[i] = bufptr;
        memcpy(bufptr, arguments[i].characters(), arguments[i].length());
        bufptr += arguments[i].length();
        *(bufptr++) = '\0';
    }
    argv[arguments.size()] = nullptr;
    // Same for the environment strings.
    for (int i = 0; i < environment.size(); ++i) {
        env[i] = bufptr;
        memcpy(bufptr, environment[i].characters(), environment[i].length());
        bufptr += environment[i].length();
        *(bufptr++) = '\0';
    }
    env[environment.size()] = nullptr;
    // NOTE: The stack needs to be 16-byte aligned.
    push_value_on_stack((dword)env);
    push_value_on_stack((dword)argv);
    push_value_on_stack((dword)argc);
    push_value_on_stack(0);
}
  415. void Thread::make_userspace_stack_for_secondary_thread(void *argument)
  416. {
  417. auto* region = m_process.allocate_region(LinearAddress(), default_userspace_stack_size, String::format("Thread %u Stack", tid()));
  418. ASSERT(region);
  419. m_tss.esp = region->laddr().offset(default_userspace_stack_size).get();
  420. // NOTE: The stack needs to be 16-byte aligned.
  421. push_value_on_stack((dword)argument);
  422. push_value_on_stack(0);
  423. }
  424. Thread* Thread::clone(Process& process)
  425. {
  426. auto* clone = new Thread(process);
  427. memcpy(clone->m_signal_action_data, m_signal_action_data, sizeof(m_signal_action_data));
  428. clone->m_signal_mask = m_signal_mask;
  429. clone->m_fpu_state = (FPUState*)kmalloc_aligned(sizeof(FPUState), 16);
  430. memcpy(clone->m_fpu_state, m_fpu_state, sizeof(FPUState));
  431. clone->m_has_used_fpu = m_has_used_fpu;
  432. return clone;
  433. }
// Block until the socket behind 'descriptor' is connected.
// Returns KSuccess immediately if already connected; if we get unblocked
// without the connection having completed, returns -ECONNREFUSED.
KResult Thread::wait_for_connect(FileDescriptor& descriptor)
{
    ASSERT(descriptor.is_socket());
    auto& socket = *descriptor.socket();
    if (socket.is_connected())
        return KSuccess;
    block(Thread::State::BlockedConnect, descriptor);
    Scheduler::yield();
    if (!socket.is_connected())
        return KResult(-ECONNREFUSED);
    return KSuccess;
}
// One-time boot setup: create the global thread list, then bring up the
// scheduler (which depends on it).
void Thread::initialize()
{
    g_threads = new InlineLinkedList<Thread>;
    Scheduler::initialize();
}
  451. Vector<Thread*> Thread::all_threads()
  452. {
  453. Vector<Thread*> threads;
  454. InterruptDisabler disabler;
  455. for (auto* thread = g_threads->head(); thread; thread = thread->next())
  456. threads.append(thread);
  457. return threads;
  458. }
  459. bool Thread::is_thread(void* ptr)
  460. {
  461. ASSERT_INTERRUPTS_DISABLED();
  462. for (auto* thread = g_threads->head(); thread; thread = thread->next()) {
  463. if (thread == ptr)
  464. return true;
  465. }
  466. return false;
  467. }