Task.cpp

#include "types.h"
#include "Task.h"
#include "kmalloc.h"
#include "VGA.h"
#include "StdLib.h"
#include "i386.h"
#include "system.h"
#include <VirtualFileSystem/FileHandle.h>
#include <VirtualFileSystem/VirtualFileSystem.h>
#include <ELFLoader/ExecSpace.h>
#include "MemoryManager.h"

//#define DEBUG_IO

Task* current;
Task* s_kernelTask;

static pid_t next_pid;
static InlineLinkedList<Task>* s_tasks;

static bool contextSwitch(Task*);
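
// Refresh the GDT entry that describes the kernel task's TSS: allocate a
// selector if it doesn't have one yet, point the descriptor at the TSS, and
// mark it as an available 32-bit TSS (type 9) so it can be used as a switch target.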
static void redoKernelTaskTSS()
{
    if (!s_kernelTask->selector())
        s_kernelTask->setSelector(allocateGDTEntry());

    auto& tssDescriptor = getGDTEntry(s_kernelTask->selector());
    tssDescriptor.setBase(&s_kernelTask->tss());
    tssDescriptor.setLimit(0xffff);
    tssDescriptor.dpl = 0;
    tssDescriptor.segment_present = 1;
    tssDescriptor.granularity = 1;
    tssDescriptor.zero = 0;
    tssDescriptor.operation_size = 1;
    tssDescriptor.descriptor_type = 0;
    tssDescriptor.type = 9;

    flushGDT();
}

void Task::prepForIRETToNewTask()
{
    redoKernelTaskTSS();
    s_kernelTask->tss().backlink = current->selector();
    loadTaskRegister(s_kernelTask->selector());
}
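
// One-time scheduler setup: no current task yet, PIDs start at 0, and the
// "colonel" kernel task (PID 0) is created so there is always something to run.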
void Task::initialize()
{
    current = nullptr;
    next_pid = 0;
    s_tasks = new InlineLinkedList<Task>;
    s_kernelTask = new Task(0, "colonel", IPC::Handle::Any, Task::Ring0);
    redoKernelTaskTSS();
    loadTaskRegister(s_kernelTask->selector());
}

#ifdef TASK_SANITY_CHECKS
void Task::checkSanity(const char* msg)
{
    char ch = current->name()[0];
    kprintf("<%p> %s{%u}%b [%d] :%b: sanity check <%s>\n",
            current->name().characters(),
            current->name().characters(),
            current->name().length(),
            current->name()[current->name().length() - 1],
            current->pid(), ch, msg ? msg : "");
    ASSERT((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z'));
}
#endif
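
// Give this task a tiny private LDT (4 entries) and publish it through a
// freshly allocated GDT entry so the TSS can reference it via m_tss.ldt.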
void Task::allocateLDT()
{
    ASSERT(!m_tss.ldt);
    static const WORD numLDTEntries = 4;
    WORD newLDTSelector = allocateGDTEntry();
    m_ldtEntries = new Descriptor[numLDTEntries];
#if 0
    kprintf("new ldt selector = %x\n", newLDTSelector);
    kprintf("new ldt table at = %p\n", m_ldtEntries);
    kprintf("new ldt table size = %u\n", (numLDTEntries * 8) - 1);
#endif
    Descriptor& ldt = getGDTEntry(newLDTSelector);
    ldt.setBase(m_ldtEntries);
    ldt.setLimit(numLDTEntries * 8 - 1);
    ldt.dpl = 0;
    ldt.segment_present = 1;
    ldt.granularity = 0;
    ldt.zero = 0;
    ldt.operation_size = 1;
    ldt.descriptor_type = 0;
    ldt.type = Descriptor::LDT;
    m_tss.ldt = newLDTSelector;
}
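
// Carve out a new region of the task's address space at m_nextRegion, backed
// by a freshly created zone, then bump m_nextRegion past it (plus a 16KB gap).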
Task::Region* Task::allocateRegion(size_t size, String&& name)
{
    // FIXME: This needs sanity checks. What if this overlaps existing regions?
    auto zone = MemoryManager::the().createZone(size);
    ASSERT(zone);
    m_regions.append(make<Region>(m_nextRegion, size, move(zone), move(name)));
    m_nextRegion = m_nextRegion.offset(size).offset(16384);
    return m_regions.last().ptr();
}

int Task::sys$spawn(const char* path)
{
    auto* child = Task::create(path, m_uid, m_gid);
    if (child)
        return child->pid();
    return -1;
}
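
// Load an executable from the VFS and build a new Ring3 task around it:
// read the ELF image, allocate and map regions for it via hookableAlloc,
// resolve _start as the entry point, and put the task on the run list.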
Task* Task::create(const String& path, uid_t uid, gid_t gid)
{
    auto parts = path.split('/');
    if (parts.isEmpty())
        return nullptr;

    auto handle = VirtualFileSystem::the().open(path);
    if (!handle)
        return nullptr;

    auto elfData = handle->readEntireFile();
    if (!elfData)
        return nullptr;

    cli();
    Task* t = new Task(parts.takeLast(), uid, gid);

    ExecSpace space;
    space.hookableAlloc = [&] (const String& name, size_t size) {
        if (!size)
            return (void*)nullptr;
        size = ((size / 4096) + 1) * 4096;
        Region* region = t->allocateRegion(size, String(name));
        ASSERT(region);
        MemoryManager::the().mapRegion(*t, *region);
        return (void*)region->linearAddress.asPtr();
    };
    bool success = space.loadELF(move(elfData));
    if (!success) {
        delete t;
        return nullptr;
    }

    t->m_tss.eip = (dword)space.symbolPtr("_start");
    if (!t->m_tss.eip) {
        delete t;
        return nullptr;
    }

    MemoryManager::the().unmapRegionsForTask(*t);
    MemoryManager::the().mapRegionsForTask(*current);

    s_tasks->prepend(t);
    system.nprocess++;

    kprintf("Task %u (%s) spawned @ %p\n", t->pid(), t->name().characters(), t->m_tss.eip);
    sti();
    return t;
}

Task::Task(String&& name, uid_t uid, gid_t gid)
    : m_name(move(name))
    , m_pid(next_pid++)
    , m_uid(uid)
    , m_gid(gid)
    , m_state(Runnable)
    , m_ring(Ring3)
{
    m_nextRegion = LinearAddress(0x600000);

    memset(&m_tss, 0, sizeof(m_tss));
    memset(&m_ldtEntries, 0, sizeof(m_ldtEntries));

    allocateLDT();

    // Only IF is set when a task boots.
    m_tss.eflags = 0x0202;

    WORD codeSegment = 0x1b;
    WORD dataSegment = 0x23;
    WORD stackSegment = dataSegment;

    m_tss.ds = dataSegment;
    m_tss.es = dataSegment;
    m_tss.fs = dataSegment;
    m_tss.gs = dataSegment;
    m_tss.ss = stackSegment;
    m_tss.cs = codeSegment;

    m_tss.cr3 = MemoryManager::the().pageDirectoryBase().get();

    // NOTE: Each task gets 16KB of stack.
    static const DWORD defaultStackSize = 16384;
    auto* region = allocateRegion(defaultStackSize, "stack");
    ASSERT(region);
    m_stackTop = region->linearAddress.offset(defaultStackSize).get() & 0xfffffff8;
    m_tss.esp = m_stackTop;

    // Set up a separate stack for Ring0.
    // FIXME: Don't leak this stack.
    m_kernelStack = kmalloc(defaultStackSize);
    DWORD ring0StackTop = ((DWORD)m_kernelStack + defaultStackSize) & 0xfffffff8; // 8-byte align, matching the user stack top above.
    m_tss.ss0 = 0x10;
    m_tss.esp0 = ring0StackTop;

    // HACK: Ring2 SS in the TSS is the current PID.
    m_tss.ss2 = m_pid;
    m_farPtr.offset = 0x98765432;

    ASSERT(m_pid);
}

Task::Task(void (*e)(), const char* n, IPC::Handle h, RingLevel ring)
    : m_name(n)
    , m_entry(e)
    , m_pid(next_pid++)
    , m_handle(h)
    , m_state(Runnable)
    , m_ring(ring)
{
    m_nextRegion = LinearAddress(0x600000);

    Region* codeRegion = nullptr;
    if (!isRing0()) {
        codeRegion = allocateRegion(4096, "code");
        ASSERT(codeRegion);
        bool success = copyToZone(*codeRegion->zone, (void*)e, PAGE_SIZE);
        ASSERT(success);
    }

    memset(&m_tss, 0, sizeof(m_tss));
    memset(&m_ldtEntries, 0, sizeof(m_ldtEntries));

    if (ring == Ring3) {
        allocateLDT();
    }

    // Only IF is set when a task boots.
    m_tss.eflags = 0x0202;

    WORD dataSegment;
    WORD stackSegment;
    WORD codeSegment;

    if (ring == Ring0) {
        codeSegment = 0x08;
        dataSegment = 0x10;
        stackSegment = dataSegment;
    } else {
        codeSegment = 0x1b;
        dataSegment = 0x23;
        stackSegment = dataSegment;
    }

    m_tss.ds = dataSegment;
    m_tss.es = dataSegment;
    m_tss.fs = dataSegment;
    m_tss.gs = dataSegment;
    m_tss.ss = stackSegment;
    m_tss.cs = codeSegment;

    ASSERT((codeSegment & 3) == (stackSegment & 3));

    m_tss.cr3 = MemoryManager::the().pageDirectoryBase().get();

    if (isRing0()) {
        m_tss.eip = (DWORD)m_entry;
    } else {
        m_tss.eip = codeRegion->linearAddress.get();
    }

    // NOTE: Each task gets 16KB of stack.
    static const DWORD defaultStackSize = 16384;

    if (isRing0()) {
        // FIXME: This memory is leaked.
        // But uh, there's also no kernel task termination, so I guess it's not technically leaked...
        dword stackBottom = (dword)kmalloc(defaultStackSize);
        m_stackTop = (stackBottom + defaultStackSize) & 0xfffffff8; // 8-byte align the stack top.
        m_tss.esp = m_stackTop;
    } else {
        auto* region = allocateRegion(defaultStackSize, "stack");
        ASSERT(region);
        m_stackTop = region->linearAddress.offset(defaultStackSize).get() & 0xfffffff8;
        m_tss.esp = m_stackTop;
    }

    if (ring == Ring3) {
        // Set up a separate stack for Ring0.
        // FIXME: Don't leak this stack either.
        m_kernelStack = kmalloc(defaultStackSize);
        DWORD ring0StackTop = ((DWORD)m_kernelStack + defaultStackSize) & 0xfffffff8;
        m_tss.ss0 = 0x10;
        m_tss.esp0 = ring0StackTop;
    }

    // HACK: Ring2 SS in the TSS is the current PID.
    m_tss.ss2 = m_pid;
    m_farPtr.offset = 0x12345678;

    // Don't add task 0 (kernel dummy task) to task list.
    // FIXME: This doesn't belong in the constructor.
    if (m_pid == 0)
        return;

    // Add it to head of task list (meaning it's next to run too, ATM.)
    s_tasks->prepend(this);
    system.nprocess++;

    kprintf("Task %u (%s) spawned @ %p\n", m_pid, m_name.characters(), m_tss.eip);
}

Task::~Task()
{
    system.nprocess--;
    delete [] m_ldtEntries;
    m_ldtEntries = nullptr;

    // FIXME: The task's kernel stack is currently leaked, because otherwise we GPF.
    // This obviously needs figuring out.
#if 0
    if (m_kernelStack) {
        kfree(m_kernelStack);
        m_kernelStack = nullptr;
    }
#endif
}

void Task::dumpRegions()
{
    kprintf("Task %s(%u) regions:\n", name().characters(), pid());
    kprintf("BEGIN END SIZE NAME\n");
    for (auto& region : m_regions) {
        kprintf("%x -- %x %x %s\n",
                region->linearAddress.get(),
                region->linearAddress.offset(region->size - 1).get(),
                region->size,
                region->name.characters());
    }
}

void Task::sys$exit(int status)
{
    cli();
    kprintf("sys$exit: %s(%u) exit with status %d\n", name().characters(), pid(), status);

    setState(Exiting);
    s_tasks->remove(this);

    if (!scheduleNewTask()) {
        kprintf("Task::sys$exit: Failed to schedule a new task :(\n");
        HANG;
    }

    delete this;
    switchNow();
}

void Task::taskDidCrash(Task* crashedTask)
{
    // NOTE: This is called from an exception handler, so interrupts are disabled.
    crashedTask->setState(Crashing);

    // crashedTask->dumpRegions();

    s_tasks->remove(crashedTask);
    MemoryManager::the().unmapRegionsForTask(*crashedTask);

    if (!scheduleNewTask()) {
        kprintf("Task::taskDidCrash: Failed to schedule a new task :(\n");
        HANG;
    }

    delete crashedTask;
    switchNow();
}
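
// Give up the CPU voluntarily: pick another runnable task and jump to it.
// If nothing else wants to run, just carry on in the current task.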
void yield()
{
    if (!current) {
        kprintf("PANIC: yield() with !current");
        HANG;
    }

    //kprintf("%s<%u> yield()\n", current->name().characters(), current->pid());

    cli();
    if (!scheduleNewTask()) {
        sti();
        return;
    }

    //kprintf("yield() jumping to new task: %x (%s)\n", current->farPtr().selector, current->name().characters());
    switchNow();
}

void switchNow()
{
    // The TSS descriptor must be marked "available" (type 9) again before we can
    // far-jump to it; the ljmp through the task's far pointer does the actual switch.
    Descriptor& descriptor = getGDTEntry(current->selector());
    descriptor.type = 9;
    flushGDT();
    asm("sti\n"
        "ljmp *(%%eax)\n"
        ::"a"(&current->farPtr())
    );
}
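
// Scheduler core: first wake any blocked tasks whose wait conditions are now
// satisfied (IPC arrived, peer ready to receive, sleep expired), then rotate
// the task list round-robin until a Runnable/Running task is found, falling
// back to the kernel task if nothing wants to run. Returns true if we actually
// switched to a different task.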
bool scheduleNewTask()
{
    if (!current) {
        // XXX: The first ever context_switch() goes to the idle task.
        //      This is to set up a reliable place we can return to.
        return contextSwitch(Task::kernelTask());
    }

#if 0
    kprintf("Scheduler choices:\n");
    for (auto* task = s_tasks->head(); task; task = task->next()) {
        kprintf("%p %u %s\n", task, task->pid(), task->name().characters());
    }
#endif

    // Check and unblock tasks whose wait conditions have been met.
    for (auto* task = s_tasks->head(); task; task = task->next()) {
        if (task->state() == Task::BlockedReceive && (task->ipc.msg.isValid() || task->ipc.notifies)) {
            task->unblock();
            continue;
        }

        if (task->state() == Task::BlockedSend) {
            Task* peer = Task::fromIPCHandle(task->ipc.dst);
            if (peer && peer->state() == Task::BlockedReceive && peer->acceptsMessageFrom(*task)) {
                task->unblock();
                continue;
            }
        }

        if (task->state() == Task::BlockedSleep) {
            if (task->wakeupTime() <= system.uptime) {
                task->unblock();
                continue;
            }
        }
    }

    auto* prevHead = s_tasks->head();
    for (;;) {
        // Move head to tail.
        s_tasks->append(s_tasks->removeHead());
        auto* task = s_tasks->head();

        if (task->state() == Task::Runnable || task->state() == Task::Running) {
            //kprintf("switch to %s (%p vs %p)\n", task->name().characters(), task, current);
            return contextSwitch(task);
        }

        if (task == prevHead) {
            // Back at task_head, nothing wants to run.
            kprintf("Switch to kernel task\n");
            return contextSwitch(Task::kernelTask());
        }
    }
}

static void drawSchedulerBanner(Task& task)
{
    return;
    // FIXME: We need a kernel lock to do stuff like this :(
    //return;
    auto c = vga_get_cursor();
    auto a = vga_get_attr();
    vga_set_cursor(0, 50);
    vga_set_attr(0x20);
    kprintf(" ");
    kprintf(" ");
    kprintf(" ");
    vga_set_cursor(0, 50);
    kprintf("pid: %u ", task.pid());
    vga_set_cursor(0, 58);
    kprintf("%s", task.name().characters());
    vga_set_cursor(0, 65);
    kprintf("eip: %p", task.tss().eip);
    vga_set_attr(a);
    vga_set_cursor(c);
}
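
// Bookkeeping for moving the CPU to task t: give it a fresh time slice, swap
// the mapped memory regions from the outgoing task to t, and rebuild t's TSS
// descriptor in the GDT (marked busy). The actual jump happens in switchNow().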
static bool contextSwitch(Task* t)
{
    //kprintf("c_s to %s (same:%u)\n", t->name().characters(), current == t);
    t->setTicksLeft(5);

    if (current == t)
        return false;

    // Some sanity checking to force a crash earlier.
    auto csRPL = t->tss().cs & 3;
    auto ssRPL = t->tss().ss & 3;

    if (csRPL != ssRPL) {
        kprintf("Fuckup! Switching from %s(%u) to %s(%u) has RPL mismatch\n",
                current->name().characters(), current->pid(),
                t->name().characters(), t->pid());
        kprintf("code: %w:%x\n", t->tss().cs, t->tss().eip);
        kprintf(" stk: %w:%x\n", t->tss().ss, t->tss().esp);
        ASSERT(csRPL == ssRPL);
    }

    if (current) {
        // If the last task hasn't blocked (still marked as running),
        // mark it as runnable for the next round.
        if (current->state() == Task::Running)
            current->setState(Task::Runnable);

        bool success = MemoryManager::the().unmapRegionsForTask(*current);
        ASSERT(success);
    }

    bool success = MemoryManager::the().mapRegionsForTask(*t);
    ASSERT(success);

    current = t;
    t->setState(Task::Running);

    if (!t->selector())
        t->setSelector(allocateGDTEntry());

    auto& tssDescriptor = getGDTEntry(t->selector());
    tssDescriptor.limit_hi = 0;
    tssDescriptor.limit_lo = 0xFFFF;
    tssDescriptor.base_lo = (DWORD)(&t->tss()) & 0xFFFF;
    tssDescriptor.base_hi = ((DWORD)(&t->tss()) >> 16) & 0xFF;
    tssDescriptor.base_hi2 = ((DWORD)(&t->tss()) >> 24) & 0xFF;
    tssDescriptor.dpl = 0;
    tssDescriptor.segment_present = 1;
    tssDescriptor.granularity = 1;
    tssDescriptor.zero = 0;
    tssDescriptor.operation_size = 1;
    tssDescriptor.descriptor_type = 0;
    tssDescriptor.type = 11; // Busy TSS
    flushGDT();

    drawSchedulerBanner(*t);

    t->didSchedule();
    return true;
}

Task* Task::fromPID(pid_t pid)
{
    for (auto* task = s_tasks->head(); task; task = task->next()) {
        if (task->pid() == pid)
            return task;
    }
    return nullptr;
}

Task* Task::fromIPCHandle(IPC::Handle handle)
{
    for (auto* task = s_tasks->head(); task; task = task->next()) {
        if (task->handle() == handle)
            return task;
    }
    return nullptr;
}

FileHandle* Task::fileHandleIfExists(int fd)
{
    if (fd < 0)
        return nullptr;
    if ((unsigned)fd < m_fileHandles.size())
        return m_fileHandles[fd].ptr();
    return nullptr;
}

int Task::sys$seek(int fd, int offset)
{
    auto* handle = fileHandleIfExists(fd);
    if (!handle)
        return -1;
    return handle->seek(offset, SEEK_SET);
}

ssize_t Task::sys$read(int fd, void* outbuf, size_t nread)
{
    Task::checkSanity("Task::sys$read");
#ifdef DEBUG_IO
    kprintf("Task::sys$read: called(%d, %p, %u)\n", fd, outbuf, nread);
#endif
    auto* handle = fileHandleIfExists(fd);
#ifdef DEBUG_IO
    kprintf("Task::sys$read: handle=%p\n", handle);
#endif
    if (!handle) {
        kprintf("Task::sys$read: handle not found :(\n");
        return -1;
    }
#ifdef DEBUG_IO
    kprintf("call read on handle=%p\n", handle);
#endif
    nread = handle->read((byte*)outbuf, nread);
#ifdef DEBUG_IO
    kprintf("Task::sys$read: nread=%u\n", nread);
#endif
    return nread;
}

int Task::sys$close(int fd)
{
    auto* handle = fileHandleIfExists(fd);
    if (!handle)
        return -1;
    // FIXME: Implement.
    return 0;
}

int Task::sys$open(const char* path, size_t pathLength)
{
    Task::checkSanity("sys$open");
#ifdef DEBUG_IO
    kprintf("Task::sys$open(): PID=%u, path=%s {%u}\n", m_pid, path, pathLength);
#endif
    auto* handle = current->openFile(String(path, pathLength));
    if (handle)
        return handle->fd();
    return -1;
}
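
// Ask the VFS for a handle to the given path, assign it the next file
// descriptor (its index in m_fileHandles), and keep ownership in the task's
// handle table.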
FileHandle* Task::openFile(String&& path)
{
    auto handle = VirtualFileSystem::the().open(move(path));
    if (!handle) {
        kprintf("vfs::open() failed\n");
        return nullptr;
    }
    handle->setFD(m_fileHandles.size());
#ifdef DEBUG_IO
    kprintf("vfs::open() worked! handle=%p, fd=%d\n", handle.ptr(), handle->fd());
#endif
    m_fileHandles.append(move(handle)); // FIXME: allow non-move Vector::append
    return m_fileHandles.last().ptr();
}

int Task::sys$kill(pid_t pid, int sig)
{
    (void) sig;
    if (pid == 0) {
        // FIXME: Send to same-group processes.
        ASSERT(pid != 0);
    }
    if (pid == -1) {
        // FIXME: Send to all processes.
        ASSERT(pid != -1);
    }
    ASSERT_NOT_REACHED();
    Task* peer = Task::fromPID(pid);
    if (!peer) {
        // errno = ESRCH;
        return -1;
    }
#if 0
    send(peer->handle(), IPC::Message(SYS_KILL, DataBuffer::copy((BYTE*)&sig, sizeof(sig))));
    IPC::Message response = receive(peer->handle());
    return *(int*)response.data();
#endif
    return -1;
}

uid_t Task::sys$getuid()
{
    return m_uid;
}

gid_t Task::sys$getgid()
{
    return m_gid;
}

pid_t Task::sys$getpid()
{
    return m_pid;
}
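
// A blocked receiver accepts a message if it has no message pending and the
// sender matches its ipc.src filter (or the filter is IPC::Handle::Any).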
bool Task::acceptsMessageFrom(Task& peer)
{
    return !ipc.msg.isValid() && (ipc.src == IPC::Handle::Any || ipc.src == peer.handle());
}

void Task::unblock()
{
    ASSERT(m_state != Task::Runnable && m_state != Task::Running);
    system.nblocked--;
    m_state = Task::Runnable;
}

void Task::block(Task::State state)
{
    ASSERT(current->state() == Task::Running);
    system.nblocked++;
    current->setState(state);
}

void block(Task::State state)
{
    current->block(state);
    yield();
}

void sleep(DWORD ticks)
{
    ASSERT(current->state() == Task::Running);
    current->setWakeupTime(system.uptime + ticks);
    current->block(Task::BlockedSleep);
    yield();
}

void Task::sys$sleep(DWORD ticks)
{
    ASSERT(this == current);
    sleep(ticks);
}

Task* Task::kernelTask()
{
    ASSERT(s_kernelTask);
    return s_kernelTask;
}

void Task::setError(int error)
{
    m_error = error;
}

Task::Region::Region(LinearAddress a, size_t s, RetainPtr<Zone>&& z, String&& n)
    : linearAddress(a)
    , size(s)
    , zone(move(z))
    , name(move(n))
{
}

Task::Region::~Region()
{
}