#include "types.h"
#include "Task.h"
#include "kmalloc.h"
#include "VGA.h"
#include "StdLib.h"
#include "i386.h"
#include "system.h"
#include <VirtualFileSystem/FileHandle.h>
#include <VirtualFileSystem/VirtualFileSystem.h>
#include <ELFLoader/ExecSpace.h>
#include "MemoryManager.h"

//#define DEBUG_IO
//#define TASK_DEBUG

static const DWORD defaultStackSize = 16384;

Task* current;
Task* s_kernelTask;

static pid_t next_pid;
static InlineLinkedList<Task>* s_tasks;
static InlineLinkedList<Task>* s_deadTasks;

static bool contextSwitch(Task*);

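// Refresh the GDT descriptor for the kernel task's TSS: allocate a selector on
// first use, point the descriptor at the kernel task's TSS, and mark it as an
// available 32-bit TSS before reloading the GDT.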
static void redoKernelTaskTSS()
{
    if (!s_kernelTask->selector())
        s_kernelTask->setSelector(allocateGDTEntry());

    auto& tssDescriptor = getGDTEntry(s_kernelTask->selector());

    tssDescriptor.setBase(&s_kernelTask->tss());
    tssDescriptor.setLimit(0xffff);
    tssDescriptor.dpl = 0;
    tssDescriptor.segment_present = 1;
    tssDescriptor.granularity = 1;
    tssDescriptor.zero = 0;
    tssDescriptor.operation_size = 1;
    tssDescriptor.descriptor_type = 0;
    tssDescriptor.type = 9;
    flushGDT();
}

void Task::prepForIRETToNewTask()
{
    redoKernelTaskTSS();
    s_kernelTask->tss().backlink = current->selector();
    loadTaskRegister(s_kernelTask->selector());
}

void Task::initialize()
{
    current = nullptr;
    next_pid = 0;
    s_tasks = new InlineLinkedList<Task>;
    s_deadTasks = new InlineLinkedList<Task>;
    s_kernelTask = new Task(0, "colonel", IPC::Handle::Any, Task::Ring0);
    redoKernelTaskTSS();
    loadTaskRegister(s_kernelTask->selector());
}

#ifdef TASK_SANITY_CHECKS
void Task::checkSanity(const char* msg)
{
    char ch = current->name()[0];
    kprintf("<%p> %s{%u}%b [%d] :%b: sanity check <%s>\n",
            current->name().characters(),
            current->name().characters(),
            current->name().length(),
            current->name()[current->name().length() - 1],
            current->pid(), ch, msg ? msg : "");
    ASSERT((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z'));
}
#endif

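// Set up a small per-task LDT (4 entries) and install a descriptor for it in
// the GDT, recording the new selector in the task's TSS.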
void Task::allocateLDT()
{
    ASSERT(!m_tss.ldt);
    static const WORD numLDTEntries = 4;
    WORD newLDTSelector = allocateGDTEntry();
    m_ldtEntries = new Descriptor[numLDTEntries];
#if 0
    kprintf("new ldt selector = %x\n", newLDTSelector);
    kprintf("new ldt table at = %p\n", m_ldtEntries);
    kprintf("new ldt table size = %u\n", (numLDTEntries * 8) - 1);
#endif
    Descriptor& ldt = getGDTEntry(newLDTSelector);
    ldt.setBase(m_ldtEntries);
    ldt.setLimit(numLDTEntries * 8 - 1);
    ldt.dpl = 0;
    ldt.segment_present = 1;
    ldt.granularity = 0;
    ldt.zero = 0;
    ldt.operation_size = 1;
    ldt.descriptor_type = 0;
    ldt.type = Descriptor::LDT;
    m_tss.ldt = newLDTSelector;
}

Vector<Task*> Task::allTasks()
{
    Vector<Task*> tasks;
    tasks.ensureCapacity(s_tasks->sizeSlow());
    for (auto* task = s_tasks->head(); task; task = task->next())
        tasks.append(task);
    return tasks;
}

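// Regions are handed out sequentially from m_nextRegion, with a 16 KB gap left
// between consecutive regions (presumably as a guard against overruns).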
Task::Region* Task::allocateRegion(size_t size, String&& name)
{
    // FIXME: This needs sanity checks. What if this overlaps existing regions?
    auto zone = MemoryManager::the().createZone(size);
    ASSERT(zone);
    m_regions.append(make<Region>(m_nextRegion, size, move(zone), move(name)));
    m_nextRegion = m_nextRegion.offset(size).offset(16384);
    return m_regions.last().ptr();
}

bool Task::deallocateRegion(Region& region)
{
    for (size_t i = 0; i < m_regions.size(); ++i) {
        if (m_regions[i].ptr() == &region) {
            // FIXME: This seems racy.
            MemoryManager::the().unmapRegion(*this, region);
            m_regions.remove(i);
            return true;
        }
    }
    return false;
}

Task::Region* Task::regionFromRange(LinearAddress laddr, size_t size)
{
    for (auto& region : m_regions) {
        if (region->linearAddress == laddr && region->size == size)
            return region.ptr();
    }
    return nullptr;
}

void* Task::sys$mmap(void* addr, size_t size)
{
    // FIXME: Implement mapping at a client-preferred address.
    ASSERT(addr == nullptr);
    auto* region = allocateRegion(size, "mmap");
    if (!region)
        return (void*)-1;
    MemoryManager::the().mapRegion(*this, *region);
    return (void*)region->linearAddress.get();
}

int Task::sys$munmap(void* addr, size_t size)
{
    auto* region = regionFromRange(LinearAddress((dword)addr), size);
    if (!region)
        return -1;
    if (!deallocateRegion(*region))
        return -1;
    return 0;
}

int Task::sys$spawn(const char* path)
{
    auto* child = Task::create(path, m_uid, m_gid);
    if (child)
        return child->pid();
    return -1;
}

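// Create a new Ring 3 task by loading an ELF executable from the VFS, mapping
// its segments into freshly allocated regions, and pointing EIP at "_start".
// Returns nullptr if the file can't be opened, read, or loaded.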
Task* Task::create(const String& path, uid_t uid, gid_t gid)
{
    auto parts = path.split('/');
    if (parts.isEmpty())
        return nullptr;

    auto handle = VirtualFileSystem::the().open(path);
    if (!handle)
        return nullptr;

    auto elfData = handle->readEntireFile();
    if (!elfData)
        return nullptr;

    InterruptDisabler disabler; // FIXME: Get rid of this, jesus christ. This "critical" section is HUGE.
    Task* t = new Task(parts.takeLast(), uid, gid);

    ExecSpace space;
    space.hookableAlloc = [&] (const String& name, size_t size) {
        if (!size)
            return (void*)nullptr;
        size = ((size / 4096) + 1) * 4096;
        Region* region = t->allocateRegion(size, String(name));
        ASSERT(region);
        MemoryManager::the().mapRegion(*t, *region);
        return (void*)region->linearAddress.asPtr();
    };
    bool success = space.loadELF(move(elfData));
    if (!success) {
        delete t;
        return nullptr;
    }

    t->m_tss.eip = (dword)space.symbolPtr("_start");
    if (!t->m_tss.eip) {
        delete t;
        return nullptr;
    }

    MemoryManager::the().unmapRegionsForTask(*t);
    MemoryManager::the().mapRegionsForTask(*current);

    s_tasks->prepend(t);
    system.nprocess++;

#ifdef TASK_DEBUG
    kprintf("Task %u (%s) spawned @ %p\n", t->pid(), t->name().characters(), t->m_tss.eip);
#endif

    return t;
}

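// Constructor used by Task::create() for tasks spawned from an ELF image
// (always Ring 3): sets up the user segment selectors, a user stack, and a
// separate Ring 0 stack for handling interrupts and syscalls.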
Task::Task(String&& name, uid_t uid, gid_t gid)
    : m_name(move(name))
    , m_pid(next_pid++)
    , m_uid(uid)
    , m_gid(gid)
    , m_state(Runnable)
    , m_ring(Ring3)
{
    m_fileHandles.append(nullptr);
    m_fileHandles.append(nullptr);
    m_fileHandles.append(nullptr);

    m_nextRegion = LinearAddress(0x600000);

    memset(&m_tss, 0, sizeof(m_tss));
    memset(&m_ldtEntries, 0, sizeof(m_ldtEntries));
    allocateLDT();

    // Only IF is set when a task boots.
    m_tss.eflags = 0x0202;

    WORD codeSegment = 0x1b;
    WORD dataSegment = 0x23;
    WORD stackSegment = dataSegment;

    m_tss.ds = dataSegment;
    m_tss.es = dataSegment;
    m_tss.fs = dataSegment;
    m_tss.gs = dataSegment;
    m_tss.ss = stackSegment;
    m_tss.cs = codeSegment;

    m_tss.cr3 = MemoryManager::the().pageDirectoryBase().get();

    auto* region = allocateRegion(defaultStackSize, "stack");
    ASSERT(region);
    m_stackTop = region->linearAddress.offset(defaultStackSize).get() & 0xfffffff8;
    m_tss.esp = m_stackTop;

    // Set up a separate stack for Ring0.
    m_kernelStack = kmalloc(defaultStackSize);
    DWORD ring0StackTop = ((DWORD)m_kernelStack + defaultStackSize) & 0xfffffff8;
    m_tss.ss0 = 0x10;
    m_tss.esp0 = ring0StackTop;

    // HACK: Ring2 SS in the TSS is the current PID.
    m_tss.ss2 = m_pid;
    m_farPtr.offset = 0x98765432;

    ASSERT(m_pid);
}

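// Constructor for tasks started from an in-kernel entry point. Ring 0 tasks
// run with kernel segments and a kmalloc'd stack; Ring 3 tasks get their entry
// code copied into a fresh "code" region plus their own LDT and kernel stack.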
Task::Task(void (*e)(), const char* n, IPC::Handle h, RingLevel ring)
    : m_name(n)
    , m_entry(e)
    , m_pid(next_pid++)
    , m_handle(h)
    , m_state(Runnable)
    , m_ring(ring)
{
    m_fileHandles.append(nullptr);
    m_fileHandles.append(nullptr);
    m_fileHandles.append(nullptr);

    m_nextRegion = LinearAddress(0x600000);

    Region* codeRegion = nullptr;
    if (!isRing0()) {
        codeRegion = allocateRegion(4096, "code");
        ASSERT(codeRegion);
        bool success = copyToZone(*codeRegion->zone, (void*)e, PAGE_SIZE);
        ASSERT(success);
    }

    memset(&m_tss, 0, sizeof(m_tss));
    memset(&m_ldtEntries, 0, sizeof(m_ldtEntries));

    if (ring == Ring3) {
        allocateLDT();
    }

    // Only IF is set when a task boots.
    m_tss.eflags = 0x0202;

    WORD dataSegment;
    WORD stackSegment;
    WORD codeSegment;

    if (ring == Ring0) {
        codeSegment = 0x08;
        dataSegment = 0x10;
        stackSegment = dataSegment;
    } else {
        codeSegment = 0x1b;
        dataSegment = 0x23;
        stackSegment = dataSegment;
    }

    m_tss.ds = dataSegment;
    m_tss.es = dataSegment;
    m_tss.fs = dataSegment;
    m_tss.gs = dataSegment;
    m_tss.ss = stackSegment;
    m_tss.cs = codeSegment;

    ASSERT((codeSegment & 3) == (stackSegment & 3));

    m_tss.cr3 = MemoryManager::the().pageDirectoryBase().get();

    if (isRing0()) {
        m_tss.eip = (DWORD)m_entry;
    } else {
        m_tss.eip = codeRegion->linearAddress.get();
    }

    if (isRing0()) {
        // FIXME: This memory is leaked.
        // But uh, there's also no kernel task termination, so I guess it's not technically leaked...
        dword stackBottom = (dword)kmalloc(defaultStackSize);
        m_stackTop = (stackBottom + defaultStackSize) & 0xfffffff8;
        m_tss.esp = m_stackTop;
    } else {
        auto* region = allocateRegion(defaultStackSize, "stack");
        ASSERT(region);
        m_stackTop = region->linearAddress.offset(defaultStackSize).get() & 0xfffffff8;
        m_tss.esp = m_stackTop;
    }

    if (ring == Ring3) {
        // Set up a separate stack for Ring0.
        // FIXME: Don't leak this stack either.
        m_kernelStack = kmalloc(defaultStackSize);
        DWORD ring0StackTop = ((DWORD)m_kernelStack + defaultStackSize) & 0xfffffff8;
        m_tss.ss0 = 0x10;
        m_tss.esp0 = ring0StackTop;
    }

    // HACK: Ring2 SS in the TSS is the current PID.
    m_tss.ss2 = m_pid;
    m_farPtr.offset = 0x12345678;

    // Don't add task 0 (kernel dummy task) to task list.
    // FIXME: This doesn't belong in the constructor.
    if (m_pid == 0)
        return;

    // Add it to head of task list (meaning it's next to run too, ATM.)
    s_tasks->prepend(this);
    system.nprocess++;

#ifdef TASK_DEBUG
    kprintf("Task %u (%s) spawned @ %p\n", m_pid, m_name.characters(), m_tss.eip);
#endif
}

Task::~Task()
{
    system.nprocess--;
    delete [] m_ldtEntries;
    m_ldtEntries = nullptr;

    if (m_kernelStack) {
        kfree(m_kernelStack);
        m_kernelStack = nullptr;
    }
}

void Task::dumpRegions()
{
    kprintf("Task %s(%u) regions:\n", name().characters(), pid());
    kprintf("BEGIN END SIZE NAME\n");
    for (auto& region : m_regions) {
        kprintf("%x -- %x %x %s\n",
                region->linearAddress.get(),
                region->linearAddress.offset(region->size - 1).get(),
                region->size,
                region->name.characters());
    }
}

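// Exit the current task: unmap its regions, pull it off the run list, park it
// on the dead-task list for later reaping, and switch to whatever the
// scheduler picks next.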
void Task::sys$exit(int status)
{
    cli();
#ifdef TASK_DEBUG
    kprintf("sys$exit: %s(%u) exit with status %d\n", name().characters(), pid(), status);
#endif

    setState(Exiting);

    MemoryManager::the().unmapRegionsForTask(*this);
    s_tasks->remove(this);

    if (!scheduleNewTask()) {
        kprintf("Task::sys$exit: Failed to schedule a new task :(\n");
        HANG;
    }

    s_deadTasks->append(this);

    switchNow();
}

void Task::taskDidCrash(Task* crashedTask)
{
    // NOTE: This is called from an exception handler, so interrupts are disabled.
    crashedTask->setState(Crashing);
    crashedTask->dumpRegions();

    s_tasks->remove(crashedTask);

    MemoryManager::the().unmapRegionsForTask(*crashedTask);

    if (!scheduleNewTask()) {
        kprintf("Task::taskDidCrash: Failed to schedule a new task :(\n");
        HANG;
    }

    s_deadTasks->append(crashedTask);

    switchNow();
}

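// Reap tasks that have exited or crashed since the last round of housekeeping.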
void Task::doHouseKeeping()
{
    Task* next = nullptr;
    for (auto* deadTask = s_deadTasks->head(); deadTask; deadTask = next) {
        next = deadTask->next();
        delete deadTask;
    }
    s_deadTasks->clear();
}

void yield()
{
    if (!current) {
        kprintf("PANIC: yield() with !current");
        HANG;
    }
    //kprintf("%s<%u> yield()\n", current->name().characters(), current->pid());

    InterruptDisabler disabler;
    if (!scheduleNewTask())
        return;

    //kprintf("yield() jumping to new task: %x (%s)\n", current->farPtr().selector, current->name().characters());
    switchNow();
}

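// Perform the actual hardware task switch: mark the target's TSS descriptor as
// available again and far-jump through the task's far pointer
// (selector:offset) with interrupts re-enabled.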
void switchNow()
{
    Descriptor& descriptor = getGDTEntry(current->selector());
    descriptor.type = 9;
    flushGDT();
    asm("sti\n"
        "ljmp *(%%eax)\n"
        ::"a"(&current->farPtr())
    );
}

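// Pick the next task to run. First, wake any blocked tasks whose conditions
// are now satisfied (IPC, sleep timeout, waitee gone), then round-robin
// through the task list looking for something Runnable. If nothing wants to
// run, fall back to the kernel task.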
bool scheduleNewTask()
{
    if (!current) {
        // XXX: The first ever context_switch() goes to the idle task.
        //      This is to set up a reliable place we can return to.
        return contextSwitch(Task::kernelTask());
    }

#if 0
    kprintf("Scheduler choices:\n");
    for (auto* task = s_tasks->head(); task; task = task->next()) {
        kprintf("%p %u %s\n", task, task->pid(), task->name().characters());
    }
#endif

    // Check and unblock tasks whose wait conditions have been met.
    for (auto* task = s_tasks->head(); task; task = task->next()) {
        if (task->state() == Task::BlockedReceive && (task->ipc.msg.isValid() || task->ipc.notifies)) {
            task->unblock();
            continue;
        }

        if (task->state() == Task::BlockedSend) {
            Task* peer = Task::fromIPCHandle(task->ipc.dst);
            if (peer && peer->state() == Task::BlockedReceive && peer->acceptsMessageFrom(*task)) {
                task->unblock();
                continue;
            }
        }

        if (task->state() == Task::BlockedSleep) {
            if (task->wakeupTime() <= system.uptime) {
                task->unblock();
                continue;
            }
        }

        if (task->state() == Task::BlockedWait) {
            if (!Task::fromPID(task->waitee())) {
                task->unblock();
                continue;
            }
        }
    }

    auto* prevHead = s_tasks->head();
    for (;;) {
        // Move head to tail.
        s_tasks->append(s_tasks->removeHead());
        auto* task = s_tasks->head();

        if (task->state() == Task::Runnable || task->state() == Task::Running) {
            //kprintf("switch to %s (%p vs %p)\n", task->name().characters(), task, current);
            return contextSwitch(task);
        }

        if (task == prevHead) {
            // Back at the previous head: nothing wants to run.
            kprintf("Nothing wants to run!\n");
            kprintf("PID OWNER STATE NSCHED NAME\n");
            for (auto* task = s_tasks->head(); task; task = task->next()) {
                kprintf("%w %w:%w %b %w %s\n",
                        task->pid(),
                        task->uid(),
                        task->gid(),
                        task->state(),
                        task->timesScheduled(),
                        task->name().characters());
            }
            kprintf("Switch to kernel task\n");
            return contextSwitch(Task::kernelTask());
        }
    }
}

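// Switch the "current" task to t: give it a fresh time slice, swap the mapped
// regions, and rebuild t's TSS descriptor in the GDT as a busy TSS so that
// switchNow() can jump to it. Returns false if t is already current.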
static bool contextSwitch(Task* t)
{
    //kprintf("c_s to %s (same:%u)\n", t->name().characters(), current == t);
    t->setTicksLeft(5);

    if (current == t)
        return false;

    // Some sanity checking to force a crash earlier.
    auto csRPL = t->tss().cs & 3;
    auto ssRPL = t->tss().ss & 3;

    if (csRPL != ssRPL) {
        kprintf("Fuckup! Switching from %s(%u) to %s(%u) has RPL mismatch\n",
                current->name().characters(), current->pid(),
                t->name().characters(), t->pid()
        );
        kprintf("code: %w:%x\n", t->tss().cs, t->tss().eip);
        kprintf(" stk: %w:%x\n", t->tss().ss, t->tss().esp);
        ASSERT(csRPL == ssRPL);
    }

    if (current) {
        // If the last task hasn't blocked (still marked as running),
        // mark it as runnable for the next round.
        if (current->state() == Task::Running)
            current->setState(Task::Runnable);

        bool success = MemoryManager::the().unmapRegionsForTask(*current);
        ASSERT(success);
    }

    bool success = MemoryManager::the().mapRegionsForTask(*t);
    ASSERT(success);

    current = t;
    t->setState(Task::Running);

    if (!t->selector())
        t->setSelector(allocateGDTEntry());

    auto& tssDescriptor = getGDTEntry(t->selector());

    tssDescriptor.limit_hi = 0;
    tssDescriptor.limit_lo = 0xFFFF;
    tssDescriptor.base_lo = (DWORD)(&t->tss()) & 0xFFFF;
    tssDescriptor.base_hi = ((DWORD)(&t->tss()) >> 16) & 0xFF;
    tssDescriptor.base_hi2 = ((DWORD)(&t->tss()) >> 24) & 0xFF;
    tssDescriptor.dpl = 0;
    tssDescriptor.segment_present = 1;
    tssDescriptor.granularity = 1;
    tssDescriptor.zero = 0;
    tssDescriptor.operation_size = 1;
    tssDescriptor.descriptor_type = 0;
    tssDescriptor.type = 11; // Busy TSS
    flushGDT();

    t->didSchedule();
    return true;
}

Task* Task::fromPID(pid_t pid)
{
    for (auto* task = s_tasks->head(); task; task = task->next()) {
        if (task->pid() == pid)
            return task;
    }
    return nullptr;
}

Task* Task::fromIPCHandle(IPC::Handle handle)
{
    for (auto* task = s_tasks->head(); task; task = task->next()) {
        if (task->handle() == handle)
            return task;
    }
    return nullptr;
}

FileHandle* Task::fileHandleIfExists(int fd)
{
    if (fd < 0)
        return nullptr;
    if ((unsigned)fd < m_fileHandles.size())
        return m_fileHandles[fd].ptr();
    return nullptr;
}

ssize_t Task::sys$get_dir_entries(int fd, void* buffer, size_t size)
{
    auto* handle = fileHandleIfExists(fd);
    if (!handle)
        return -1;
    return handle->get_dir_entries((byte*)buffer, size);
}

int Task::sys$seek(int fd, int offset)
{
    auto* handle = fileHandleIfExists(fd);
    if (!handle)
        return -1;
    return handle->seek(offset, SEEK_SET);
}

ssize_t Task::sys$read(int fd, void* outbuf, size_t nread)
{
    Task::checkSanity("Task::sys$read");
#ifdef DEBUG_IO
    kprintf("Task::sys$read: called(%d, %p, %u)\n", fd, outbuf, nread);
#endif
    auto* handle = fileHandleIfExists(fd);
#ifdef DEBUG_IO
    kprintf("Task::sys$read: handle=%p\n", handle);
#endif
    if (!handle) {
        kprintf("Task::sys$read: handle not found :(\n");
        return -1;
    }
#ifdef DEBUG_IO
    kprintf("call read on handle=%p\n", handle);
#endif
    nread = handle->read((byte*)outbuf, nread);
#ifdef DEBUG_IO
    kprintf("Task::sys$read: nread=%u\n", nread);
#endif
    return nread;
}

int Task::sys$close(int fd)
{
    auto* handle = fileHandleIfExists(fd);
    if (!handle)
        return -1;
    // FIXME: Implement.
    return 0;
}

int Task::sys$open(const char* path, size_t pathLength)
{
    Task::checkSanity("sys$open");
#ifdef DEBUG_IO
    kprintf("Task::sys$open(): PID=%u, path=%s {%u}\n", m_pid, path, pathLength);
#endif
    auto* handle = current->openFile(String(path, pathLength));
    if (handle)
        return handle->fd();
    return -1;
}

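// Open a file via the VFS and hand out the next file descriptor; the fd is
// simply the handle's index in m_fileHandles.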
FileHandle* Task::openFile(String&& path)
{
    auto handle = VirtualFileSystem::the().open(move(path));
    if (!handle) {
#ifdef DEBUG_IO
        kprintf("vfs::open() failed\n");
#endif
        return nullptr;
    }
    handle->setFD(m_fileHandles.size());
#ifdef DEBUG_IO
    kprintf("vfs::open() worked! handle=%p, fd=%d\n", handle.ptr(), handle->fd());
#endif
    m_fileHandles.append(move(handle)); // FIXME: allow non-move Vector::append
    return m_fileHandles.last().ptr();
}

int Task::sys$kill(pid_t pid, int sig)
{
    (void) sig;
    if (pid == 0) {
        // FIXME: Send to same-group processes.
        ASSERT(pid != 0);
    }
    if (pid == -1) {
        // FIXME: Send to all processes.
        ASSERT(pid != -1);
    }
    ASSERT_NOT_REACHED();
    Task* peer = Task::fromPID(pid);
    if (!peer) {
        // errno = ESRCH;
        return -1;
    }
#if 0
    send(peer->handle(), IPC::Message(SYS_KILL, DataBuffer::copy((BYTE*)&sig, sizeof(sig))));
    IPC::Message response = receive(peer->handle());
    return *(int*)response.data();
#endif
    return -1;
}

uid_t Task::sys$getuid()
{
    return m_uid;
}

gid_t Task::sys$getgid()
{
    return m_gid;
}

pid_t Task::sys$getpid()
{
    return m_pid;
}

pid_t Task::sys$waitpid(pid_t waitee)
{
    if (!Task::fromPID(waitee))
        return -1;
    m_waitee = waitee;
    block(BlockedWait);
    yield();
    return m_waitee;
}

bool Task::acceptsMessageFrom(Task& peer)
{
    return !ipc.msg.isValid() && (ipc.src == IPC::Handle::Any || ipc.src == peer.handle());
}

void Task::unblock()
{
    ASSERT(m_state != Task::Runnable && m_state != Task::Running);
    system.nblocked--;
    m_state = Task::Runnable;
}

void Task::block(Task::State state)
{
    ASSERT(current->state() == Task::Running);
    system.nblocked++;
    current->setState(state);
}

void block(Task::State state)
{
    current->block(state);
    yield();
}

void sleep(DWORD ticks)
{
    ASSERT(current->state() == Task::Running);
    current->setWakeupTime(system.uptime + ticks);
    current->block(Task::BlockedSleep);
    yield();
}

void Task::sys$sleep(DWORD ticks)
{
    ASSERT(this == current);
    sleep(ticks);
}

Task* Task::kernelTask()
{
    ASSERT(s_kernelTask);
    return s_kernelTask;
}

void Task::setError(int error)
{
    m_error = error;
}

Task::Region::Region(LinearAddress a, size_t s, RetainPtr<Zone>&& z, String&& n)
    : linearAddress(a)
    , size(s)
    , zone(move(z))
    , name(move(n))
{
}

Task::Region::~Region()
{
}