// Task.cpp

#include "types.h"
#include "Task.h"
#include "kmalloc.h"
#include "VGA.h"
#include "StdLib.h"
#include "i386.h"
#include "system.h"
#include <VirtualFileSystem/FileHandle.h>
#include <VirtualFileSystem/VirtualFileSystem.h>
#include <ELFLoader/ExecSpace.h>
#include "MemoryManager.h"

//#define DEBUG_IO
//#define TASK_DEBUG

static const DWORD defaultStackSize = 16384;

Task* current;
Task* s_kernelTask;
static pid_t next_pid;
static InlineLinkedList<Task>* s_tasks;
static InlineLinkedList<Task>* s_deadTasks;

static bool contextSwitch(Task*);
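
// Rebuild the GDT descriptor for the kernel task's TSS. The GDT slot is
// allocated lazily on first use, and the descriptor is written as an
// available 32-bit TSS (type 9) so it can be the target of a task switch.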
static void redoKernelTaskTSS()
{
    if (!s_kernelTask->selector())
        s_kernelTask->setSelector(allocateGDTEntry());

    auto& tssDescriptor = getGDTEntry(s_kernelTask->selector());

    tssDescriptor.setBase(&s_kernelTask->tss());
    tssDescriptor.setLimit(0xffff);
    tssDescriptor.dpl = 0;
    tssDescriptor.segment_present = 1;
    tssDescriptor.granularity = 1;
    tssDescriptor.zero = 0;
    tssDescriptor.operation_size = 1;
    tssDescriptor.descriptor_type = 0;
    tssDescriptor.type = 9;

    flushGDT();
}
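
// Prepare for returning to a task via IRET: record the outgoing task in the
// kernel TSS's backlink and load TR with the kernel TSS selector. On x86, an
// IRET with the NT flag set switches to the task named in the current TSS's
// backlink; presumably that is the return path being set up here.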
void Task::prepForIRETToNewTask()
{
    redoKernelTaskTSS();
    s_kernelTask->tss().backlink = current->selector();
    loadTaskRegister(s_kernelTask->selector());
}

void Task::initialize()
{
    current = nullptr;
    next_pid = 0;
    s_tasks = new InlineLinkedList<Task>;
    s_deadTasks = new InlineLinkedList<Task>;
    s_kernelTask = new Task(0, "colonel", IPC::Handle::Any, Task::Ring0);
    redoKernelTaskTSS();
    loadTaskRegister(s_kernelTask->selector());
}

#ifdef TASK_SANITY_CHECKS
void Task::checkSanity(const char* msg)
{
    char ch = current->name()[0];
    kprintf("<%p> %s{%u}%b [%d] :%b: sanity check <%s>\n",
            current->name().characters(),
            current->name().characters(),
            current->name().length(),
            current->name()[current->name().length() - 1],
            current->pid(), ch, msg ? msg : "");
    ASSERT((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z'));
}
#endif
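
// Give this task a small private LDT: the descriptor table itself lives on
// the kernel heap, a GDT entry of type LDT points at it, and the resulting
// selector is stored in the TSS so the CPU loads it on a task switch.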
void Task::allocateLDT()
{
    ASSERT(!m_tss.ldt);
    static const WORD numLDTEntries = 4;
    WORD newLDTSelector = allocateGDTEntry();
    m_ldtEntries = new Descriptor[numLDTEntries];
#if 0
    kprintf("new ldt selector = %x\n", newLDTSelector);
    kprintf("new ldt table at = %p\n", m_ldtEntries);
    kprintf("new ldt table size = %u\n", (numLDTEntries * 8) - 1);
#endif
    Descriptor& ldt = getGDTEntry(newLDTSelector);
    ldt.setBase(m_ldtEntries);
    ldt.setLimit(numLDTEntries * 8 - 1);
    ldt.dpl = 0;
    ldt.segment_present = 1;
    ldt.granularity = 0;
    ldt.zero = 0;
    ldt.operation_size = 1;
    ldt.descriptor_type = 0;
    ldt.type = Descriptor::LDT;
    m_tss.ldt = newLDTSelector;
}

Vector<Task*> Task::allTasks()
{
    Vector<Task*> tasks;
    tasks.ensureCapacity(s_tasks->sizeSlow());
    for (auto* task = s_tasks->head(); task; task = task->next())
        tasks.append(task);
    return tasks;
}
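
// Carve a new region out of this task's address space. Regions are handed
// out linearly from m_nextRegion with a 16 KB gap between neighbours
// (presumably as a guard); overlap checking is still a FIXME.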
Task::Region* Task::allocateRegion(size_t size, String&& name)
{
    // FIXME: This needs sanity checks. What if this overlaps existing regions?
    auto zone = MemoryManager::the().createZone(size);
    ASSERT(zone);
    m_regions.append(make<Region>(m_nextRegion, size, move(zone), move(name)));
    m_nextRegion = m_nextRegion.offset(size).offset(16384);
    return m_regions.last().ptr();
}

bool Task::deallocateRegion(Region& region)
{
    for (size_t i = 0; i < m_regions.size(); ++i) {
        if (m_regions[i].ptr() == &region) {
            // FIXME: This seems racy.
            MemoryManager::the().unmapRegion(*this, region);
            m_regions.remove(i);
            return true;
        }
    }
    return false;
}

Task::Region* Task::regionFromRange(LinearAddress laddr, size_t size)
{
    for (auto& region : m_regions) {
        if (region->linearAddress == laddr && region->size == size)
            return region.ptr();
    }
    return nullptr;
}
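
// Minimal mmap/munmap syscalls: only anonymous mappings at a kernel-chosen
// address are supported, and munmap requires an exact (address, size) match
// with an existing region. A hypothetical userland wrapper (not part of this
// file) might be used like:
//
//     void* p = mmap(nullptr, 16384);  // kernel picks the address
//     munmap(p, 16384);                // must match the original size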
void* Task::sys$mmap(void* addr, size_t size)
{
    // FIXME: Implement mapping at a client-preferred address.
    ASSERT(addr == nullptr);
    auto* region = allocateRegion(size, "mmap");
    if (!region)
        return (void*)-1;
    MemoryManager::the().mapRegion(*this, *region);
    return (void*)region->linearAddress.get();
}

int Task::sys$munmap(void* addr, size_t size)
{
    auto* region = regionFromRange(LinearAddress((dword)addr), size);
    if (!region)
        return -1;
    if (!deallocateRegion(*region))
        return -1;
    return 0;
}

int Task::sys$spawn(const char* path)
{
    auto* child = Task::create(path, m_uid, m_gid, m_pid);
    if (child)
        return child->pid();
    return -1;
}
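
// Spawn a new userland task from an ELF image: open the file, read it whole,
// load its segments into freshly allocated regions, and point the new task's
// EIP at its "_start" symbol. The loader maps the child's regions into the
// current address space while loading, so they are unmapped again (and the
// parent's regions remapped) before returning.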
Task* Task::create(const String& path, uid_t uid, gid_t gid, pid_t parentPID)
{
    auto parts = path.split('/');
    if (parts.isEmpty())
        return nullptr;

    auto handle = VirtualFileSystem::the().open(path);
    if (!handle)
        return nullptr;

    auto elfData = handle->readEntireFile();
    if (!elfData)
        return nullptr;

    InterruptDisabler disabler; // FIXME: Get rid of this, jesus christ. This "critical" section is HUGE.
    Task* t = new Task(parts.takeLast(), uid, gid, parentPID);

    ExecSpace space;
    space.hookableAlloc = [&] (const String& name, size_t size) {
        if (!size)
            return (void*)nullptr;
        size = ((size / 4096) + 1) * 4096;
        Region* region = t->allocateRegion(size, String(name));
        ASSERT(region);
        MemoryManager::the().mapRegion(*t, *region);
        return (void*)region->linearAddress.asPtr();
    };
    bool success = space.loadELF(move(elfData));
    if (!success) {
        delete t;
        return nullptr;
    }

    t->m_tss.eip = (dword)space.symbolPtr("_start");
    if (!t->m_tss.eip) {
        delete t;
        return nullptr;
    }

    MemoryManager::the().unmapRegionsForTask(*t);
    MemoryManager::the().mapRegionsForTask(*current);

    s_tasks->prepend(t);
    system.nprocess++;

#ifdef TASK_DEBUG
    kprintf("Task %u (%s) spawned @ %p\n", t->pid(), t->name().characters(), t->m_tss.eip);
#endif
    return t;
}
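
// Constructor for ELF-spawned userland tasks (always Ring 3). Reserves the
// first three file descriptors as null handles, inherits the parent's
// working directory, builds a TSS with user code/data selectors (0x1b/0x23),
// and allocates both a user stack region and a separate kernel stack for
// ring transitions.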
Task::Task(String&& name, uid_t uid, gid_t gid, pid_t parentPID)
    : m_name(move(name))
    , m_pid(next_pid++)
    , m_uid(uid)
    , m_gid(gid)
    , m_state(Runnable)
    , m_ring(Ring3)
    , m_parentPID(parentPID)
{
    m_fileHandles.append(nullptr);
    m_fileHandles.append(nullptr);
    m_fileHandles.append(nullptr);

    auto* parentTask = Task::fromPID(parentPID);
    if (parentTask)
        m_cwd = parentTask->m_cwd;
    else
        m_cwd = "/";

    m_nextRegion = LinearAddress(0x600000);

    memset(&m_tss, 0, sizeof(m_tss));
    memset(&m_ldtEntries, 0, sizeof(m_ldtEntries));

    allocateLDT();

    // Only IF is set when a task boots.
    m_tss.eflags = 0x0202;

    WORD codeSegment = 0x1b;
    WORD dataSegment = 0x23;
    WORD stackSegment = dataSegment;

    m_tss.ds = dataSegment;
    m_tss.es = dataSegment;
    m_tss.fs = dataSegment;
    m_tss.gs = dataSegment;
    m_tss.ss = stackSegment;
    m_tss.cs = codeSegment;

    m_tss.cr3 = MemoryManager::the().pageDirectoryBase().get();

    auto* region = allocateRegion(defaultStackSize, "stack");
    ASSERT(region);
    m_stackTop = region->linearAddress.offset(defaultStackSize).get() & 0xfffffff8;
    m_tss.esp = m_stackTop;

    // Set up a separate stack for Ring0.
    m_kernelStack = kmalloc(defaultStackSize);
    DWORD ring0StackTop = ((DWORD)m_kernelStack + defaultStackSize) & 0xfffffff8;
    m_tss.ss0 = 0x10;
    m_tss.esp0 = ring0StackTop;

    // HACK: Ring2 SS in the TSS is the current PID.
    m_tss.ss2 = m_pid;
    m_farPtr.offset = 0x98765432;

    ASSERT(m_pid);
}
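
// Constructor for tasks created from a kernel function pointer, such as the
// "colonel" kernel task. Ring 0 tasks run with kernel selectors (0x08/0x10)
// on a kmalloc'd stack; Ring 3 tasks built this way get their entry function
// copied into a "code" region, an LDT, a user stack region, and a separate
// ring-0 stack.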
Task::Task(void (*e)(), const char* n, IPC::Handle h, RingLevel ring)
    : m_name(n)
    , m_entry(e)
    , m_pid(next_pid++)
    , m_handle(h)
    , m_state(Runnable)
    , m_ring(ring)
{
    m_fileHandles.append(nullptr);
    m_fileHandles.append(nullptr);
    m_fileHandles.append(nullptr);

    m_cwd = "/";

    m_nextRegion = LinearAddress(0x600000);

    Region* codeRegion = nullptr;
    if (!isRing0()) {
        codeRegion = allocateRegion(4096, "code");
        ASSERT(codeRegion);
        bool success = copyToZone(*codeRegion->zone, (void*)e, PAGE_SIZE);
        ASSERT(success);
    }

    memset(&m_tss, 0, sizeof(m_tss));
    memset(&m_ldtEntries, 0, sizeof(m_ldtEntries));

    if (ring == Ring3) {
        allocateLDT();
    }

    // Only IF is set when a task boots.
    m_tss.eflags = 0x0202;

    WORD dataSegment;
    WORD stackSegment;
    WORD codeSegment;

    if (ring == Ring0) {
        codeSegment = 0x08;
        dataSegment = 0x10;
        stackSegment = dataSegment;
    } else {
        codeSegment = 0x1b;
        dataSegment = 0x23;
        stackSegment = dataSegment;
    }

    m_tss.ds = dataSegment;
    m_tss.es = dataSegment;
    m_tss.fs = dataSegment;
    m_tss.gs = dataSegment;
    m_tss.ss = stackSegment;
    m_tss.cs = codeSegment;

    ASSERT((codeSegment & 3) == (stackSegment & 3));

    m_tss.cr3 = MemoryManager::the().pageDirectoryBase().get();

    if (isRing0()) {
        m_tss.eip = (DWORD)m_entry;
    } else {
        m_tss.eip = codeRegion->linearAddress.get();
    }

    if (isRing0()) {
        // FIXME: This memory is leaked.
        // But uh, there's also no kernel task termination, so I guess it's not technically leaked...
        dword stackBottom = (dword)kmalloc(defaultStackSize);
        m_stackTop = (stackBottom + defaultStackSize) & 0xfffffff8;
        m_tss.esp = m_stackTop;
    } else {
        auto* region = allocateRegion(defaultStackSize, "stack");
        ASSERT(region);
        m_stackTop = region->linearAddress.offset(defaultStackSize).get() & 0xfffffff8;
        m_tss.esp = m_stackTop;
    }

    if (ring == Ring3) {
        // Set up a separate stack for Ring0.
        // FIXME: Don't leak this stack either.
        m_kernelStack = kmalloc(defaultStackSize);
        DWORD ring0StackTop = ((DWORD)m_kernelStack + defaultStackSize) & 0xfffffff8;
        m_tss.ss0 = 0x10;
        m_tss.esp0 = ring0StackTop;
    }

    // HACK: Ring2 SS in the TSS is the current PID.
    m_tss.ss2 = m_pid;
    m_farPtr.offset = 0x12345678;

    // Don't add task 0 (kernel dummy task) to task list.
    // FIXME: This doesn't belong in the constructor.
    if (m_pid == 0)
        return;

    // Add it to head of task list (meaning it's next to run too, ATM.)
    s_tasks->prepend(this);
    system.nprocess++;

#ifdef TASK_DEBUG
    kprintf("Task %u (%s) spawned @ %p\n", m_pid, m_name.characters(), m_tss.eip);
#endif
}

Task::~Task()
{
    system.nprocess--;
    delete [] m_ldtEntries;
    m_ldtEntries = nullptr;

    if (m_kernelStack) {
        kfree(m_kernelStack);
        m_kernelStack = nullptr;
    }
}

void Task::dumpRegions()
{
    kprintf("Task %s(%u) regions:\n", name().characters(), pid());
    kprintf("BEGIN END SIZE NAME\n");
    for (auto& region : m_regions) {
        kprintf("%x -- %x %x %s\n",
                region->linearAddress.get(),
                region->linearAddress.offset(region->size - 1).get(),
                region->size,
                region->name.characters());
    }
}

void Task::sys$exit(int status)
{
    cli();
#ifdef TASK_DEBUG
    kprintf("sys$exit: %s(%u) exit with status %d\n", name().characters(), pid(), status);
#endif
    setState(Exiting);
    MemoryManager::the().unmapRegionsForTask(*this);
    s_tasks->remove(this);

    if (!scheduleNewTask()) {
        kprintf("Task::sys$exit: Failed to schedule a new task :(\n");
        HANG;
    }

    s_deadTasks->append(this);
    switchNow();
}

void Task::taskDidCrash(Task* crashedTask)
{
    // NOTE: This is called from an exception handler, so interrupts are disabled.
    crashedTask->setState(Crashing);
    crashedTask->dumpRegions();

    s_tasks->remove(crashedTask);
    MemoryManager::the().unmapRegionsForTask(*crashedTask);

    if (!scheduleNewTask()) {
        kprintf("Task::taskDidCrash: Failed to schedule a new task :(\n");
        HANG;
    }

    s_deadTasks->append(crashedTask);
    switchNow();
}

void Task::doHouseKeeping()
{
    Task* next = nullptr;
    for (auto* deadTask = s_deadTasks->head(); deadTask; deadTask = next) {
        next = deadTask->next();
        delete deadTask;
    }
    s_deadTasks->clear();
}
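
// Voluntarily give up the CPU: disable interrupts, pick another runnable
// task, and far-jump to it. Panics if called before any task is current.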
void yield()
{
    if (!current) {
        kprintf("PANIC: yield() with !current");
        HANG;
    }

    //kprintf("%s<%u> yield()\n", current->name().characters(), current->pid());

    InterruptDisabler disabler;
    if (!scheduleNewTask())
        return;

    //kprintf("yield() jumping to new task: %x (%s)\n", current->farPtr().selector, current->name().characters());
    switchNow();
}
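
// Perform the actual hardware task switch. The target's TSS descriptor is
// marked "available" (type 9) again before the jump, since an ljmp to a TSS
// that is still marked busy would fault; the far jump through the task's
// far pointer then makes the CPU load the new TSS.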
void switchNow()
{
    Descriptor& descriptor = getGDTEntry(current->selector());
    descriptor.type = 9;
    flushGDT();
    asm("sti\n"
        "ljmp *(%%eax)\n"
        ::"a"(&current->farPtr())
    );
}
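
// The scheduler proper. First pass: unblock any tasks whose wait condition
// has been satisfied (IPC message arrived, peer ready to receive, sleep
// expired, waitee gone). Second pass: rotate the task list head-to-tail
// until a Runnable/Running task is found; if nothing wants to run, dump the
// task table and fall back to the kernel task.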
bool scheduleNewTask()
{
    if (!current) {
        // XXX: The first ever context_switch() goes to the idle task.
        // This is to set up a reliable place we can return to.
        return contextSwitch(Task::kernelTask());
    }

#if 0
    kprintf("Scheduler choices:\n");
    for (auto* task = s_tasks->head(); task; task = task->next()) {
        kprintf("%p %u %s\n", task, task->pid(), task->name().characters());
    }
#endif

    // Check and unblock tasks whose wait conditions have been met.
    for (auto* task = s_tasks->head(); task; task = task->next()) {
        if (task->state() == Task::BlockedReceive && (task->ipc.msg.isValid() || task->ipc.notifies)) {
            task->unblock();
            continue;
        }

        if (task->state() == Task::BlockedSend) {
            Task* peer = Task::fromIPCHandle(task->ipc.dst);
            if (peer && peer->state() == Task::BlockedReceive && peer->acceptsMessageFrom(*task)) {
                task->unblock();
                continue;
            }
        }

        if (task->state() == Task::BlockedSleep) {
            if (task->wakeupTime() <= system.uptime) {
                task->unblock();
                continue;
            }
        }

        if (task->state() == Task::BlockedWait) {
            if (!Task::fromPID(task->waitee())) {
                task->unblock();
                continue;
            }
        }
    }

    auto* prevHead = s_tasks->head();
    for (;;) {
        // Move head to tail.
        s_tasks->append(s_tasks->removeHead());
        auto* task = s_tasks->head();

        if (task->state() == Task::Runnable || task->state() == Task::Running) {
            //kprintf("switch to %s (%p vs %p)\n", task->name().characters(), task, current);
            return contextSwitch(task);
        }

        if (task == prevHead) {
            // Back at task_head, nothing wants to run.
            kprintf("Nothing wants to run!\n");
            kprintf("PID OWNER STATE NSCHED NAME\n");
            for (auto* task = s_tasks->head(); task; task = task->next()) {
                kprintf("%w %w:%w %b %w %s\n",
                        task->pid(),
                        task->uid(),
                        task->gid(),
                        task->state(),
                        task->timesScheduled(),
                        task->name().characters());
            }
            kprintf("Switch to kernel task\n");
            return contextSwitch(Task::kernelTask());
        }
    }
}
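
// Switch bookkeeping and MMU state over to task `t`: unmap the outgoing
// task's regions, map the incoming task's, (re)build its TSS descriptor in
// the GDT and mark it busy (type 11), and update `current`. Returns false if
// `t` is already current, in which case only its time slice is refreshed.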
static bool contextSwitch(Task* t)
{
    //kprintf("c_s to %s (same:%u)\n", t->name().characters(), current == t);
    t->setTicksLeft(5);

    if (current == t)
        return false;

    // Some sanity checking to force a crash earlier.
    auto csRPL = t->tss().cs & 3;
    auto ssRPL = t->tss().ss & 3;

    if (csRPL != ssRPL) {
        kprintf("Fuckup! Switching from %s(%u) to %s(%u) has RPL mismatch\n",
            current->name().characters(), current->pid(),
            t->name().characters(), t->pid()
        );
        kprintf("code: %w:%x\n", t->tss().cs, t->tss().eip);
        kprintf(" stk: %w:%x\n", t->tss().ss, t->tss().esp);
        ASSERT(csRPL == ssRPL);
    }

    if (current) {
        // If the last task hasn't blocked (still marked as running),
        // mark it as runnable for the next round.
        if (current->state() == Task::Running)
            current->setState(Task::Runnable);

        bool success = MemoryManager::the().unmapRegionsForTask(*current);
        ASSERT(success);
    }

    bool success = MemoryManager::the().mapRegionsForTask(*t);
    ASSERT(success);

    current = t;
    t->setState(Task::Running);

    if (!t->selector())
        t->setSelector(allocateGDTEntry());

    auto& tssDescriptor = getGDTEntry(t->selector());

    tssDescriptor.limit_hi = 0;
    tssDescriptor.limit_lo = 0xFFFF;
    tssDescriptor.base_lo = (DWORD)(&t->tss()) & 0xFFFF;
    tssDescriptor.base_hi = ((DWORD)(&t->tss()) >> 16) & 0xFF;
    tssDescriptor.base_hi2 = ((DWORD)(&t->tss()) >> 24) & 0xFF;
    tssDescriptor.dpl = 0;
    tssDescriptor.segment_present = 1;
    tssDescriptor.granularity = 1;
    tssDescriptor.zero = 0;
    tssDescriptor.operation_size = 1;
    tssDescriptor.descriptor_type = 0;
    tssDescriptor.type = 11; // Busy TSS

    flushGDT();

    t->didSchedule();
    return true;
}

Task* Task::fromPID(pid_t pid)
{
    for (auto* task = s_tasks->head(); task; task = task->next()) {
        if (task->pid() == pid)
            return task;
    }
    return nullptr;
}

Task* Task::fromIPCHandle(IPC::Handle handle)
{
    for (auto* task = s_tasks->head(); task; task = task->next()) {
        if (task->handle() == handle)
            return task;
    }
    return nullptr;
}

FileHandle* Task::fileHandleIfExists(int fd)
{
    if (fd < 0)
        return nullptr;
    if ((unsigned)fd < m_fileHandles.size())
        return m_fileHandles[fd].ptr();
    return nullptr;
}

ssize_t Task::sys$get_dir_entries(int fd, void* buffer, size_t size)
{
    auto* handle = fileHandleIfExists(fd);
    if (!handle)
        return -1;
    return handle->get_dir_entries((byte*)buffer, size);
}

int Task::sys$seek(int fd, int offset)
{
    auto* handle = fileHandleIfExists(fd);
    if (!handle)
        return -1;
    return handle->seek(offset, SEEK_SET);
}

ssize_t Task::sys$read(int fd, void* outbuf, size_t nread)
{
    Task::checkSanity("Task::sys$read");
#ifdef DEBUG_IO
    kprintf("Task::sys$read: called(%d, %p, %u)\n", fd, outbuf, nread);
#endif
    auto* handle = fileHandleIfExists(fd);
#ifdef DEBUG_IO
    kprintf("Task::sys$read: handle=%p\n", handle);
#endif
    if (!handle) {
        kprintf("Task::sys$read: handle not found :(\n");
        return -1;
    }
#ifdef DEBUG_IO
    kprintf("call read on handle=%p\n", handle);
#endif
    nread = handle->read((byte*)outbuf, nread);
#ifdef DEBUG_IO
    kprintf("Task::sys$read: nread=%u\n", nread);
#endif
    return nread;
}

int Task::sys$close(int fd)
{
    auto* handle = fileHandleIfExists(fd);
    if (!handle)
        return -1;
    // FIXME: Implement.
    return 0;
}

int Task::sys$lstat(const char* path, void* statbuf)
{
    auto handle = VirtualFileSystem::the().open(move(path));
    if (!handle)
        return -1;
    handle->stat((Unix::stat*)statbuf);
    return 0;
}

int Task::sys$getcwd(char* buffer, size_t size)
{
    if (size < m_cwd.length() + 1) {
        // FIXME: return -ERANGE;
        return -1;
    }
    memcpy(buffer, m_cwd.characters(), m_cwd.length());
    buffer[m_cwd.length()] = '\0';
    return 0;
}

int Task::sys$open(const char* path, size_t pathLength)
{
    Task::checkSanity("sys$open");
#ifdef DEBUG_IO
    kprintf("Task::sys$open(): PID=%u, path=%s {%u}\n", m_pid, path, pathLength);
#endif
    auto* handle = openFile(String(path, pathLength));
    if (handle)
        return handle->fd();
    return -1;
}

FileHandle* Task::openFile(String&& path)
{
    auto handle = VirtualFileSystem::the().open(move(path));
    if (!handle) {
#ifdef DEBUG_IO
        kprintf("vfs::open() failed\n");
#endif
        return nullptr;
    }
    handle->setFD(m_fileHandles.size());
#ifdef DEBUG_IO
    kprintf("vfs::open() worked! handle=%p, fd=%d\n", handle.ptr(), handle->fd());
#endif
    m_fileHandles.append(move(handle)); // FIXME: allow non-move Vector::append
    return m_fileHandles.last().ptr();
}
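
// sys$kill is still a stub: both special PIDs assert, ASSERT_NOT_REACHED()
// fires before any delivery, and the old IPC-based delivery below is
// compiled out.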
int Task::sys$kill(pid_t pid, int sig)
{
    (void) sig;
    if (pid == 0) {
        // FIXME: Send to same-group processes.
        ASSERT(pid != 0);
    }
    if (pid == -1) {
        // FIXME: Send to all processes.
        ASSERT(pid != -1);
    }
    ASSERT_NOT_REACHED();
    Task* peer = Task::fromPID(pid);
    if (!peer) {
        // errno = ESRCH;
        return -1;
    }
#if 0
    send(peer->handle(), IPC::Message(SYS_KILL, DataBuffer::copy((BYTE*)&sig, sizeof(sig))));
    IPC::Message response = receive(peer->handle());
    return *(int*)response.data();
#endif
    return -1;
}

uid_t Task::sys$getuid()
{
    return m_uid;
}

gid_t Task::sys$getgid()
{
    return m_gid;
}

pid_t Task::sys$getpid()
{
    return m_pid;
}

pid_t Task::sys$waitpid(pid_t waitee)
{
    if (!Task::fromPID(waitee))
        return -1;
    m_waitee = waitee;
    block(BlockedWait);
    yield();
    return m_waitee;
}

bool Task::acceptsMessageFrom(Task& peer)
{
    return !ipc.msg.isValid() && (ipc.src == IPC::Handle::Any || ipc.src == peer.handle());
}
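
// Blocking primitives. block()/unblock() keep system.nblocked in sync with
// the task's state; the free-standing block() helper marks the current task
// blocked and immediately yields.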
void Task::unblock()
{
    ASSERT(m_state != Task::Runnable && m_state != Task::Running);
    system.nblocked--;
    m_state = Task::Runnable;
}

void Task::block(Task::State state)
{
    ASSERT(current->state() == Task::Running);
    system.nblocked++;
    current->setState(state);
}

void block(Task::State state)
{
    current->block(state);
    yield();
}
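
// Sleep is implemented in terms of the uptime tick counter: record a wakeup
// time, block in BlockedSleep, and let the scheduler unblock the task once
// system.uptime has caught up.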
void sleep(DWORD ticks)
{
    ASSERT(current->state() == Task::Running);
    current->setWakeupTime(system.uptime + ticks);
    current->block(Task::BlockedSleep);
    yield();
}

void Task::sys$sleep(DWORD ticks)
{
    ASSERT(this == current);
    sleep(ticks);
}

Task* Task::kernelTask()
{
    ASSERT(s_kernelTask);
    return s_kernelTask;
}

void Task::setError(int error)
{
    m_error = error;
}

Task::Region::Region(LinearAddress a, size_t s, RetainPtr<Zone>&& z, String&& n)
    : linearAddress(a)
    , size(s)
    , zone(move(z))
    , name(move(n))
{
}

Task::Region::~Region()
{
}