// Task.cpp

#include "types.h"
#include "Task.h"
#include "kmalloc.h"
#include "VGA.h"
#include "StdLib.h"
#include "i386.h"
#include "system.h"
#include <VirtualFileSystem/FileHandle.h>
#include <VirtualFileSystem/VirtualFileSystem.h>
#include <ELFLoader/ExecSpace.h>
#include "MemoryManager.h"
#include "errno.h"
#include "i8253.h"
#include "RTC.h"

//#define DEBUG_IO
//#define TASK_DEBUG

static const DWORD defaultStackSize = 16384;

Task* current;
Task* s_kernelTask;

static pid_t next_pid;
static InlineLinkedList<Task>* s_tasks;
static InlineLinkedList<Task>* s_deadTasks;
static String* s_hostname;

static String& hostname(InterruptDisabler&)
{
    ASSERT(s_hostname);
    return *s_hostname;
}

static bool contextSwitch(Task*);

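// Rebuilds the GDT descriptor that backs the kernel task's TSS: allocate a GDT
// slot on first use, point the descriptor at the TSS, and mark it as an
// available 32-bit TSS (type 9) so it can be the target of a task switch.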
static void redoKernelTaskTSS()
{
    if (!s_kernelTask->selector())
        s_kernelTask->setSelector(allocateGDTEntry());

    auto& tssDescriptor = getGDTEntry(s_kernelTask->selector());
    tssDescriptor.setBase(&s_kernelTask->tss());
    tssDescriptor.setLimit(0xffff);
    tssDescriptor.dpl = 0;
    tssDescriptor.segment_present = 1;
    tssDescriptor.granularity = 1;
    tssDescriptor.zero = 0;
    tssDescriptor.operation_size = 1;
    tssDescriptor.descriptor_type = 0;
    tssDescriptor.type = 9;

    flushGDT();
}

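// Refresh the kernel task's TSS descriptor, point its backlink at the current
// task, and load TR with the kernel task's selector before IRET-ing into a
// newly created task.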
void Task::prepForIRETToNewTask()
{
    redoKernelTaskTSS();
    s_kernelTask->tss().backlink = current->selector();
    loadTaskRegister(s_kernelTask->selector());
}

void Task::initialize()
{
    current = nullptr;
    next_pid = 0;
    s_tasks = new InlineLinkedList<Task>;
    s_deadTasks = new InlineLinkedList<Task>;
    s_kernelTask = Task::createKernelTask(nullptr, "colonel");
    s_hostname = new String("birx");
    redoKernelTaskTSS();
    loadTaskRegister(s_kernelTask->selector());
}

#ifdef TASK_SANITY_CHECKS
void Task::checkSanity(const char* msg)
{
    char ch = current->name()[0];
    kprintf("<%p> %s{%u}%b [%d] :%b: sanity check <%s>\n",
        current->name().characters(),
        current->name().characters(),
        current->name().length(),
        current->name()[current->name().length() - 1],
        current->pid(), ch, msg ? msg : "");
    ASSERT((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z'));
}
#endif

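// Gives a Ring3 task its own LDT: a small table of descriptors registered in
// the GDT as an LDT descriptor, with the resulting selector stored in the TSS.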
void Task::allocateLDT()
{
    ASSERT(!m_tss.ldt);
    static const WORD numLDTEntries = 4;
    WORD newLDTSelector = allocateGDTEntry();
    m_ldtEntries = new Descriptor[numLDTEntries];
#if 0
    kprintf("new ldt selector = %x\n", newLDTSelector);
    kprintf("new ldt table at = %p\n", m_ldtEntries);
    kprintf("new ldt table size = %u\n", (numLDTEntries * 8) - 1);
#endif
    Descriptor& ldt = getGDTEntry(newLDTSelector);
    ldt.setBase(m_ldtEntries);
    ldt.setLimit(numLDTEntries * 8 - 1);
    ldt.dpl = 0;
    ldt.segment_present = 1;
    ldt.granularity = 0;
    ldt.zero = 0;
    ldt.operation_size = 1;
    ldt.descriptor_type = 0;
    ldt.type = Descriptor::LDT;
    m_tss.ldt = newLDTSelector;
}

Vector<Task*> Task::allTasks()
{
    InterruptDisabler disabler;
    Vector<Task*> tasks;
    tasks.ensureCapacity(s_tasks->sizeSlow());
    for (auto* task = s_tasks->head(); task; task = task->next())
        tasks.append(task);
    return tasks;
}

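// Regions are carved out with a simple bump allocator: each new region starts
// at m_nextRegion, and the cursor is then advanced past it plus a 16 KB gap.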
Task::Region* Task::allocateRegion(size_t size, String&& name)
{
    // FIXME: This needs sanity checks. What if this overlaps existing regions?
    auto zone = MemoryManager::the().createZone(size);
    ASSERT(zone);
    m_regions.append(make<Region>(m_nextRegion, size, move(zone), move(name)));
    m_nextRegion = m_nextRegion.offset(size).offset(16384);
    return m_regions.last().ptr();
}

bool Task::deallocateRegion(Region& region)
{
    for (size_t i = 0; i < m_regions.size(); ++i) {
        if (m_regions[i].ptr() == &region) {
            // FIXME: This seems racy.
            MemoryManager::the().unmapRegion(*this, region);
            m_regions.remove(i);
            return true;
        }
    }
    return false;
}

Task::Region* Task::regionFromRange(LinearAddress laddr, size_t size)
{
    for (auto& region : m_regions) {
        if (region->linearAddress == laddr && region->size == size)
            return region.ptr();
    }
    return nullptr;
}

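// sys$mmap/sys$munmap: anonymous mappings only. A region of the requested size
// is allocated and mapped into the task; munmap only accepts an exact
// address/size match for an existing region.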
void* Task::sys$mmap(void* addr, size_t size)
{
    // FIXME: Implement mapping at a client-preferred address.
    ASSERT(addr == nullptr);
    auto* region = allocateRegion(size, "mmap");
    if (!region)
        return (void*)-1;
    MemoryManager::the().mapRegion(*this, *region);
    return (void*)region->linearAddress.get();
}

int Task::sys$munmap(void* addr, size_t size)
{
    auto* region = regionFromRange(LinearAddress((dword)addr), size);
    if (!region)
        return -1;
    if (!deallocateRegion(*region))
        return -1;
    return 0;
}

int Task::sys$gethostname(char* buffer, size_t size)
{
    String hn;
    {
        InterruptDisabler disabler;
        hn = hostname(disabler).isolatedCopy();
    }
    if (size < (hn.length() + 1))
        return -ENAMETOOLONG;
    memcpy(buffer, hn.characters(), hn.length() + 1);
    return 0;
}

int Task::sys$spawn(const char* path, const char** args)
{
    int error = 0;
    auto* child = Task::createUserTask(path, m_uid, m_gid, m_pid, error, args);
    if (child)
        return child->pid();
    return error;
}

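// Loads an ELF executable from the VFS and builds a new Ring3 task around it:
// read the whole file, let ExecSpace allocate page-rounded regions through
// hookableAlloc, resolve the "_start" symbol as the entry point, and finally
// restore the caller's address-space mappings before returning.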
Task* Task::createUserTask(const String& path, uid_t uid, gid_t gid, pid_t parentPID, int& error, const char** args)
{
    auto parts = path.split('/');
    if (parts.isEmpty()) {
        error = -ENOENT;
        return nullptr;
    }

    RetainPtr<VirtualFileSystem::Node> cwd;
    {
        InterruptDisabler disabler;
        if (auto* parentTask = Task::fromPID(parentPID))
            cwd = parentTask->m_cwd.copyRef();
    }

    auto handle = VirtualFileSystem::the().open(path, cwd.ptr());
    if (!handle) {
        error = -ENOENT; // FIXME: Get a more detailed error from VFS.
        return nullptr;
    }

    auto elfData = handle->readEntireFile();
    if (!elfData) {
        error = -EIO; // FIXME: Get a more detailed error from VFS.
        return nullptr;
    }

    Vector<String> taskArguments;
    if (args) {
        for (size_t i = 0; args[i]; ++i) {
            taskArguments.append(args[i]);
        }
    } else {
        taskArguments.append(parts.last());
    }

    InterruptDisabler disabler; // FIXME: Get rid of this, jesus christ. This "critical" section is HUGE.
    Task* t = new Task(parts.takeLast(), uid, gid, parentPID, Ring3);
    t->m_arguments = move(taskArguments);

    ExecSpace space;
    space.hookableAlloc = [&] (const String& name, size_t size) {
        if (!size)
            return (void*)nullptr;
        size = ((size / 4096) + 1) * 4096;
        Region* region = t->allocateRegion(size, String(name));
        ASSERT(region);
        MemoryManager::the().mapRegion(*t, *region);
        return (void*)region->linearAddress.asPtr();
    };
    bool success = space.loadELF(move(elfData));
    if (!success) {
        // FIXME: This is ugly. If we need to do this, it should be at a different level.
        MemoryManager::the().unmapRegionsForTask(*t);
        MemoryManager::the().mapRegionsForTask(*current);
        delete t;
        kprintf("Failure loading ELF %s\n", path.characters());
        error = -ENOEXEC;
        return nullptr;
    }

    t->m_tss.eip = (dword)space.symbolPtr("_start");
    if (!t->m_tss.eip) {
        // FIXME: This is ugly. If we need to do this, it should be at a different level.
        MemoryManager::the().unmapRegionsForTask(*t);
        MemoryManager::the().mapRegionsForTask(*current);
        delete t;
        error = -ENOEXEC;
        return nullptr;
    }

    // FIXME: This is ugly. If we need to do this, it should be at a different level.
    MemoryManager::the().unmapRegionsForTask(*t);
    MemoryManager::the().mapRegionsForTask(*current);

    s_tasks->prepend(t);
    system.nprocess++;
#ifdef TASK_DEBUG
    kprintf("Task %u (%s) spawned @ %p\n", t->pid(), t->name().characters(), t->m_tss.eip);
#endif
    error = 0;
    return t;
}

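// Copies the task's argument strings into a freshly mapped "argv" page: the
// char* pointer array sits at the start of the page, followed by the
// NUL-terminated strings themselves.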
int Task::sys$get_arguments(int* argc, char*** argv)
{
    auto* region = allocateRegion(4096, "argv");
    if (!region)
        return -ENOMEM;
    MemoryManager::the().mapRegion(*this, *region);
    char* argpage = (char*)region->linearAddress.get();
    *argc = m_arguments.size();
    *argv = (char**)argpage;
    char* bufptr = argpage + (sizeof(char*) * m_arguments.size());
    for (size_t i = 0; i < m_arguments.size(); ++i) {
        (*argv)[i] = bufptr;
        memcpy(bufptr, m_arguments[i].characters(), m_arguments[i].length());
        bufptr += m_arguments[i].length();
        *(bufptr++) = '\0';
    }
    return 0;
}

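// Kernel (Ring0) tasks run at the given entry point; PID 0 (the "colonel" task
// created during Task::initialize()) is not added to the scheduling list here.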
Task* Task::createKernelTask(void (*e)(), String&& name)
{
    Task* task = new Task(move(name), (uid_t)0, (gid_t)0, (pid_t)0, Ring0);
    task->m_tss.eip = (dword)e;

    if (task->pid() != 0) {
        InterruptDisabler disabler;
        s_tasks->prepend(task);
        system.nprocess++;
#ifdef TASK_DEBUG
        kprintf("Kernel task %u (%s) spawned @ %p\n", task->pid(), task->name().characters(), task->m_tss.eip);
#endif
    }

    return task;
}

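// The segment selectors below encode the privilege level: 0x08/0x10 are the
// kernel code/data segments, while 0x1b/0x23 are the user code/data segments
// (GDT entries 3 and 4 with RPL 3).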
Task::Task(String&& name, uid_t uid, gid_t gid, pid_t parentPID, RingLevel ring)
    : m_name(move(name))
    , m_pid(next_pid++)
    , m_uid(uid)
    , m_gid(gid)
    , m_state(Runnable)
    , m_ring(ring)
    , m_parentPID(parentPID)
{
    m_fileHandles.append(nullptr); // stdin
    m_fileHandles.append(nullptr); // stdout
    m_fileHandles.append(nullptr); // stderr

    auto* parentTask = Task::fromPID(parentPID);
    if (parentTask)
        m_cwd = parentTask->m_cwd.copyRef();
    else
        m_cwd = nullptr;

    m_nextRegion = LinearAddress(0x600000);

    memset(&m_tss, 0, sizeof(m_tss));

    if (isRing3()) {
        memset(&m_ldtEntries, 0, sizeof(m_ldtEntries));
        allocateLDT();
    }

    // Only IF is set when a task boots.
    m_tss.eflags = 0x0202;

    word cs, ds, ss;
    if (isRing0()) {
        cs = 0x08;
        ds = 0x10;
        ss = 0x10;
    } else {
        cs = 0x1b;
        ds = 0x23;
        ss = 0x23;
    }

    m_tss.ds = ds;
    m_tss.es = ds;
    m_tss.fs = ds;
    m_tss.gs = ds;
    m_tss.ss = ss;
    m_tss.cs = cs;

    m_tss.cr3 = MemoryManager::the().pageDirectoryBase().get();

    if (isRing0()) {
        // FIXME: This memory is leaked.
        // But uh, there's also no kernel task termination, so I guess it's not technically leaked...
        dword stackBottom = (dword)kmalloc(defaultStackSize);
        m_stackTop = (stackBottom + defaultStackSize) & 0xfffffff8;
        m_tss.esp = m_stackTop;
    } else {
        auto* region = allocateRegion(defaultStackSize, "stack");
        ASSERT(region);
        m_stackTop = region->linearAddress.offset(defaultStackSize).get() & 0xfffffff8;
    }

    m_tss.esp = m_stackTop;

    if (isRing3()) {
        // Ring3 tasks need a separate stack for Ring0.
        m_kernelStack = kmalloc(defaultStackSize);
        DWORD ring0StackTop = ((DWORD)m_kernelStack + defaultStackSize) & 0xfffffff8;
        m_tss.ss0 = 0x10;
        m_tss.esp0 = ring0StackTop;
    }

    // HACK: Ring2 SS in the TSS is the current PID.
    m_tss.ss2 = m_pid;
    m_farPtr.offset = 0x98765432;
}

Task::~Task()
{
    InterruptDisabler disabler;
    system.nprocess--;
    delete [] m_ldtEntries;
    m_ldtEntries = nullptr;

    if (m_kernelStack) {
        kfree(m_kernelStack);
        m_kernelStack = nullptr;
    }
}

void Task::dumpRegions()
{
    kprintf("Task %s(%u) regions:\n", name().characters(), pid());
    kprintf("BEGIN END SIZE NAME\n");
    for (auto& region : m_regions) {
        kprintf("%x -- %x %x %s\n",
            region->linearAddress.get(),
            region->linearAddress.offset(region->size - 1).get(),
            region->size,
            region->name.characters());
    }
}

void Task::sys$exit(int status)
{
    cli();
#ifdef TASK_DEBUG
    kprintf("sys$exit: %s(%u) exit with status %d\n", name().characters(), pid(), status);
#endif

    setState(Exiting);

    MemoryManager::the().unmapRegionsForTask(*this);
    s_tasks->remove(this);

    if (!scheduleNewTask()) {
        kprintf("Task::sys$exit: Failed to schedule a new task :(\n");
        HANG;
    }

    s_deadTasks->append(this);
    switchNow();
}

void Task::taskDidCrash(Task* crashedTask)
{
    ASSERT_INTERRUPTS_DISABLED();
    crashedTask->setState(Crashing);
    crashedTask->dumpRegions();

    s_tasks->remove(crashedTask);
    MemoryManager::the().unmapRegionsForTask(*crashedTask);

    if (!scheduleNewTask()) {
        kprintf("Task::taskDidCrash: Failed to schedule a new task :(\n");
        HANG;
    }

    s_deadTasks->append(crashedTask);
    switchNow();
}

void Task::doHouseKeeping()
{
    InterruptDisabler disabler;
    if (s_deadTasks->isEmpty())
        return;
    Task* next = nullptr;
    for (auto* deadTask = s_deadTasks->head(); deadTask; deadTask = next) {
        next = deadTask->next();
        delete deadTask;
    }
    s_deadTasks->clear();
}

void yield()
{
    if (!current) {
        kprintf("PANIC: yield() with !current\n");
        HANG;
    }
    //kprintf("%s<%u> yield()\n", current->name().characters(), current->pid());

    InterruptDisabler disabler;
    if (!scheduleNewTask())
        return;

    //kprintf("yield() jumping to new task: %x (%s)\n", current->farPtr().selector, current->name().characters());
    switchNow();
}

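// Performs the actual hardware task switch: mark the destination TSS descriptor
// as available (type 9) again, then far-jump through the task's far pointer so
// the CPU loads the new TSS.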
void switchNow()
{
    Descriptor& descriptor = getGDTEntry(current->selector());
    descriptor.type = 9;
    flushGDT();
    asm("sti\n"
        "ljmp *(%%eax)\n"
        ::"a"(&current->farPtr())
    );
}

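// Scheduler: first wake any blocked tasks whose condition is now satisfied
// (sleep expired, waitee gone, data available to read), then rotate the task
// list round-robin until a Runnable/Running task is found. If nothing wants to
// run, fall back to the kernel task.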
bool scheduleNewTask()
{
    ASSERT_INTERRUPTS_DISABLED();

    if (!current) {
        // XXX: The first ever contextSwitch() goes to the idle task.
        //      This is to set up a reliable place we can return to.
        return contextSwitch(Task::kernelTask());
    }

    // Check and unblock tasks whose wait conditions have been met.
    for (auto* task = s_tasks->head(); task; task = task->next()) {
        if (task->state() == Task::BlockedSleep) {
            if (task->wakeupTime() <= system.uptime) {
                task->unblock();
                continue;
            }
        }

        if (task->state() == Task::BlockedWait) {
            if (!Task::fromPID(task->waitee())) {
                task->unblock();
                continue;
            }
        }

        if (task->state() == Task::BlockedRead) {
            ASSERT(task->m_fdBlockedOnRead != -1);
            if (task->m_fileHandles[task->m_fdBlockedOnRead]->hasDataAvailableForRead()) {
                task->unblock();
                continue;
            }
        }
    }

#if 0
    kprintf("Scheduler choices:\n");
    for (auto* task = s_tasks->head(); task; task = task->next()) {
        if (task->state() == Task::BlockedWait || task->state() == Task::BlockedSleep)
            continue;
        kprintf("%w %s(%u)\n", task->state(), task->name().characters(), task->pid());
    }
#endif

    auto* prevHead = s_tasks->head();
    for (;;) {
        // Move head to tail.
        s_tasks->append(s_tasks->removeHead());
        auto* task = s_tasks->head();

        if (task->state() == Task::Runnable || task->state() == Task::Running) {
            //kprintf("switch to %s (%p vs %p)\n", task->name().characters(), task, current);
            return contextSwitch(task);
        }

        if (task == prevHead) {
            // Back at the starting point: nothing wants to run.
            kprintf("Nothing wants to run!\n");
            kprintf("PID OWNER STATE NSCHED NAME\n");
            for (auto* task = s_tasks->head(); task; task = task->next()) {
                kprintf("%w %w:%w %b %w %s\n",
                    task->pid(),
                    task->uid(),
                    task->gid(),
                    task->state(),
                    task->timesScheduled(),
                    task->name().characters());
            }
            kprintf("Switch to kernel task\n");
            return contextSwitch(Task::kernelTask());
        }
    }
}

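// Switches bookkeeping and address space over to `t`: give it a fresh tick
// budget, swap the mapped regions from `current` to `t`, and rebuild t's TSS
// descriptor (marked busy, type 11). The actual jump happens in switchNow().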
static bool contextSwitch(Task* t)
{
    //kprintf("c_s to %s (same:%u)\n", t->name().characters(), current == t);
    t->setTicksLeft(5);
    t->didSchedule();

    if (current == t)
        return false;

    // Some sanity checking to force a crash earlier.
    auto csRPL = t->tss().cs & 3;
    auto ssRPL = t->tss().ss & 3;

    if (csRPL != ssRPL) {
        kprintf("Fuckup! Switching from %s(%u) to %s(%u) has RPL mismatch\n",
            current->name().characters(), current->pid(),
            t->name().characters(), t->pid());
        kprintf("code: %w:%x\n", t->tss().cs, t->tss().eip);
        kprintf(" stk: %w:%x\n", t->tss().ss, t->tss().esp);
        ASSERT(csRPL == ssRPL);
    }

    if (current) {
        // If the last task hasn't blocked (still marked as running),
        // mark it as runnable for the next round.
        if (current->state() == Task::Running)
            current->setState(Task::Runnable);

        bool success = MemoryManager::the().unmapRegionsForTask(*current);
        ASSERT(success);
    }

    bool success = MemoryManager::the().mapRegionsForTask(*t);
    ASSERT(success);

    current = t;
    t->setState(Task::Running);

    if (!t->selector())
        t->setSelector(allocateGDTEntry());

    auto& tssDescriptor = getGDTEntry(t->selector());
    tssDescriptor.limit_hi = 0;
    tssDescriptor.limit_lo = 0xFFFF;
    tssDescriptor.base_lo = (DWORD)(&t->tss()) & 0xFFFF;
    tssDescriptor.base_hi = ((DWORD)(&t->tss()) >> 16) & 0xFF;
    tssDescriptor.base_hi2 = ((DWORD)(&t->tss()) >> 24) & 0xFF;
    tssDescriptor.dpl = 0;
    tssDescriptor.segment_present = 1;
    tssDescriptor.granularity = 1;
    tssDescriptor.zero = 0;
    tssDescriptor.operation_size = 1;
    tssDescriptor.descriptor_type = 0;
    tssDescriptor.type = 11; // Busy TSS

    flushGDT();
    return true;
}

Task* Task::fromPID(pid_t pid)
{
    for (auto* task = s_tasks->head(); task; task = task->next()) {
        if (task->pid() == pid)
            return task;
    }
    return nullptr;
}

FileHandle* Task::fileHandleIfExists(int fd)
{
    if (fd < 0)
        return nullptr;
    if ((unsigned)fd < m_fileHandles.size())
        return m_fileHandles[fd].ptr();
    return nullptr;
}

ssize_t Task::sys$get_dir_entries(int fd, void* buffer, size_t size)
{
    auto* handle = fileHandleIfExists(fd);
    if (!handle)
        return -1;
    return handle->get_dir_entries((byte*)buffer, size);
}

int Task::sys$seek(int fd, int offset)
{
    auto* handle = fileHandleIfExists(fd);
    if (!handle)
        return -1;
    return handle->seek(offset, SEEK_SET);
}

ssize_t Task::sys$read(int fd, void* outbuf, size_t nread)
{
    Task::checkSanity("Task::sys$read");
#ifdef DEBUG_IO
    kprintf("Task::sys$read: called(%d, %p, %u)\n", fd, outbuf, nread);
#endif
    auto* handle = fileHandleIfExists(fd);
#ifdef DEBUG_IO
    kprintf("Task::sys$read: handle=%p\n", handle);
#endif
    if (!handle) {
        kprintf("Task::sys$read: handle not found :(\n");
        return -1;
    }
#ifdef DEBUG_IO
    kprintf("call read on handle=%p\n", handle);
#endif
    if (handle->isBlocking()) {
        if (!handle->hasDataAvailableForRead()) {
            m_fdBlockedOnRead = fd;
            block(BlockedRead);
            yield();
        }
    }
    nread = handle->read((byte*)outbuf, nread);
#ifdef DEBUG_IO
    kprintf("Task::sys$read: nread=%u\n", nread);
#endif
    return nread;
}

int Task::sys$close(int fd)
{
    auto* handle = fileHandleIfExists(fd);
    if (!handle)
        return -1;
    // FIXME: Implement.
    return 0;
}

int Task::sys$lstat(const char* path, void* statbuf)
{
    auto handle = VirtualFileSystem::the().open(path, m_cwd.ptr());
    if (!handle)
        return -1;
    handle->stat((Unix::stat*)statbuf);
    return 0;
}

int Task::sys$chdir(const char* path)
{
    auto handle = VirtualFileSystem::the().open(path, m_cwd.ptr());
    if (!handle)
        return -ENOENT; // FIXME: More detailed error.
    if (!handle->isDirectory())
        return -ENOTDIR;
    m_cwd = handle->vnode();
    kprintf("m_cwd <- %p (%u)\n", m_cwd.ptr(), handle->vnode()->inode.index());
    return 0;
}

int Task::sys$getcwd(char* buffer, size_t size)
{
    // FIXME: Implement!
    return -ENOTIMPL;
}

int Task::sys$open(const char* path, size_t pathLength)
{
#ifdef DEBUG_IO
    kprintf("Task::sys$open(): PID=%u, path=%s {%u}\n", m_pid, path, pathLength);
#endif
    if (m_fileHandles.size() >= m_maxFileHandles)
        return -EMFILE;
    auto handle = VirtualFileSystem::the().open(String(path, pathLength), m_cwd.ptr());
    if (!handle)
        return -ENOENT; // FIXME: Detailed error.
    int fd = m_fileHandles.size();
    handle->setFD(fd);
    m_fileHandles.append(move(handle));
    return fd;
}

int Task::sys$kill(pid_t pid, int sig)
{
    (void) sig;
    if (pid == 0) {
        // FIXME: Send to same-group processes.
        ASSERT(pid != 0);
    }
    if (pid == -1) {
        // FIXME: Send to all processes.
        ASSERT(pid != -1);
    }
    ASSERT_NOT_REACHED();
    Task* peer = Task::fromPID(pid);
    if (!peer) {
        // errno = ESRCH;
        return -1;
    }
    return -1;
}

int Task::sys$sleep(unsigned seconds)
{
    if (!seconds)
        return 0;
    sleep(seconds * TICKS_PER_SECOND);
    return 0;
}

int Task::sys$gettimeofday(timeval* tv)
{
    InterruptDisabler disabler;
    auto now = RTC::now();
    tv->tv_sec = now;
    tv->tv_usec = 0;
    return 0;
}

uid_t Task::sys$getuid()
{
    return m_uid;
}

gid_t Task::sys$getgid()
{
    return m_gid;
}

pid_t Task::sys$getpid()
{
    return m_pid;
}

pid_t Task::sys$waitpid(pid_t waitee)
{
    InterruptDisabler disabler;
    if (!Task::fromPID(waitee))
        return -1;
    m_waitee = waitee;
    block(BlockedWait);
    yield();
    return m_waitee;
}

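// Blocking bookkeeping: block() stores the blocked state on the current task
// and bumps system.nblocked; unblock() reverses that and makes the task
// Runnable again.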
void Task::unblock()
{
    ASSERT(m_state != Task::Runnable && m_state != Task::Running);
    system.nblocked--;
    m_state = Task::Runnable;
}

void Task::block(Task::State state)
{
    ASSERT(current->state() == Task::Running);
    system.nblocked++;
    current->setState(state);
}

void block(Task::State state)
{
    current->block(state);
    yield();
}

void sleep(DWORD ticks)
{
    ASSERT(current->state() == Task::Running);
    current->setWakeupTime(system.uptime + ticks);
    current->block(Task::BlockedSleep);
    yield();
}

Task* Task::kernelTask()
{
    ASSERT(s_kernelTask);
    return s_kernelTask;
}

Task::Region::Region(LinearAddress a, size_t s, RetainPtr<Zone>&& z, String&& n)
    : linearAddress(a)
    , size(s)
    , zone(move(z))
    , name(move(n))
{
}

Task::Region::~Region()
{
}