Process.cpp

  1. #include "types.h"
  2. #include "Process.h"
  3. #include "kmalloc.h"
  4. #include "StdLib.h"
  5. #include "i386.h"
  6. #include "system.h"
  7. #include <Kernel/FileDescriptor.h>
  8. #include <Kernel/VirtualFileSystem.h>
  9. #include <Kernel/NullDevice.h>
  10. #include "ELFLoader.h"
  11. #include "MemoryManager.h"
  12. #include "i8253.h"
  13. #include "RTC.h"
  14. #include <AK/StdLibExtras.h>
  15. #include <LibC/signal_numbers.h>
  16. #include <LibC/errno_numbers.h>
  17. #include "Syscall.h"
  18. #include "Scheduler.h"
  19. #include "FIFO.h"
  20. #include "KSyms.h"
  21. #include <Kernel/Socket.h>
  22. #include "MasterPTY.h"
  23. #include "elf.h"
  24. #include <AK/StringBuilder.h>
  25. //#define DEBUG_IO
  26. //#define TASK_DEBUG
  27. //#define FORK_DEBUG
  28. #define SIGNAL_DEBUG
  29. #define MAX_PROCESS_GIDS 32
  30. //#define SHARED_BUFFER_DEBUG
  31. static const dword default_kernel_stack_size = 16384;
  32. static const dword default_userspace_stack_size = 65536;
  33. static pid_t next_pid;
  34. InlineLinkedList<Process>* g_processes;
  35. static String* s_hostname;
  36. static Lock* s_hostname_lock;
  37. CoolGlobals* g_cool_globals;
  38. void Process::initialize()
  39. {
  40. #ifdef COOL_GLOBALS
  41. g_cool_globals = reinterpret_cast<CoolGlobals*>(0x1000);
  42. #endif
  43. next_pid = 0;
  44. g_processes = new InlineLinkedList<Process>;
  45. s_hostname = new String("courage");
  46. s_hostname_lock = new Lock;
  47. Scheduler::initialize();
  48. }
  49. Vector<pid_t> Process::all_pids()
  50. {
  51. Vector<pid_t> pids;
  52. pids.ensure_capacity(system.nprocess);
  53. InterruptDisabler disabler;
  54. for (auto* process = g_processes->head(); process; process = process->next())
  55. pids.append(process->pid());
  56. return pids;
  57. }
  58. Vector<Process*> Process::all_processes()
  59. {
  60. Vector<Process*> processes;
  61. processes.ensure_capacity(system.nprocess);
  62. InterruptDisabler disabler;
  63. for (auto* process = g_processes->head(); process; process = process->next())
  64. processes.append(process);
  65. return processes;
  66. }
  67. bool Process::in_group(gid_t gid) const
  68. {
  69. return m_gids.contains(gid);
  70. }
  71. Region* Process::allocate_region(LinearAddress laddr, size_t size, String&& name, bool is_readable, bool is_writable, bool commit)
  72. {
  73. size = PAGE_ROUND_UP(size);
  74. // FIXME: This needs sanity checks. What if this overlaps existing regions?
  75. if (laddr.is_null()) {
  76. laddr = m_next_region;
  77. m_next_region = m_next_region.offset(size).offset(PAGE_SIZE);
  78. }
  79. laddr.mask(0xfffff000);
  80. m_regions.append(adopt(*new Region(laddr, size, move(name), is_readable, is_writable)));
  81. MM.map_region(*this, *m_regions.last());
  82. if (commit)
  83. m_regions.last()->commit();
  84. return m_regions.last().ptr();
  85. }
  86. Region* Process::allocate_file_backed_region(LinearAddress laddr, size_t size, RetainPtr<Inode>&& inode, String&& name, bool is_readable, bool is_writable)
  87. {
  88. size = PAGE_ROUND_UP(size);
  89. // FIXME: This needs sanity checks. What if this overlaps existing regions?
  90. if (laddr.is_null()) {
  91. laddr = m_next_region;
  92. m_next_region = m_next_region.offset(size).offset(PAGE_SIZE);
  93. }
  94. laddr.mask(0xfffff000);
  95. m_regions.append(adopt(*new Region(laddr, size, move(inode), move(name), is_readable, is_writable)));
  96. MM.map_region(*this, *m_regions.last());
  97. return m_regions.last().ptr();
  98. }
  99. Region* Process::allocate_region_with_vmo(LinearAddress laddr, size_t size, Retained<VMObject>&& vmo, size_t offset_in_vmo, String&& name, bool is_readable, bool is_writable)
  100. {
  101. size = PAGE_ROUND_UP(size);
  102. // FIXME: This needs sanity checks. What if this overlaps existing regions?
  103. if (laddr.is_null()) {
  104. laddr = m_next_region;
  105. m_next_region = m_next_region.offset(size).offset(PAGE_SIZE);
  106. }
  107. laddr.mask(0xfffff000);
  108. offset_in_vmo &= PAGE_MASK;
  109. size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
  110. m_regions.append(adopt(*new Region(laddr, size, move(vmo), offset_in_vmo, move(name), is_readable, is_writable)));
  111. MM.map_region(*this, *m_regions.last());
  112. return m_regions.last().ptr();
  113. }
  114. bool Process::deallocate_region(Region& region)
  115. {
  116. InterruptDisabler disabler;
  117. for (int i = 0; i < m_regions.size(); ++i) {
  118. if (m_regions[i].ptr() == &region) {
  119. MM.unmap_region(region);
  120. m_regions.remove(i);
  121. return true;
  122. }
  123. }
  124. return false;
  125. }
  126. Region* Process::region_from_range(LinearAddress laddr, size_t size)
  127. {
  128. size = PAGE_ROUND_UP(size);
  129. for (auto& region : m_regions) {
  130. if (region->laddr() == laddr && region->size() == size)
  131. return region.ptr();
  132. }
  133. return nullptr;
  134. }
  135. int Process::sys$set_mmap_name(void* addr, size_t size, const char* name)
  136. {
  137. if (!validate_read_str(name))
  138. return -EFAULT;
  139. auto* region = region_from_range(LinearAddress((dword)addr), size);
  140. if (!region)
  141. return -EINVAL;
  142. region->set_name(String(name));
  143. return 0;
  144. }
  145. void* Process::sys$mmap(const Syscall::SC_mmap_params* params)
  146. {
  147. if (!validate_read(params, sizeof(Syscall::SC_mmap_params)))
  148. return (void*)-EFAULT;
  149. void* addr = (void*)params->addr;
  150. size_t size = params->size;
  151. int prot = params->prot;
  152. int flags = params->flags;
  153. int fd = params->fd;
  154. off_t offset = params->offset;
  155. if (size == 0)
  156. return (void*)-EINVAL;
  157. if ((dword)addr & ~PAGE_MASK)
  158. return (void*)-EINVAL;
  159. if (flags & MAP_ANONYMOUS) {
  160. auto* region = allocate_region(LinearAddress((dword)addr), size, "mmap", prot & PROT_READ, prot & PROT_WRITE, false);
  161. if (!region)
  162. return (void*)-ENOMEM;
  163. if (flags & MAP_SHARED)
  164. region->set_shared(true);
  165. return region->laddr().as_ptr();
  166. }
  167. if (offset & ~PAGE_MASK)
  168. return (void*)-EINVAL;
  169. auto* descriptor = file_descriptor(fd);
  170. if (!descriptor)
  171. return (void*)-EBADF;
  172. if (!descriptor->supports_mmap())
  173. return (void*)-ENODEV;
  174. auto* region = descriptor->mmap(*this, LinearAddress((dword)addr), offset, size, prot);
  175. if (!region)
  176. return (void*)-ENOMEM;
  177. if (flags & MAP_SHARED)
  178. region->set_shared(true);
  179. return region->laddr().as_ptr();
  180. }
  181. int Process::sys$munmap(void* addr, size_t size)
  182. {
  183. auto* region = region_from_range(LinearAddress((dword)addr), size);
  184. if (!region)
  185. return -EINVAL;
  186. if (!deallocate_region(*region))
  187. return -EINVAL;
  188. return 0;
  189. }
  190. int Process::sys$gethostname(char* buffer, ssize_t size)
  191. {
  192. if (size < 0)
  193. return -EINVAL;
  194. if (!validate_write(buffer, size))
  195. return -EFAULT;
  196. LOCKER(*s_hostname_lock);
  197. if (size < (s_hostname->length() + 1))
  198. return -ENAMETOOLONG;
  199. strcpy(buffer, s_hostname->characters());
  200. return 0;
  201. }
  202. Process* Process::fork(RegisterDump& regs)
  203. {
  204. auto* child = new Process(String(m_name), m_uid, m_gid, m_pid, m_ring, m_cwd.copy_ref(), m_executable.copy_ref(), m_tty, this);
  205. if (!child)
  206. return nullptr;
  207. memcpy(child->m_signal_action_data, m_signal_action_data, sizeof(m_signal_action_data));
  208. child->m_signal_mask = m_signal_mask;
  209. #ifdef FORK_DEBUG
  210. dbgprintf("fork: child=%p\n", child);
  211. #endif
  212. for (auto& region : m_regions) {
  213. #ifdef FORK_DEBUG
  214. dbgprintf("fork: cloning Region{%p} \"%s\" L%x\n", region.ptr(), region->name().characters(), region->laddr().get());
  215. #endif
  216. auto cloned_region = region->clone();
  217. child->m_regions.append(move(cloned_region));
  218. MM.map_region(*child, *child->m_regions.last());
  219. if (region.ptr() == m_display_framebuffer_region.ptr())
  220. child->m_display_framebuffer_region = child->m_regions.last().copy_ref();
  221. }
  222. for (auto gid : m_gids)
  223. child->m_gids.set(gid);
  224. child->m_tss.eax = 0; // fork() returns 0 in the child :^)
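  // The rest of the child's TSS is seeded from the parent's trapped RegisterDump below,
  // so the child resumes in userspace right after the fork() syscall.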
  225. child->m_tss.ebx = regs.ebx;
  226. child->m_tss.ecx = regs.ecx;
  227. child->m_tss.edx = regs.edx;
  228. child->m_tss.ebp = regs.ebp;
  229. child->m_tss.esp = regs.esp_if_crossRing;
  230. child->m_tss.esi = regs.esi;
  231. child->m_tss.edi = regs.edi;
  232. child->m_tss.eflags = regs.eflags;
  233. child->m_tss.eip = regs.eip;
  234. child->m_tss.cs = regs.cs;
  235. child->m_tss.ds = regs.ds;
  236. child->m_tss.es = regs.es;
  237. child->m_tss.fs = regs.fs;
  238. child->m_tss.gs = regs.gs;
  239. child->m_tss.ss = regs.ss_if_crossRing;
  240. child->m_fpu_state = m_fpu_state;
  241. child->m_has_used_fpu = m_has_used_fpu;
  242. #ifdef FORK_DEBUG
  243. dbgprintf("fork: child will begin executing at %w:%x with stack %w:%x\n", child->m_tss.cs, child->m_tss.eip, child->m_tss.ss, child->m_tss.esp);
  244. #endif
  245. {
  246. InterruptDisabler disabler;
  247. g_processes->prepend(child);
  248. system.nprocess++;
  249. }
  250. #ifdef TASK_DEBUG
  251. kprintf("Process %u (%s) forked from %u @ %p\n", child->pid(), child->name().characters(), m_pid, child->m_tss.eip);
  252. #endif
  253. return child;
  254. }
  255. pid_t Process::sys$fork(RegisterDump& regs)
  256. {
  257. auto* child = fork(regs);
  258. ASSERT(child);
  259. return child->pid();
  260. }
  261. int Process::do_exec(String path, Vector<String> arguments, Vector<String> environment)
  262. {
  263. ASSERT(is_ring3());
  264. auto parts = path.split('/');
  265. if (parts.is_empty())
  266. return -ENOENT;
  267. int error;
  268. auto descriptor = VFS::the().open(path, error, 0, 0, cwd_inode());
  269. if (!descriptor) {
  270. ASSERT(error != 0);
  271. return error;
  272. }
  273. if (!descriptor->metadata().may_execute(m_euid, m_gids))
  274. return -EACCES;
  275. if (!descriptor->metadata().size) {
  276. kprintf("exec() of 0-length binaries not supported\n");
  277. return -ENOTIMPL;
  278. }
  279. dword entry_eip = 0;
  280. // FIXME: Is there a race here?
  281. auto old_page_directory = move(m_page_directory);
  282. m_page_directory = PageDirectory::create();
  283. #ifdef MM_DEBUG
  284. dbgprintf("Process %u exec: PD=%x created\n", pid(), m_page_directory.ptr());
  285. #endif
  286. ProcessPagingScope paging_scope(*this);
  287. auto vmo = VMObject::create_file_backed(descriptor->inode());
  288. #if 0
  289. // FIXME: I would like to do this, but it would instantiate all the damn inodes.
  290. vmo->set_name(descriptor->absolute_path());
  291. #else
  292. vmo->set_name("ELF image");
  293. #endif
  294. RetainPtr<Region> region = allocate_region_with_vmo(LinearAddress(), descriptor->metadata().size, vmo.copy_ref(), 0, "executable", true, false);
  295. // FIXME: Should we consider doing on-demand paging here? Is it actually useful?
  296. bool success = region->page_in();
  297. ASSERT(success);
  298. {
  299. // Okay, here comes the sleight of hand, pay close attention..
  300. auto old_regions = move(m_regions);
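  // Stash the old regions aside (the old page directory was moved out above) so that a
  // failed ELF load can put everything back and return -ENOEXEC without harming the process.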
  301. ELFLoader loader(region->laddr().as_ptr());
  302. loader.map_section_hook = [&] (LinearAddress laddr, size_t size, size_t alignment, size_t offset_in_image, bool is_readable, bool is_writable, const String& name) {
  303. ASSERT(size);
  304. ASSERT(alignment == PAGE_SIZE);
  305. size = ((size / 4096) + 1) * 4096; // FIXME: Use ceil_div?
  306. (void) allocate_region_with_vmo(laddr, size, vmo.copy_ref(), offset_in_image, String(name), is_readable, is_writable);
  307. return laddr.as_ptr();
  308. };
  309. loader.alloc_section_hook = [&] (LinearAddress laddr, size_t size, size_t alignment, bool is_readable, bool is_writable, const String& name) {
  310. ASSERT(size);
  311. ASSERT(alignment == PAGE_SIZE);
  312. size = ((size / 4096) + 1) * 4096; // FIXME: Use ceil_div?
  313. (void) allocate_region(laddr, size, String(name), is_readable, is_writable);
  314. return laddr.as_ptr();
  315. };
  316. bool success = loader.load();
  317. if (!success) {
  318. m_page_directory = move(old_page_directory);
  319. // FIXME: RAII this somehow instead.
  320. ASSERT(current == this);
  321. MM.enter_process_paging_scope(*this);
  322. m_regions = move(old_regions);
  323. kprintf("sys$execve: Failure loading %s\n", path.characters());
  324. return -ENOEXEC;
  325. }
  326. entry_eip = loader.entry().get();
  327. if (!entry_eip) {
  328. m_page_directory = move(old_page_directory);
  329. // FIXME: RAII this somehow instead.
  330. ASSERT(current == this);
  331. MM.enter_process_paging_scope(*this);
  332. m_regions = move(old_regions);
  333. return -ENOEXEC;
  334. }
  335. }
  336. m_signal_stack_kernel_region = nullptr;
  337. m_signal_stack_user_region = nullptr;
  338. m_display_framebuffer_region = nullptr;
  339. set_default_signal_dispositions();
  340. m_signal_mask = 0xffffffff;
  341. m_pending_signals = 0;
  342. for (int i = 0; i < m_fds.size(); ++i) {
  343. auto& daf = m_fds[i];
  344. if (daf.descriptor && daf.flags & FD_CLOEXEC) {
  345. daf.descriptor->close();
  346. daf = { };
  347. }
  348. }
  349. // We cli() manually here because we don't want to get interrupted between do_exec() and Scheduler::yield().
  350. // The reason is that the task redirection we've set up above will be clobbered by the timer IRQ.
  351. // If we used an InterruptDisabler that sti()'d on exit, we might get timer-ticked too soon in exec().
  352. if (current == this)
  353. cli();
  354. Scheduler::prepare_to_modify_tss(*this);
  355. m_name = parts.take_last();
  356. dword old_esp0 = m_tss.esp0;
  357. memset(&m_tss, 0, sizeof(m_tss));
  358. m_tss.eflags = 0x0202;
  359. m_tss.eip = entry_eip;
  360. m_tss.cs = 0x1b;
  361. m_tss.ds = 0x23;
  362. m_tss.es = 0x23;
  363. m_tss.fs = 0x23;
  364. m_tss.gs = 0x23;
  365. m_tss.ss = 0x23;
  366. m_tss.cr3 = page_directory().cr3();
  367. make_userspace_stack(move(arguments), move(environment));
  368. m_tss.ss0 = 0x10;
  369. m_tss.esp0 = old_esp0;
  370. m_tss.ss2 = m_pid;
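  // HACK: As in the constructor, the otherwise-unused ring 2 SS field doubles as a PID stash.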
  371. m_executable = descriptor->inode();
  372. if (descriptor->metadata().is_setuid())
  373. m_euid = descriptor->metadata().uid;
  374. if (descriptor->metadata().is_setgid())
  375. m_egid = descriptor->metadata().gid;
  376. #ifdef TASK_DEBUG
  377. kprintf("Process %u (%s) exec'd %s @ %p\n", pid(), name().characters(), path.characters(), m_tss.eip);
  378. #endif
  379. set_state(Skip1SchedulerPass);
  380. return 0;
  381. }
  382. void Process::make_userspace_stack(Vector<String> arguments, Vector<String> environment)
  383. {
  384. auto* region = allocate_region(LinearAddress(), default_userspace_stack_size, "stack");
  385. ASSERT(region);
  386. m_stack_top3 = region->laddr().offset(default_userspace_stack_size).get();
  387. m_tss.esp = m_stack_top3;
  388. char* stack_base = (char*)region->laddr().get();
  389. int argc = arguments.size();
  390. char** argv = (char**)stack_base;
  391. char** env = argv + arguments.size() + 1;
  392. char* bufptr = stack_base + (sizeof(char*) * (arguments.size() + 1)) + (sizeof(char*) * (environment.size() + 1));
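  // Layout at the bottom of the stack region: the argv[] pointer array, then the envp[] pointer
  // array (both null-terminated), followed by the packed argument and environment strings.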
  393. size_t total_blob_size = 0;
  394. for (auto& a : arguments)
  395. total_blob_size += a.length() + 1;
  396. for (auto& e : environment)
  397. total_blob_size += e.length() + 1;
  398. size_t total_meta_size = sizeof(char*) * (arguments.size() + 1) + sizeof(char*) * (environment.size() + 1);
  399. // FIXME: It would be better if this didn't make us panic.
  400. ASSERT((total_blob_size + total_meta_size) < default_userspace_stack_size);
  401. for (int i = 0; i < arguments.size(); ++i) {
  402. argv[i] = bufptr;
  403. memcpy(bufptr, arguments[i].characters(), arguments[i].length());
  404. bufptr += arguments[i].length();
  405. *(bufptr++) = '\0';
  406. }
  407. argv[arguments.size()] = nullptr;
  408. for (int i = 0; i < environment.size(); ++i) {
  409. env[i] = bufptr;
  410. memcpy(bufptr, environment[i].characters(), environment[i].length());
  411. bufptr += environment[i].length();
  412. *(bufptr++) = '\0';
  413. }
  414. env[environment.size()] = nullptr;
  415. // NOTE: The stack needs to be 16-byte aligned.
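  // Push envp, argv, argc, and a final zero word onto the new stack for the program's entry point.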
  416. push_value_on_stack((dword)env);
  417. push_value_on_stack((dword)argv);
  418. push_value_on_stack((dword)argc);
  419. push_value_on_stack(0);
  420. }
  421. int Process::exec(String path, Vector<String> arguments, Vector<String> environment)
  422. {
  423. // The bulk of exec() is done by do_exec(), which ensures that all locals
  424. // are cleaned up by the time we yield-teleport below.
  425. int rc = do_exec(move(path), move(arguments), move(environment));
  426. if (rc < 0)
  427. return rc;
  428. if (current == this) {
  429. Scheduler::yield();
  430. ASSERT_NOT_REACHED();
  431. }
  432. return 0;
  433. }
  434. int Process::sys$execve(const char* filename, const char** argv, const char** envp)
  435. {
  436. // NOTE: Be extremely careful with allocating any kernel memory in exec().
  437. // On success, the kernel stack will be lost.
  438. if (!validate_read_str(filename))
  439. return -EFAULT;
  440. if (argv) {
  441. if (!validate_read_typed(argv))
  442. return -EFAULT;
  443. for (size_t i = 0; argv[i]; ++i) {
  444. if (!validate_read_str(argv[i]))
  445. return -EFAULT;
  446. }
  447. }
  448. if (envp) {
  449. if (!validate_read_typed(envp))
  450. return -EFAULT;
  451. for (size_t i = 0; envp[i]; ++i) {
  452. if (!validate_read_str(envp[i]))
  453. return -EFAULT;
  454. }
  455. }
  456. String path(filename);
  457. Vector<String> arguments;
  458. Vector<String> environment;
  459. {
  460. auto parts = path.split('/');
  461. if (argv) {
  462. for (size_t i = 0; argv[i]; ++i) {
  463. arguments.append(argv[i]);
  464. }
  465. } else {
  466. arguments.append(parts.last());
  467. }
  468. if (envp) {
  469. for (size_t i = 0; envp[i]; ++i)
  470. environment.append(envp[i]);
  471. }
  472. }
  473. int rc = exec(move(path), move(arguments), move(environment));
  474. ASSERT(rc < 0); // We should never continue after a successful exec!
  475. return rc;
  476. }
  477. Process* Process::create_user_process(const String& path, uid_t uid, gid_t gid, pid_t parent_pid, int& error, Vector<String>&& arguments, Vector<String>&& environment, TTY* tty)
  478. {
  479. // FIXME: Don't split() the path twice (sys$spawn also does it...)
  480. auto parts = path.split('/');
  481. if (arguments.is_empty()) {
  482. arguments.append(parts.last());
  483. }
  484. RetainPtr<Inode> cwd;
  485. {
  486. InterruptDisabler disabler;
  487. if (auto* parent = Process::from_pid(parent_pid))
  488. cwd = parent->m_cwd.copy_ref();
  489. }
  490. if (!cwd)
  491. cwd = VFS::the().root_inode();
  492. auto* process = new Process(parts.take_last(), uid, gid, parent_pid, Ring3, move(cwd), nullptr, tty);
  493. error = process->exec(path, move(arguments), move(environment));
  494. if (error != 0) {
  495. delete process;
  496. return nullptr;
  497. }
  498. {
  499. InterruptDisabler disabler;
  500. g_processes->prepend(process);
  501. system.nprocess++;
  502. }
  503. #ifdef TASK_DEBUG
  504. kprintf("Process %u (%s) spawned @ %p\n", process->pid(), process->name().characters(), process->m_tss.eip);
  505. #endif
  506. error = 0;
  507. return process;
  508. }
  509. Process* Process::create_kernel_process(String&& name, void (*e)())
  510. {
  511. auto* process = new Process(move(name), (uid_t)0, (gid_t)0, (pid_t)0, Ring0);
  512. process->m_tss.eip = (dword)e;
  513. if (process->pid() != 0) {
  514. {
  515. InterruptDisabler disabler;
  516. g_processes->prepend(process);
  517. system.nprocess++;
  518. }
  519. #ifdef TASK_DEBUG
  520. kprintf("Kernel process %u (%s) spawned @ %p\n", process->pid(), process->name().characters(), process->m_tss.eip);
  521. #endif
  522. }
  523. return process;
  524. }
  525. Process::Process(String&& name, uid_t uid, gid_t gid, pid_t ppid, RingLevel ring, RetainPtr<Inode>&& cwd, RetainPtr<Inode>&& executable, TTY* tty, Process* fork_parent)
  526. : m_name(move(name))
  527. , m_pid(next_pid++) // FIXME: RACE: This variable looks racy!
  528. , m_uid(uid)
  529. , m_gid(gid)
  530. , m_euid(uid)
  531. , m_egid(gid)
  532. , m_state(Runnable)
  533. , m_ring(ring)
  534. , m_cwd(move(cwd))
  535. , m_executable(move(executable))
  536. , m_tty(tty)
  537. , m_ppid(ppid)
  538. {
  539. set_default_signal_dispositions();
  540. memset(&m_fpu_state, 0, sizeof(FPUState));
  541. m_gids.set(m_gid);
  542. if (fork_parent) {
  543. m_sid = fork_parent->m_sid;
  544. m_pgid = fork_parent->m_pgid;
  545. } else {
  546. // FIXME: Use a ProcessHandle? Presumably we're executing *IN* the parent right now though..
  547. InterruptDisabler disabler;
  548. if (auto* parent = Process::from_pid(m_ppid)) {
  549. m_sid = parent->m_sid;
  550. m_pgid = parent->m_pgid;
  551. }
  552. }
  553. m_page_directory = PageDirectory::create();
  554. #ifdef MM_DEBUG
  555. dbgprintf("Process %u ctor: PD=%x created\n", pid(), m_page_directory.ptr());
  556. #endif
  557. if (fork_parent) {
  558. m_fds.resize(fork_parent->m_fds.size());
  559. for (int i = 0; i < fork_parent->m_fds.size(); ++i) {
  560. if (!fork_parent->m_fds[i].descriptor)
  561. continue;
  562. #ifdef FORK_DEBUG
  563. dbgprintf("fork: cloning fd %u... (%p) istty? %u\n", i, fork_parent->m_fds[i].descriptor.ptr(), fork_parent->m_fds[i].descriptor->is_tty());
  564. #endif
  565. m_fds[i].descriptor = fork_parent->m_fds[i].descriptor->clone();
  566. m_fds[i].flags = fork_parent->m_fds[i].flags;
  567. }
  568. } else {
  569. m_fds.resize(m_max_open_file_descriptors);
  570. auto& device_to_use_as_tty = tty ? (CharacterDevice&)*tty : NullDevice::the();
  571. int error;
  572. m_fds[0].set(device_to_use_as_tty.open(error, O_RDONLY));
  573. m_fds[1].set(device_to_use_as_tty.open(error, O_WRONLY));
  574. m_fds[2].set(device_to_use_as_tty.open(error, O_WRONLY));
  575. }
  576. if (fork_parent)
  577. m_next_region = fork_parent->m_next_region;
  578. else
  579. m_next_region = LinearAddress(0x10000000);
  580. if (fork_parent) {
  581. memcpy(&m_tss, &fork_parent->m_tss, sizeof(m_tss));
  582. } else {
  583. memset(&m_tss, 0, sizeof(m_tss));
  584. // Only IF is set when a process boots.
  585. m_tss.eflags = 0x0202;
  586. word cs, ds, ss;
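  // Ring 0 processes get the kernel code/data selectors (0x08/0x10); ring 3 processes get the
  // userspace selectors (0x1b/0x23, i.e. RPL 3).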
  587. if (is_ring0()) {
  588. cs = 0x08;
  589. ds = 0x10;
  590. ss = 0x10;
  591. } else {
  592. cs = 0x1b;
  593. ds = 0x23;
  594. ss = 0x23;
  595. }
  596. m_tss.ds = ds;
  597. m_tss.es = ds;
  598. m_tss.fs = ds;
  599. m_tss.gs = ds;
  600. m_tss.ss = ss;
  601. m_tss.cs = cs;
  602. }
  603. m_tss.cr3 = page_directory().cr3();
  604. if (is_ring0()) {
  605. // FIXME: This memory is leaked.
  606. // But uh, there's also no kernel process termination, so I guess it's not technically leaked...
  607. dword stack_bottom = (dword)kmalloc_eternal(default_kernel_stack_size);
  608. m_stack_top0 = (stack_bottom + default_kernel_stack_size) & 0xfffffff8;
  609. m_tss.esp = m_stack_top0;
  610. } else {
  611. // Ring3 processes need a separate stack for Ring0.
  612. m_kernel_stack = kmalloc(default_kernel_stack_size);
  613. m_stack_top0 = ((dword)m_kernel_stack + default_kernel_stack_size) & 0xfffffff8;
  614. m_tss.ss0 = 0x10;
  615. m_tss.esp0 = m_stack_top0;
  616. }
  617. if (fork_parent) {
  618. m_sid = fork_parent->m_sid;
  619. m_pgid = fork_parent->m_pgid;
  620. m_umask = fork_parent->m_umask;
  621. }
  622. // HACK: Ring2 SS in the TSS is the current PID.
  623. m_tss.ss2 = m_pid;
  624. m_far_ptr.offset = 0x98765432;
  625. }
  626. Process::~Process()
  627. {
  628. {
  629. InterruptDisabler disabler;
  630. system.nprocess--;
  631. }
  632. if (g_last_fpu_process == this)
  633. g_last_fpu_process = nullptr;
  634. if (selector())
  635. gdt_free_entry(selector());
  636. if (m_kernel_stack) {
  637. kfree(m_kernel_stack);
  638. m_kernel_stack = nullptr;
  639. }
  640. }
  641. void Process::dump_regions()
  642. {
  643. kprintf("Process %s(%u) regions:\n", name().characters(), pid());
  644. kprintf("BEGIN END SIZE NAME\n");
  645. for (auto& region : m_regions) {
  646. kprintf("%x -- %x %x %s\n",
  647. region->laddr().get(),
  648. region->laddr().offset(region->size() - 1).get(),
  649. region->size(),
  650. region->name().characters());
  651. }
  652. }
  653. void Process::sys$exit(int status)
  654. {
  655. cli();
  656. #ifdef TASK_DEBUG
  657. kprintf("sys$exit: %s(%u) exit with status %d\n", name().characters(), pid(), status);
  658. #endif
  659. m_termination_status = status;
  660. m_termination_signal = 0;
  661. die();
  662. ASSERT_NOT_REACHED();
  663. }
  664. void Process::terminate_due_to_signal(byte signal)
  665. {
  666. ASSERT_INTERRUPTS_DISABLED();
  667. ASSERT(signal < 32);
  668. dbgprintf("terminate_due_to_signal %s(%u) <- %u\n", name().characters(), pid(), signal);
  669. m_termination_status = 0;
  670. m_termination_signal = signal;
  671. die();
  672. }
  673. void Process::send_signal(byte signal, Process* sender)
  674. {
  675. ASSERT(signal < 32);
  676. if (sender)
  677. dbgprintf("signal: %s(%u) sent %d to %s(%u)\n", sender->name().characters(), sender->pid(), signal, name().characters(), pid());
  678. else
  679. dbgprintf("signal: kernel sent %d to %s(%u)\n", signal, name().characters(), pid());
  680. InterruptDisabler disabler;
  681. m_pending_signals |= 1 << signal;
  682. }
  683. bool Process::has_unmasked_pending_signals() const
  684. {
  685. return m_pending_signals & m_signal_mask;
  686. }
  687. ShouldUnblockProcess Process::dispatch_one_pending_signal()
  688. {
  689. ASSERT_INTERRUPTS_DISABLED();
  690. dword signal_candidates = m_pending_signals & m_signal_mask;
  691. ASSERT(signal_candidates);
  692. byte signal = 0;
  693. for (; signal < 32; ++signal) {
  694. if (signal_candidates & (1 << signal)) {
  695. break;
  696. }
  697. }
  698. return dispatch_signal(signal);
  699. }
  700. ShouldUnblockProcess Process::dispatch_signal(byte signal)
  701. {
  702. ASSERT_INTERRUPTS_DISABLED();
  703. ASSERT(signal < 32);
  704. dbgprintf("dispatch_signal %s(%u) <- %u\n", name().characters(), pid(), signal);
  705. auto& action = m_signal_action_data[signal];
  706. // FIXME: Implement SA_SIGINFO signal handlers.
  707. ASSERT(!(action.flags & SA_SIGINFO));
  708. // Mark this signal as handled.
  709. m_pending_signals &= ~(1 << signal);
  710. if (signal == SIGSTOP) {
  711. set_state(Stopped);
  712. return ShouldUnblockProcess::No;
  713. }
  714. if (signal == SIGCONT && state() == Stopped) {
  715. set_state(Runnable);
  716. return ShouldUnblockProcess::Yes;
  717. }
  718. auto handler_laddr = action.handler_or_sigaction;
  719. if (handler_laddr.is_null()) {
  720. if (signal == SIGSTOP) {
  721. set_state(Stopped);
  722. } else {
  723. // FIXME: Is termination really always the appropriate action?
  724. terminate_due_to_signal(signal);
  725. }
  726. return ShouldUnblockProcess::No;
  727. }
  728. if (handler_laddr.as_ptr() == SIG_IGN) {
  729. dbgprintf("%s(%u) ignored signal %u\n", name().characters(), pid(), signal);
  730. return ShouldUnblockProcess::Yes;
  731. }
  732. Scheduler::prepare_to_modify_tss(*this);
  733. word ret_cs = m_tss.cs;
  734. dword ret_eip = m_tss.eip;
  735. dword ret_eflags = m_tss.eflags;
  736. bool interrupting_in_kernel = (ret_cs & 3) == 0;
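  // interrupting_in_kernel: the signal fired while this process was executing in ring 0.
  // In that case the current TSS gets stashed in m_tss_to_resume_kernel below so that
  // sys$sigreturn() can restore it after the handler has run.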
  737. if (interrupting_in_kernel) {
  738. dbgprintf("dispatch_signal to %s(%u) in state=%s with return to %w:%x\n", name().characters(), pid(), to_string(state()), ret_cs, ret_eip);
  739. ASSERT(is_blocked());
  740. m_tss_to_resume_kernel = m_tss;
  741. #ifdef SIGNAL_DEBUG
  742. dbgprintf("resume tss pc: %w:%x\n", m_tss_to_resume_kernel.cs, m_tss_to_resume_kernel.eip);
  743. #endif
  744. }
  745. ProcessPagingScope paging_scope(*this);
  746. if (interrupting_in_kernel) {
  747. if (!m_signal_stack_user_region) {
  748. m_signal_stack_user_region = allocate_region(LinearAddress(), default_userspace_stack_size, "signal stack (user)");
  749. ASSERT(m_signal_stack_user_region);
  750. m_signal_stack_kernel_region = allocate_region(LinearAddress(), default_userspace_stack_size, "signal stack (kernel)");
  751. ASSERT(m_signal_stack_kernel_region);
  752. }
  753. m_tss.ss = 0x23;
  754. m_tss.esp = m_signal_stack_user_region->laddr().offset(default_userspace_stack_size).get() & 0xfffffff8;
  755. m_tss.ss0 = 0x10;
  756. m_tss.esp0 = m_signal_stack_kernel_region->laddr().offset(default_userspace_stack_size).get() & 0xfffffff8;
  757. push_value_on_stack(ret_eflags);
  758. push_value_on_stack(ret_cs);
  759. push_value_on_stack(ret_eip);
  760. } else {
  761. push_value_on_stack(ret_cs);
  762. push_value_on_stack(ret_eip);
  763. push_value_on_stack(ret_eflags);
  764. }
  765. // PUSHA
  766. dword old_esp = m_tss.esp;
  767. push_value_on_stack(m_tss.eax);
  768. push_value_on_stack(m_tss.ecx);
  769. push_value_on_stack(m_tss.edx);
  770. push_value_on_stack(m_tss.ebx);
  771. push_value_on_stack(old_esp);
  772. push_value_on_stack(m_tss.ebp);
  773. push_value_on_stack(m_tss.esi);
  774. push_value_on_stack(m_tss.edi);
  775. m_tss.eax = (dword)signal;
  776. m_tss.cs = 0x1b;
  777. m_tss.ds = 0x23;
  778. m_tss.es = 0x23;
  779. m_tss.fs = 0x23;
  780. m_tss.gs = 0x23;
  781. m_tss.eip = handler_laddr.get();
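  // The handler will run in ring 3 with the signal number in EAX; when it returns, it lands in
  // the trampoline pushed as its return address below, which unwinds the state saved above.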
  782. if (m_return_to_ring3_from_signal_trampoline.is_null()) {
  783. // FIXME: This should be a global trampoline shared by all processes, not one created per process!
  784. // FIXME: Remap as read-only after setup.
  785. auto* region = allocate_region(LinearAddress(), PAGE_SIZE, "signal_trampoline", true, true);
  786. m_return_to_ring3_from_signal_trampoline = region->laddr();
  787. byte* code_ptr = m_return_to_ring3_from_signal_trampoline.as_ptr();
  788. *code_ptr++ = 0x61; // popa
  789. *code_ptr++ = 0x9d; // popf
  790. *code_ptr++ = 0xc3; // ret
  791. *code_ptr++ = 0x0f; // ud2
  792. *code_ptr++ = 0x0b;
  793. m_return_to_ring0_from_signal_trampoline = LinearAddress((dword)code_ptr);
  794. *code_ptr++ = 0x61; // popa
  795. *code_ptr++ = 0xb8; // mov eax, <dword>
  796. *(dword*)code_ptr = Syscall::SC_sigreturn;
  797. code_ptr += sizeof(dword);
  798. *code_ptr++ = 0xcd; // int 0x82
  799. *code_ptr++ = 0x82;
  800. *code_ptr++ = 0x0f; // ud2
  801. *code_ptr++ = 0x0b;
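  // Two tiny trampolines live in this page: the ring 3 one (popa; popf; ret) resumes the
  // interrupted user code directly, while the ring 0 one (popa; mov eax, SC_sigreturn; int 0x82)
  // asks the kernel to restore the TSS saved in m_tss_to_resume_kernel.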
  802. // FIXME: For !SA_NODEFER, maybe we could do something like emitting an int 0x80 syscall here that
  803. // unmasks the signal so it can be received again? I guess then I would need one trampoline
  804. // per signal number if it's hard-coded, but it's just a few bytes per each.
  805. }
  806. if (interrupting_in_kernel)
  807. push_value_on_stack(m_return_to_ring0_from_signal_trampoline.get());
  808. else
  809. push_value_on_stack(m_return_to_ring3_from_signal_trampoline.get());
  810. // FIXME: This state is such a hack. It avoids trouble if 'current' is the process receiving a signal.
  811. set_state(Skip1SchedulerPass);
  812. #ifdef SIGNAL_DEBUG
  813. dbgprintf("signal: Okay, %s(%u) {%s} has been primed with signal handler %w:%x\n", name().characters(), pid(), to_string(state()), m_tss.cs, m_tss.eip);
  814. #endif
  815. return ShouldUnblockProcess::Yes;
  816. }
  817. void Process::sys$sigreturn()
  818. {
  819. InterruptDisabler disabler;
  820. Scheduler::prepare_to_modify_tss(*this);
  821. m_tss = m_tss_to_resume_kernel;
  822. #ifdef SIGNAL_DEBUG
  823. dbgprintf("sys$sigreturn in %s(%u)\n", name().characters(), pid());
  824. dbgprintf(" -> resuming execution at %w:%x\n", m_tss.cs, m_tss.eip);
  825. #endif
  826. set_state(Skip1SchedulerPass);
  827. Scheduler::yield();
  828. kprintf("sys$sigreturn failed in %s(%u)\n", name().characters(), pid());
  829. ASSERT_NOT_REACHED();
  830. }
  831. void Process::push_value_on_stack(dword value)
  832. {
  833. m_tss.esp -= 4;
  834. dword* stack_ptr = (dword*)m_tss.esp;
  835. *stack_ptr = value;
  836. }
  837. void Process::crash()
  838. {
  839. ASSERT_INTERRUPTS_DISABLED();
  840. ASSERT(state() != Dead);
  841. m_termination_signal = SIGSEGV;
  842. dump_regions();
  843. ASSERT(is_ring3());
  844. die();
  845. ASSERT_NOT_REACHED();
  846. }
  847. Process* Process::from_pid(pid_t pid)
  848. {
  849. ASSERT_INTERRUPTS_DISABLED();
  850. for (auto* process = g_processes->head(); process; process = process->next()) {
  851. if (process->pid() == pid)
  852. return process;
  853. }
  854. return nullptr;
  855. }
  856. FileDescriptor* Process::file_descriptor(int fd)
  857. {
  858. if (fd < 0)
  859. return nullptr;
  860. if (fd < m_fds.size())
  861. return m_fds[fd].descriptor.ptr();
  862. return nullptr;
  863. }
  864. const FileDescriptor* Process::file_descriptor(int fd) const
  865. {
  866. if (fd < 0)
  867. return nullptr;
  868. if (fd < m_fds.size())
  869. return m_fds[fd].descriptor.ptr();
  870. return nullptr;
  871. }
  872. ssize_t Process::sys$get_dir_entries(int fd, void* buffer, ssize_t size)
  873. {
  874. if (size < 0)
  875. return -EINVAL;
  876. if (!validate_write(buffer, size))
  877. return -EFAULT;
  878. auto* descriptor = file_descriptor(fd);
  879. if (!descriptor)
  880. return -EBADF;
  881. return descriptor->get_dir_entries((byte*)buffer, size);
  882. }
  883. int Process::sys$lseek(int fd, off_t offset, int whence)
  884. {
  885. auto* descriptor = file_descriptor(fd);
  886. if (!descriptor)
  887. return -EBADF;
  888. return descriptor->seek(offset, whence);
  889. }
  890. int Process::sys$ttyname_r(int fd, char* buffer, ssize_t size)
  891. {
  892. if (size < 0)
  893. return -EINVAL;
  894. if (!validate_write(buffer, size))
  895. return -EFAULT;
  896. auto* descriptor = file_descriptor(fd);
  897. if (!descriptor)
  898. return -EBADF;
  899. if (!descriptor->is_tty())
  900. return -ENOTTY;
  901. auto tty_name = descriptor->tty()->tty_name();
  902. if (size < tty_name.length() + 1)
  903. return -ERANGE;
  904. strcpy(buffer, tty_name.characters());
  905. return 0;
  906. }
  907. int Process::sys$ptsname_r(int fd, char* buffer, ssize_t size)
  908. {
  909. if (size < 0)
  910. return -EINVAL;
  911. if (!validate_write(buffer, size))
  912. return -EFAULT;
  913. auto* descriptor = file_descriptor(fd);
  914. if (!descriptor)
  915. return -EBADF;
  916. auto* master_pty = descriptor->master_pty();
  917. if (!master_pty)
  918. return -ENOTTY;
  919. auto pts_name = master_pty->pts_name();
  920. if (size < pts_name.length() + 1)
  921. return -ERANGE;
  922. strcpy(buffer, pts_name.characters());
  923. return 0;
  924. }
  925. ssize_t Process::sys$write(int fd, const byte* data, ssize_t size)
  926. {
  927. if (size < 0)
  928. return -EINVAL;
  929. if (!validate_read(data, size))
  930. return -EFAULT;
  931. #ifdef DEBUG_IO
  932. dbgprintf("%s(%u): sys$write(%d, %p, %u)\n", name().characters(), pid(), fd, data, size);
  933. #endif
  934. auto* descriptor = file_descriptor(fd);
  935. if (!descriptor)
  936. return -EBADF;
  937. ssize_t nwritten = 0;
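  // For blocking descriptors, keep writing until the whole buffer is consumed, yielding whenever
  // the descriptor can't accept more data and returning -EINTR if a signal shows up before any
  // bytes were written.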
  938. if (descriptor->is_blocking()) {
  939. while (nwritten < (ssize_t)size) {
  940. #ifdef IO_DEBUG
  941. dbgprintf("while %u < %u\n", nwritten, size);
  942. #endif
  943. if (!descriptor->can_write(*this)) {
  944. #ifdef IO_DEBUG
  945. dbgprintf("block write on %d\n", fd);
  946. #endif
  947. m_blocked_fd = fd;
  948. block(BlockedWrite);
  949. Scheduler::yield();
  950. }
  951. ssize_t rc = descriptor->write(*this, (const byte*)data + nwritten, size - nwritten);
  952. #ifdef IO_DEBUG
  953. dbgprintf(" -> write returned %d\n", rc);
  954. #endif
  955. if (rc < 0) {
  956. // FIXME: Support returning partial nwritten with errno.
  957. ASSERT(nwritten == 0);
  958. return rc;
  959. }
  960. if (rc == 0)
  961. break;
  962. if (has_unmasked_pending_signals()) {
  963. block(BlockedSignal);
  964. Scheduler::yield();
  965. if (nwritten == 0)
  966. return -EINTR;
  967. }
  968. nwritten += rc;
  969. }
  970. } else {
  971. nwritten = descriptor->write(*this, (const byte*)data, size);
  972. }
  973. if (has_unmasked_pending_signals()) {
  974. block(BlockedSignal);
  975. Scheduler::yield();
  976. if (nwritten == 0)
  977. return -EINTR;
  978. }
  979. return nwritten;
  980. }
  981. ssize_t Process::sys$read(int fd, byte* buffer, ssize_t size)
  982. {
  983. if (size < 0)
  984. return -EINVAL;
  985. if (!validate_write(buffer, size))
  986. return -EFAULT;
  987. #ifdef DEBUG_IO
  988. dbgprintf("%s(%u) sys$read(%d, %p, %u)\n", name().characters(), pid(), fd, buffer, size);
  989. #endif
  990. auto* descriptor = file_descriptor(fd);
  991. if (!descriptor)
  992. return -EBADF;
  993. if (descriptor->is_blocking()) {
  994. if (!descriptor->can_read(*this)) {
  995. m_blocked_fd = fd;
  996. block(BlockedRead);
  997. Scheduler::yield();
  998. if (m_was_interrupted_while_blocked)
  999. return -EINTR;
  1000. }
  1001. }
  1002. return descriptor->read(*this, buffer, size);
  1003. }
  1004. int Process::sys$close(int fd)
  1005. {
  1006. auto* descriptor = file_descriptor(fd);
  1007. if (!descriptor)
  1008. return -EBADF;
  1009. int rc = descriptor->close();
  1010. m_fds[fd] = { };
  1011. return rc;
  1012. }
  1013. int Process::sys$utime(const char* pathname, const utimbuf* buf)
  1014. {
  1015. if (!validate_read_str(pathname))
  1016. return -EFAULT;
  1017. if (buf && !validate_read_typed(buf))
  1018. return -EFAULT;
  1019. time_t atime;
  1020. time_t mtime;
  1021. if (buf) {
  1022. atime = buf->actime;
  1023. mtime = buf->modtime;
  1024. } else {
  1025. auto now = RTC::now();
  1026. mtime = now;
  1027. atime = now;
  1028. }
  1029. return VFS::the().utime(String(pathname), cwd_inode(), atime, mtime);
  1030. }
  1031. int Process::sys$access(const char* pathname, int mode)
  1032. {
  1033. if (!validate_read_str(pathname))
  1034. return -EFAULT;
  1035. return VFS::the().access(String(pathname), mode, cwd_inode());
  1036. }
  1037. int Process::sys$fcntl(int fd, int cmd, dword arg)
  1038. {
  1039. (void) cmd;
  1040. (void) arg;
  1041. dbgprintf("sys$fcntl: fd=%d, cmd=%d, arg=%u\n", fd, cmd, arg);
  1042. auto* descriptor = file_descriptor(fd);
  1043. if (!descriptor)
  1044. return -EBADF;
  1045. // NOTE: The FD flags are not shared between FileDescriptor objects.
  1046. // This means that dup() doesn't copy the FD_CLOEXEC flag!
  1047. switch (cmd) {
  1048. case F_DUPFD: {
  1049. int arg_fd = (int)arg;
  1050. if (arg_fd < 0)
  1051. return -EINVAL;
  1052. int new_fd = -1;
  1053. for (int i = arg_fd; i < (int)m_max_open_file_descriptors; ++i) {
  1054. if (!m_fds[i]) {
  1055. new_fd = i;
  1056. break;
  1057. }
  1058. }
  1059. if (new_fd == -1)
  1060. return -EMFILE;
  1061. m_fds[new_fd].set(descriptor);
  1062. break;
  1063. }
  1064. case F_GETFD:
  1065. return m_fds[fd].flags;
  1066. case F_SETFD:
  1067. m_fds[fd].flags = arg;
  1068. break;
  1069. case F_GETFL:
  1070. return descriptor->file_flags();
  1071. case F_SETFL:
  1072. // FIXME: Support changing O_NONBLOCK
  1073. descriptor->set_file_flags(arg);
  1074. break;
  1075. default:
  1076. ASSERT_NOT_REACHED();
  1077. }
  1078. return 0;
  1079. }
  1080. int Process::sys$fstat(int fd, stat* statbuf)
  1081. {
  1082. if (!validate_write_typed(statbuf))
  1083. return -EFAULT;
  1084. auto* descriptor = file_descriptor(fd);
  1085. if (!descriptor)
  1086. return -EBADF;
  1087. return descriptor->fstat(statbuf);
  1088. }
  1089. int Process::sys$lstat(const char* path, stat* statbuf)
  1090. {
  1091. if (!validate_write_typed(statbuf))
  1092. return -EFAULT;
  1093. int error;
  1094. if (!VFS::the().stat(move(path), error, O_NOFOLLOW_NOERROR, cwd_inode(), *statbuf))
  1095. return error;
  1096. return 0;
  1097. }
  1098. int Process::sys$stat(const char* path, stat* statbuf)
  1099. {
  1100. if (!validate_write_typed(statbuf))
  1101. return -EFAULT;
  1102. int error;
  1103. if (!VFS::the().stat(move(path), error, 0, cwd_inode(), *statbuf))
  1104. return error;
  1105. return 0;
  1106. }
  1107. int Process::sys$readlink(const char* path, char* buffer, ssize_t size)
  1108. {
  1109. if (size < 0)
  1110. return -EINVAL;
  1111. if (!validate_read_str(path))
  1112. return -EFAULT;
  1113. if (!validate_write(buffer, size))
  1114. return -EFAULT;
  1115. int error;
  1116. auto descriptor = VFS::the().open(path, error, O_RDONLY | O_NOFOLLOW_NOERROR, 0, cwd_inode());
  1117. if (!descriptor)
  1118. return error;
  1119. if (!descriptor->metadata().is_symlink())
  1120. return -EINVAL;
  1121. auto contents = descriptor->read_entire_file(*this);
  1122. if (!contents)
  1123. return -EIO; // FIXME: Get a more detailed error from VFS.
  1124. memcpy(buffer, contents.pointer(), min(size, (ssize_t)contents.size()));
  1125. if (contents.size() + 1 < size)
  1126. buffer[contents.size()] = '\0';
  1127. return 0;
  1128. }
  1129. int Process::sys$chdir(const char* path)
  1130. {
  1131. if (!validate_read_str(path))
  1132. return -EFAULT;
  1133. int error;
  1134. auto descriptor = VFS::the().open(path, error, 0, 0, cwd_inode());
  1135. if (!descriptor)
  1136. return error;
  1137. if (!descriptor->is_directory())
  1138. return -ENOTDIR;
  1139. m_cwd = descriptor->inode();
  1140. return 0;
  1141. }
  1142. int Process::sys$getcwd(char* buffer, ssize_t size)
  1143. {
  1144. if (size < 0)
  1145. return -EINVAL;
  1146. if (!validate_write(buffer, size))
  1147. return -EFAULT;
  1148. auto path = VFS::the().absolute_path(cwd_inode());
  1149. if (path.is_null())
  1150. return -EINVAL;
  1151. if (size < path.length() + 1)
  1152. return -ERANGE;
  1153. strcpy(buffer, path.characters());
  1154. return 0;
  1155. }
  1156. size_t Process::number_of_open_file_descriptors() const
  1157. {
  1158. size_t count = 0;
  1159. for (auto& descriptor : m_fds) {
  1160. if (descriptor)
  1161. ++count;
  1162. }
  1163. return count;
  1164. }
  1165. int Process::sys$open(const char* path, int options, mode_t mode)
  1166. {
  1167. #ifdef DEBUG_IO
  1168. dbgprintf("%s(%u) sys$open(\"%s\")\n", name().characters(), pid(), path);
  1169. #endif
  1170. if (!validate_read_str(path))
  1171. return -EFAULT;
  1172. if (number_of_open_file_descriptors() >= m_max_open_file_descriptors)
  1173. return -EMFILE;
  1174. int error = -EWHYTHO;
  1175. auto descriptor = VFS::the().open(path, error, options, mode & ~umask(), cwd_inode());
  1176. if (!descriptor)
  1177. return error;
  1178. if (options & O_DIRECTORY && !descriptor->is_directory())
  1179. return -ENOTDIR; // FIXME: This should be handled by VFS::open.
  1180. if (options & O_NONBLOCK)
  1181. descriptor->set_blocking(false);
  1182. int fd = 0;
  1183. for (; fd < (int)m_max_open_file_descriptors; ++fd) {
  1184. if (!m_fds[fd])
  1185. break;
  1186. }
  1187. dword flags = (options & O_CLOEXEC) ? FD_CLOEXEC : 0;
  1188. m_fds[fd].set(move(descriptor), flags);
  1189. return fd;
  1190. }
  1191. int Process::alloc_fd()
  1192. {
  1193. int fd = -1;
  1194. for (int i = 0; i < (int)m_max_open_file_descriptors; ++i) {
  1195. if (!m_fds[i]) {
  1196. fd = i;
  1197. break;
  1198. }
  1199. }
  1200. return fd;
  1201. }
  1202. int Process::sys$pipe(int pipefd[2])
  1203. {
  1204. if (!validate_write_typed(pipefd))
  1205. return -EFAULT;
  1206. if (number_of_open_file_descriptors() + 2 > max_open_file_descriptors())
  1207. return -EMFILE;
  1208. auto fifo = FIFO::create();
  1209. int reader_fd = alloc_fd();
  1210. m_fds[reader_fd].set(FileDescriptor::create_pipe_reader(*fifo));
  1211. pipefd[0] = reader_fd;
  1212. int writer_fd = alloc_fd();
  1213. m_fds[writer_fd].set(FileDescriptor::create_pipe_writer(*fifo));
  1214. pipefd[1] = writer_fd;
  1215. return 0;
  1216. }
  1217. int Process::sys$killpg(int pgrp, int signum)
  1218. {
  1219. if (signum < 1 || signum >= 32)
  1220. return -EINVAL;
  1221. (void) pgrp;
  1222. ASSERT_NOT_REACHED();
  1223. }
  1224. int Process::sys$setuid(uid_t uid)
  1225. {
  1226. if (uid != m_uid && !is_superuser())
  1227. return -EPERM;
  1228. m_uid = uid;
  1229. m_euid = uid;
  1230. return 0;
  1231. }
  1232. int Process::sys$setgid(gid_t gid)
  1233. {
  1234. if (gid != m_gid && !is_superuser())
  1235. return -EPERM;
  1236. m_gid = gid;
  1237. m_egid = gid;
  1238. return 0;
  1239. }
  1240. unsigned Process::sys$alarm(unsigned seconds)
  1241. {
  1242. (void) seconds;
  1243. ASSERT_NOT_REACHED();
  1244. }
  1245. int Process::sys$uname(utsname* buf)
  1246. {
  1247. if (!validate_write_typed(buf))
  1248. return -EFAULT;
  1249. strcpy(buf->sysname, "Serenity");
  1250. strcpy(buf->release, "1.0-dev");
  1251. strcpy(buf->version, "FIXME");
  1252. strcpy(buf->machine, "i386");
  1253. LOCKER(*s_hostname_lock);
  1254. strncpy(buf->nodename, s_hostname->characters(), sizeof(utsname::nodename));
  1255. return 0;
  1256. }
  1257. int Process::sys$isatty(int fd)
  1258. {
  1259. auto* descriptor = file_descriptor(fd);
  1260. if (!descriptor)
  1261. return -EBADF;
  1262. if (!descriptor->is_tty())
  1263. return -ENOTTY;
  1264. return 1;
  1265. }
int Process::sys$kill(pid_t pid, int signal)
{
    if (signal < 0 || signal >= 32)
        return -EINVAL;
    if (pid == 0) {
        // FIXME: Send to same-group processes.
        ASSERT(pid != 0);
    }
    if (pid == -1) {
        // FIXME: Send to all processes.
        ASSERT(pid != -1);
    }
    if (pid == m_pid) {
        send_signal(signal, this);
        Scheduler::yield();
        return 0;
    }
    InterruptDisabler disabler;
    auto* peer = Process::from_pid(pid);
    if (!peer)
        return -ESRCH;
    // FIXME: Allow sending SIGCONT to everyone in the process group.
    // FIXME: Should setuid processes have some special treatment here?
    if (!is_superuser() && m_euid != peer->m_uid && m_uid != peer->m_uid)
        return -EPERM;
    if (peer->is_ring0() && signal == SIGKILL) {
        kprintf("%s(%u) attempted to send SIGKILL to ring 0 process %s(%u)\n", name().characters(), m_pid, peer->name().characters(), peer->pid());
        return -EPERM;
    }
    peer->send_signal(signal, this);
    return 0;
}

int Process::sys$usleep(useconds_t usec)
{
    if (!usec)
        return 0;
    sleep(usec / 1000);
    if (m_wakeup_time > system.uptime) {
        ASSERT(m_was_interrupted_while_blocked);
        dword ticks_left_until_original_wakeup_time = m_wakeup_time - system.uptime;
        return ticks_left_until_original_wakeup_time / TICKS_PER_SECOND;
    }
    return 0;
}

int Process::sys$sleep(unsigned seconds)
{
    if (!seconds)
        return 0;
    sleep(seconds * TICKS_PER_SECOND);
    if (m_wakeup_time > system.uptime) {
        ASSERT(m_was_interrupted_while_blocked);
        dword ticks_left_until_original_wakeup_time = m_wakeup_time - system.uptime;
        return ticks_left_until_original_wakeup_time / TICKS_PER_SECOND;
    }
    return 0;
}

int Process::sys$gettimeofday(timeval* tv)
{
    if (!validate_write_typed(tv))
        return -EFAULT;
    auto now = RTC::now();
    tv->tv_sec = now;
    tv->tv_usec = PIT::ticks_since_boot() % 1000;
    return 0;
}

uid_t Process::sys$getuid()
{
    return m_uid;
}

gid_t Process::sys$getgid()
{
    return m_gid;
}

uid_t Process::sys$geteuid()
{
    return m_euid;
}

gid_t Process::sys$getegid()
{
    return m_egid;
}

pid_t Process::sys$getpid()
{
    return m_pid;
}

pid_t Process::sys$getppid()
{
    return m_ppid;
}

mode_t Process::sys$umask(mode_t mask)
{
    auto old_mask = m_umask;
    m_umask = mask & 0777;
    return old_mask;
}
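
// Reap a dead child: fold its accumulated tick counters into the parent, remove it
// from the global process list, and return the wait()-style exit status, encoded as
// (termination_status << 8) | termination_signal.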
int Process::reap(Process& process)
{
    InterruptDisabler disabler;
    int exit_status = (process.m_termination_status << 8) | process.m_termination_signal;
    if (process.ppid()) {
        auto* parent = Process::from_pid(process.ppid());
        if (parent) {
            parent->m_ticks_in_user_for_dead_children += process.m_ticks_in_user + process.m_ticks_in_user_for_dead_children;
            parent->m_ticks_in_kernel_for_dead_children += process.m_ticks_in_kernel + process.m_ticks_in_kernel_for_dead_children;
        }
    }
    dbgprintf("reap: %s(%u) {%s}\n", process.name().characters(), process.pid(), to_string(process.state()));
    ASSERT(process.state() == Dead);
    g_processes->remove(&process);
    delete &process;
    return exit_status;
}
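
// With WNOHANG we only poll for an already-dead child and return immediately.
// Otherwise we block in BlockedWait; when a child dies, the scheduler wakes us and
// (for waitee == -1) fills in m_waitee_pid with the pid of the child to reap.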
pid_t Process::sys$waitpid(pid_t waitee, int* wstatus, int options)
{
    dbgprintf("sys$waitpid(%d, %p, %d)\n", waitee, wstatus, options);
    // FIXME: Respect options
    (void) options;
    if (wstatus)
        if (!validate_write_typed(wstatus))
            return -EFAULT;
    int dummy_wstatus;
    int& exit_status = wstatus ? *wstatus : dummy_wstatus;
    {
        InterruptDisabler disabler;
        if (waitee != -1 && !Process::from_pid(waitee))
            return -ECHILD;
    }
    if (options & WNOHANG) {
        if (waitee == -1) {
            pid_t reaped_pid = 0;
            InterruptDisabler disabler;
            for_each_child([&reaped_pid, &exit_status] (Process& process) {
                if (process.state() == Dead) {
                    reaped_pid = process.pid();
                    exit_status = reap(process);
                }
                return true;
            });
            return reaped_pid;
        } else {
            ASSERT(waitee > 0); // FIXME: Implement other PID specs.
            InterruptDisabler disabler;
            auto* waitee_process = Process::from_pid(waitee);
            if (!waitee_process)
                return -ECHILD;
            if (waitee_process->state() == Dead) {
                exit_status = reap(*waitee_process);
                return waitee;
            }
            return 0;
        }
    }
    m_waitee_pid = waitee;
    block(BlockedWait);
    Scheduler::yield();
    if (m_was_interrupted_while_blocked)
        return -EINTR;
    Process* waitee_process;
    {
        InterruptDisabler disabler;
        // NOTE: If waitee was -1, m_waitee_pid will have been filled in by the scheduler.
        waitee_process = Process::from_pid(m_waitee_pid);
    }
    ASSERT(waitee_process);
    exit_status = reap(*waitee_process);
    return m_waitee_pid;
}

void Process::unblock()
{
    if (current == this) {
        system.nblocked--;
        m_state = Process::Running;
        return;
    }
    ASSERT(m_state != Process::Runnable && m_state != Process::Running);
    system.nblocked--;
    m_state = Process::Runnable;
}

void Process::block(Process::State new_state)
{
    if (state() != Process::Running) {
        kprintf("Process::block: %s(%u) block(%u/%s) with state=%u/%s\n", name().characters(), pid(), new_state, to_string(new_state), state(), to_string(state()));
    }
    ASSERT(state() == Process::Running);
    system.nblocked++;
    m_was_interrupted_while_blocked = false;
    set_state(new_state);
}

void block(Process::State state)
{
    current->block(state);
    Scheduler::yield();
}

void sleep(dword ticks)
{
    ASSERT(current->state() == Process::Running);
    current->set_wakeup_time(system.uptime + ticks);
    current->block(Process::BlockedSleep);
    Scheduler::yield();
}
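
// Decide whether a linear address may be read from (or written to) inside kernel memory
// by walking the kernel's own ELF program headers, which are assumed to be mapped at 0xf000.
// An address inside a PT_LOAD segment is granted or denied based on the segment's
// PF_R/PF_W flags; anything outside every segment is reported as not kernel memory.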
enum class KernelMemoryCheckResult {
    NotInsideKernelMemory,
    AccessGranted,
    AccessDenied
};

static KernelMemoryCheckResult check_kernel_memory_access(LinearAddress laddr, bool is_write)
{
    auto* kernel_elf_header = (Elf32_Ehdr*)0xf000;
    auto* kernel_program_headers = (Elf32_Phdr*)(0xf000 + kernel_elf_header->e_phoff);
    for (unsigned i = 0; i < kernel_elf_header->e_phnum; ++i) {
        auto& segment = kernel_program_headers[i];
        if (segment.p_type != PT_LOAD || !segment.p_vaddr || !segment.p_memsz)
            continue;
        if (laddr.get() < segment.p_vaddr || laddr.get() > (segment.p_vaddr + segment.p_memsz))
            continue;
        if (is_write && !(kernel_program_headers[i].p_flags & PF_W))
            return KernelMemoryCheckResult::AccessDenied;
        if (!is_write && !(kernel_program_headers[i].p_flags & PF_R))
            return KernelMemoryCheckResult::AccessDenied;
        return KernelMemoryCheckResult::AccessGranted;
    }
    return KernelMemoryCheckResult::NotInsideKernelMemory;
}

bool Process::validate_read_from_kernel(LinearAddress laddr) const
{
    // We check extra carefully here since the first 4MB of the address space is identity-mapped.
    // This code allows access outside of the known used address ranges to get caught.
    auto kmc_result = check_kernel_memory_access(laddr, false);
    if (kmc_result == KernelMemoryCheckResult::AccessGranted)
        return true;
    if (kmc_result == KernelMemoryCheckResult::AccessDenied)
        return false;
    if (is_kmalloc_address(laddr.as_ptr()))
        return true;
    return validate_read(laddr.as_ptr(), 1);
}

bool Process::validate_read_str(const char* str)
{
    if (!validate_read(str, 1))
        return false;
    return validate_read(str, strlen(str) + 1);
}
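
// NOTE: validate_read()/validate_write() only check the first and last page of the
// range, so they implicitly assume that a single syscall argument never spans more
// than two pages.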
bool Process::validate_read(const void* address, ssize_t size) const
{
    ASSERT(size >= 0);
    LinearAddress first_address((dword)address);
    LinearAddress last_address = first_address.offset(size - 1);
    if (is_ring0()) {
        auto kmc_result = check_kernel_memory_access(first_address, false);
        if (kmc_result == KernelMemoryCheckResult::AccessGranted)
            return true;
        if (kmc_result == KernelMemoryCheckResult::AccessDenied)
            return false;
        if (is_kmalloc_address(address))
            return true;
    }
    ASSERT(size);
    if (!size)
        return false;
    if (first_address.page_base() != last_address.page_base()) {
        if (!MM.validate_user_read(*this, last_address))
            return false;
    }
    return MM.validate_user_read(*this, first_address);
}

bool Process::validate_write(void* address, ssize_t size) const
{
    ASSERT(size >= 0);
    LinearAddress first_address((dword)address);
    LinearAddress last_address = first_address.offset(size - 1);
    if (is_ring0()) {
        if (is_kmalloc_address(address))
            return true;
        auto kmc_result = check_kernel_memory_access(first_address, true);
        if (kmc_result == KernelMemoryCheckResult::AccessGranted)
            return true;
        if (kmc_result == KernelMemoryCheckResult::AccessDenied)
            return false;
    }
    if (!size)
        return false;
    if (first_address.page_base() != last_address.page_base()) {
        if (!MM.validate_user_write(*this, last_address))
            return false;
    }
    return MM.validate_user_write(*this, first_address);
}

pid_t Process::sys$getsid(pid_t pid)
{
    if (pid == 0)
        return m_sid;
    InterruptDisabler disabler;
    auto* process = Process::from_pid(pid);
    if (!process)
        return -ESRCH;
    if (m_sid != process->m_sid)
        return -EPERM;
    return process->m_sid;
}
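
// POSIX setsid(): fails with EPERM if the caller is already a process group leader,
// i.e. if any process has a pgid equal to our pid. On success we become the leader of
// a new session and a new process group, both named after our pid.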
pid_t Process::sys$setsid()
{
    InterruptDisabler disabler;
    bool found_process_with_same_pgid_as_my_pid = false;
    Process::for_each_in_pgrp(pid(), [&] (auto&) {
        found_process_with_same_pgid_as_my_pid = true;
        return false;
    });
    if (found_process_with_same_pgid_as_my_pid)
        return -EPERM;
    m_sid = m_pid;
    m_pgid = m_pid;
    return m_sid;
}

pid_t Process::sys$getpgid(pid_t pid)
{
    if (pid == 0)
        return m_pgid;
    InterruptDisabler disabler; // FIXME: Use a ProcessHandle
    auto* process = Process::from_pid(pid);
    if (!process)
        return -ESRCH;
    return process->m_pgid;
}

pid_t Process::sys$getpgrp()
{
    return m_pgid;
}

static pid_t get_sid_from_pgid(pid_t pgid)
{
    InterruptDisabler disabler;
    auto* group_leader = Process::from_pid(pgid);
    if (!group_leader)
        return -1;
    return group_leader->sid();
}

int Process::sys$setpgid(pid_t specified_pid, pid_t specified_pgid)
{
    InterruptDisabler disabler; // FIXME: Use a ProcessHandle
    pid_t pid = specified_pid ? specified_pid : m_pid;
    if (specified_pgid < 0)
        return -EINVAL;
    auto* process = Process::from_pid(pid);
    if (!process)
        return -ESRCH;
    pid_t new_pgid = specified_pgid ? specified_pgid : process->m_pid;
    pid_t current_sid = get_sid_from_pgid(process->m_pgid);
    pid_t new_sid = get_sid_from_pgid(new_pgid);
    if (current_sid != new_sid) {
        // Can't move a process between sessions.
        return -EPERM;
    }
    // FIXME: There are more EPERM conditions to check for here..
    process->m_pgid = new_pgid;
    return 0;
}

int Process::sys$ioctl(int fd, unsigned request, unsigned arg)
{
    auto* descriptor = file_descriptor(fd);
    if (!descriptor)
        return -EBADF;
    if (descriptor->is_socket() && request == 413) {
        auto* pid = (pid_t*)arg;
        if (!validate_write_typed(pid))
            return -EFAULT;
        *pid = descriptor->socket()->origin_pid();
        return 0;
    }
    if (!descriptor->is_device())
        return -ENOTTY;
    return descriptor->device()->ioctl(*this, request, arg);
}

int Process::sys$getdtablesize()
{
    return m_max_open_file_descriptors;
}

int Process::sys$dup(int old_fd)
{
    auto* descriptor = file_descriptor(old_fd);
    if (!descriptor)
        return -EBADF;
    if (number_of_open_file_descriptors() == m_max_open_file_descriptors)
        return -EMFILE;
    int new_fd = 0;
    for (; new_fd < (int)m_max_open_file_descriptors; ++new_fd) {
        if (!m_fds[new_fd])
            break;
    }
    m_fds[new_fd].set(descriptor);
    return new_fd;
}

int Process::sys$dup2(int old_fd, int new_fd)
{
    auto* descriptor = file_descriptor(old_fd);
    if (!descriptor)
        return -EBADF;
    if (number_of_open_file_descriptors() == m_max_open_file_descriptors)
        return -EMFILE;
    m_fds[new_fd].set(descriptor);
    return new_fd;
}
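
// m_signal_mask is the set of currently blocked signals. Per POSIX, SIG_BLOCK adds
// the signals in *set to the mask, SIG_UNBLOCK removes them, and SIG_SETMASK replaces
// the mask entirely.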
int Process::sys$sigprocmask(int how, const sigset_t* set, sigset_t* old_set)
{
    if (old_set) {
        if (!validate_write_typed(old_set))
            return -EFAULT;
        *old_set = m_signal_mask;
    }
    if (set) {
        if (!validate_read_typed(set))
            return -EFAULT;
        switch (how) {
        case SIG_BLOCK:
            m_signal_mask |= *set;
            break;
        case SIG_UNBLOCK:
            m_signal_mask &= ~(*set);
            break;
        case SIG_SETMASK:
            m_signal_mask = *set;
            break;
        default:
            return -EINVAL;
        }
    }
    return 0;
}

int Process::sys$sigpending(sigset_t* set)
{
    if (!validate_write_typed(set))
        return -EFAULT;
    *set = m_pending_signals;
    return 0;
}

void Process::set_default_signal_dispositions()
{
    // FIXME: Set up all the right default actions. See signal(7).
    memset(&m_signal_action_data, 0, sizeof(m_signal_action_data));
    m_signal_action_data[SIGCHLD].handler_or_sigaction = LinearAddress((dword)SIG_IGN);
    m_signal_action_data[SIGWINCH].handler_or_sigaction = LinearAddress((dword)SIG_IGN);
}

int Process::sys$sigaction(int signum, const sigaction* act, sigaction* old_act)
{
    if (signum < 1 || signum >= 32 || signum == SIGKILL || signum == SIGSTOP)
        return -EINVAL;
    if (!validate_read_typed(act))
        return -EFAULT;
    InterruptDisabler disabler; // FIXME: This should use a narrower lock. Maybe a way to ignore signals temporarily?
    auto& action = m_signal_action_data[signum];
    if (old_act) {
        if (!validate_write_typed(old_act))
            return -EFAULT;
        old_act->sa_flags = action.flags;
        old_act->sa_restorer = (decltype(old_act->sa_restorer))action.restorer.get();
        old_act->sa_sigaction = (decltype(old_act->sa_sigaction))action.handler_or_sigaction.get();
    }
    action.restorer = LinearAddress((dword)act->sa_restorer);
    action.flags = act->sa_flags;
    action.handler_or_sigaction = LinearAddress((dword)act->sa_sigaction);
    return 0;
}

int Process::sys$getgroups(ssize_t count, gid_t* gids)
{
    if (count < 0)
        return -EINVAL;
    ASSERT(m_gids.size() < MAX_PROCESS_GIDS);
    if (!count)
        return m_gids.size();
    if (count != (int)m_gids.size())
        return -EINVAL;
    if (!validate_write_typed(gids, m_gids.size()))
        return -EFAULT;
    size_t i = 0;
    for (auto gid : m_gids)
        gids[i++] = gid;
    return 0;
}

int Process::sys$setgroups(ssize_t count, const gid_t* gids)
{
    if (count < 0)
        return -EINVAL;
    if (!is_superuser())
        return -EPERM;
    if (count >= MAX_PROCESS_GIDS)
        return -EINVAL;
    if (!validate_read(gids, count))
        return -EFAULT;
    m_gids.clear();
    m_gids.set(m_gid);
    for (int i = 0; i < count; ++i)
        m_gids.set(gids[i]);
    return 0;
}

int Process::sys$mkdir(const char* pathname, mode_t mode)
{
    if (!validate_read_str(pathname))
        return -EFAULT;
    size_t pathname_length = strlen(pathname);
    if (pathname_length == 0)
        return -EINVAL;
    if (pathname_length >= 255)
        return -ENAMETOOLONG;
    return VFS::the().mkdir(String(pathname, pathname_length), mode & ~umask(), cwd_inode());
}

clock_t Process::sys$times(tms* times)
{
    if (!validate_write_typed(times))
        return -EFAULT;
    times->tms_utime = m_ticks_in_user;
    times->tms_stime = m_ticks_in_kernel;
    times->tms_cutime = m_ticks_in_user_for_dead_children;
    times->tms_cstime = m_ticks_in_kernel_for_dead_children;
    return 0;
}
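
// sys$select: validate the fd_set pointers, copy the requested fds into the per-process
// m_select_read_fds/m_select_write_fds vectors, then block in BlockedSelect unless a
// wakeup was already requested or a zero timeout asks us to poll. Afterwards the fd_sets
// are rebuilt from whichever descriptors report can_read()/can_write().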
int Process::sys$select(const Syscall::SC_select_params* params)
{
    if (!validate_read_typed(params))
        return -EFAULT;
    if (params->writefds && !validate_read_typed(params->writefds))
        return -EFAULT;
    if (params->readfds && !validate_read_typed(params->readfds))
        return -EFAULT;
    if (params->exceptfds && !validate_read_typed(params->exceptfds))
        return -EFAULT;
    if (params->timeout && !validate_read_typed(params->timeout))
        return -EFAULT;
    int nfds = params->nfds;
    fd_set* writefds = params->writefds;
    fd_set* readfds = params->readfds;
    fd_set* exceptfds = params->exceptfds;
    auto* timeout = params->timeout;
    // FIXME: Implement exceptfds support.
    (void)exceptfds;
    if (timeout) {
        m_select_timeout = *timeout;
        m_select_has_timeout = true;
    } else {
        m_select_has_timeout = false;
    }
    if (nfds < 0)
        return -EINVAL;
    // FIXME: Return -EINTR if a signal is caught.
    // FIXME: Return -EINVAL if timeout is invalid.
    auto transfer_fds = [this, nfds] (fd_set* set, auto& vector) -> int {
        if (!set)
            return 0;
        vector.clear_with_capacity();
        auto bitmap = Bitmap::wrap((byte*)set, FD_SETSIZE);
        for (int i = 0; i < nfds; ++i) {
            if (bitmap.get(i)) {
                if (!file_descriptor(i))
                    return -EBADF;
                vector.append(i);
            }
        }
        return 0;
    };
    int error = 0;
    error = transfer_fds(writefds, m_select_write_fds);
    if (error)
        return error;
    error = transfer_fds(readfds, m_select_read_fds);
    if (error)
        return error;
    error = transfer_fds(exceptfds, m_select_exceptional_fds);
    if (error)
        return error;
#ifdef DEBUG_IO
    dbgprintf("%s<%u> selecting on (read:%u, write:%u), wakeup_req:%u, timeout=%p\n", name().characters(), pid(), m_select_read_fds.size(), m_select_write_fds.size(), m_wakeup_requested, timeout);
#endif
    if (!m_wakeup_requested && (!timeout || (timeout->tv_sec || timeout->tv_usec))) {
        block(BlockedSelect);
        Scheduler::yield();
    }
    m_wakeup_requested = false;
    int markedfds = 0;
    if (readfds) {
        memset(readfds, 0, sizeof(fd_set));
        auto bitmap = Bitmap::wrap((byte*)readfds, FD_SETSIZE);
        for (int fd : m_select_read_fds) {
            auto* descriptor = file_descriptor(fd);
            if (!descriptor)
                continue;
            if (descriptor->can_read(*this)) {
                bitmap.set(fd, true);
                ++markedfds;
            }
        }
    }
    if (writefds) {
        memset(writefds, 0, sizeof(fd_set));
        auto bitmap = Bitmap::wrap((byte*)writefds, FD_SETSIZE);
        for (int fd : m_select_write_fds) {
            auto* descriptor = file_descriptor(fd);
            if (!descriptor)
                continue;
            if (descriptor->can_write(*this)) {
                bitmap.set(fd, true);
                ++markedfds;
            }
        }
    }
    // FIXME: Check for exceptional conditions.
    return markedfds;
}

int Process::sys$poll(pollfd* fds, int nfds, int timeout)
{
    if (!validate_read_typed(fds))
        return -EFAULT;
    m_select_write_fds.clear_with_capacity();
    m_select_read_fds.clear_with_capacity();
    for (int i = 0; i < nfds; ++i) {
        if (fds[i].events & POLLIN)
            m_select_read_fds.append(fds[i].fd);
        if (fds[i].events & POLLOUT)
            m_select_write_fds.append(fds[i].fd);
    }
    if (!m_wakeup_requested && timeout < 0) {
        block(BlockedSelect);
        Scheduler::yield();
    }
    m_wakeup_requested = false;
    int fds_with_revents = 0;
    for (int i = 0; i < nfds; ++i) {
        auto* descriptor = file_descriptor(fds[i].fd);
        if (!descriptor) {
            fds[i].revents = POLLNVAL;
            continue;
        }
        fds[i].revents = 0;
        if (fds[i].events & POLLIN && descriptor->can_read(*this))
            fds[i].revents |= POLLIN;
        if (fds[i].events & POLLOUT && descriptor->can_write(*this))
            fds[i].revents |= POLLOUT;
        if (fds[i].revents)
            ++fds_with_revents;
    }
    return fds_with_revents;
}

Inode& Process::cwd_inode()
{
    // FIXME: This factoring is awkward.
    if (!m_cwd)
        m_cwd = VFS::the().root_inode();
    return *m_cwd;
}

int Process::sys$link(const char* old_path, const char* new_path)
{
    if (!validate_read_str(old_path))
        return -EFAULT;
    if (!validate_read_str(new_path))
        return -EFAULT;
    return VFS::the().link(String(old_path), String(new_path), cwd_inode());
}

int Process::sys$unlink(const char* pathname)
{
    if (!validate_read_str(pathname))
        return -EFAULT;
    return VFS::the().unlink(String(pathname), cwd_inode());
}

int Process::sys$rmdir(const char* pathname)
{
    if (!validate_read_str(pathname))
        return -EFAULT;
    return VFS::the().rmdir(String(pathname), cwd_inode());
}

int Process::sys$read_tsc(dword* lsw, dword* msw)
{
    if (!validate_write_typed(lsw))
        return -EFAULT;
    if (!validate_write_typed(msw))
        return -EFAULT;
    read_tsc(*lsw, *msw);
    return 0;
}

int Process::sys$chmod(const char* pathname, mode_t mode)
{
    if (!validate_read_str(pathname))
        return -EFAULT;
    return VFS::the().chmod(String(pathname), mode, cwd_inode());
}

int Process::sys$fchmod(int fd, mode_t mode)
{
    auto* descriptor = file_descriptor(fd);
    if (!descriptor)
        return -EBADF;
    return descriptor->fchmod(mode);
}

int Process::sys$chown(const char* pathname, uid_t uid, gid_t gid)
{
    if (!validate_read_str(pathname))
        return -EFAULT;
    return VFS::the().chown(String(pathname), uid, gid, cwd_inode());
}

void Process::finalize()
{
    ASSERT(current == g_finalizer);
    m_fds.clear();
    m_tty = nullptr;
    disown_all_shared_buffers();
    {
        InterruptDisabler disabler;
        if (auto* parent_process = Process::from_pid(m_ppid)) {
            if (parent_process->m_signal_action_data[SIGCHLD].flags & SA_NOCLDWAIT) {
                // NOTE: If the parent doesn't care about this process, let it go.
                m_ppid = 0;
            } else {
                parent_process->send_signal(SIGCHLD, this);
            }
        }
    }
    set_state(Dead);
}

void Process::die()
{
    set_state(Dying);
    if (!Scheduler::is_active())
        Scheduler::pick_next_and_switch_now();
}

size_t Process::amount_virtual() const
{
    size_t amount = 0;
    for (auto& region : m_regions) {
        amount += region->size();
    }
    return amount;
}

size_t Process::amount_resident() const
{
    // FIXME: This will double count if multiple regions use the same physical page.
    size_t amount = 0;
    for (auto& region : m_regions) {
        amount += region->amount_resident();
    }
    return amount;
}

size_t Process::amount_shared() const
{
    // FIXME: This will double count if multiple regions use the same physical page.
    // FIXME: It doesn't work at the moment, since it relies on PhysicalPage retain counts,
    // and each PhysicalPage is only retained by its VMObject. This needs to be refactored
    // so that every Region contributes +1 retain to each of its PhysicalPages.
    size_t amount = 0;
    for (auto& region : m_regions) {
        amount += region->amount_shared();
    }
    return amount;
}

void Process::finalize_dying_processes()
{
    Vector<Process*> dying_processes;
    {
        InterruptDisabler disabler;
        dying_processes.ensure_capacity(system.nprocess);
        for (auto* process = g_processes->head(); process; process = process->next()) {
            if (process->state() == Process::Dying)
                dying_processes.append(process);
        }
    }
    for (auto* process : dying_processes)
        process->finalize();
}

bool Process::tick()
{
    ++m_ticks;
    if (tss().cs & 3)
        ++m_ticks_in_user;
    else
        ++m_ticks_in_kernel;
    return --m_ticks_left;
}

int Process::sys$socket(int domain, int type, int protocol)
{
    if (number_of_open_file_descriptors() >= m_max_open_file_descriptors)
        return -EMFILE;
    int fd = 0;
    for (; fd < (int)m_max_open_file_descriptors; ++fd) {
        if (!m_fds[fd])
            break;
    }
    int error;
    auto socket = Socket::create(domain, type, protocol, error);
    if (!socket)
        return error;
    auto descriptor = FileDescriptor::create(move(socket));
    unsigned flags = 0;
    if (type & SOCK_CLOEXEC)
        flags |= FD_CLOEXEC;
    if (type & SOCK_NONBLOCK)
        descriptor->set_blocking(false);
    m_fds[fd].set(move(descriptor), flags);
    return fd;
}

int Process::sys$bind(int sockfd, const sockaddr* address, socklen_t address_length)
{
    if (!validate_read(address, address_length))
        return -EFAULT;
    auto* descriptor = file_descriptor(sockfd);
    if (!descriptor)
        return -EBADF;
    if (!descriptor->is_socket())
        return -ENOTSOCK;
    auto& socket = *descriptor->socket();
    int error;
    if (!socket.bind(address, address_length, error))
        return error;
    return 0;
}

int Process::sys$listen(int sockfd, int backlog)
{
    auto* descriptor = file_descriptor(sockfd);
    if (!descriptor)
        return -EBADF;
    if (!descriptor->is_socket())
        return -ENOTSOCK;
    auto& socket = *descriptor->socket();
    int error;
    if (!socket.listen(backlog, error))
        return error;
    descriptor->set_socket_role(SocketRole::Listener);
    return 0;
}

int Process::sys$accept(int accepting_socket_fd, sockaddr* address, socklen_t* address_size)
{
    if (!validate_write_typed(address_size))
        return -EFAULT;
    if (!validate_write(address, *address_size))
        return -EFAULT;
    if (number_of_open_file_descriptors() >= m_max_open_file_descriptors)
        return -EMFILE;
    int accepted_socket_fd = 0;
    for (; accepted_socket_fd < (int)m_max_open_file_descriptors; ++accepted_socket_fd) {
        if (!m_fds[accepted_socket_fd])
            break;
    }
    auto* accepting_socket_descriptor = file_descriptor(accepting_socket_fd);
    if (!accepting_socket_descriptor)
        return -EBADF;
    if (!accepting_socket_descriptor->is_socket())
        return -ENOTSOCK;
    auto& socket = *accepting_socket_descriptor->socket();
    if (!socket.can_accept()) {
        ASSERT(!accepting_socket_descriptor->is_blocking());
        return -EAGAIN;
    }
    auto accepted_socket = socket.accept();
    ASSERT(accepted_socket);
    bool success = accepted_socket->get_address(address, address_size);
    ASSERT(success);
    auto accepted_socket_descriptor = FileDescriptor::create(move(accepted_socket), SocketRole::Accepted);
    // NOTE: The accepted socket inherits fd flags from the accepting socket.
    // I'm not sure if this matches other systems but it makes sense to me.
    accepted_socket_descriptor->set_blocking(accepting_socket_descriptor->is_blocking());
    m_fds[accepted_socket_fd].set(move(accepted_socket_descriptor), m_fds[accepting_socket_fd].flags);
    return accepted_socket_fd;
}

int Process::sys$connect(int sockfd, const sockaddr* address, socklen_t address_size)
{
    if (!validate_read(address, address_size))
        return -EFAULT;
    if (number_of_open_file_descriptors() >= m_max_open_file_descriptors)
        return -EMFILE;
    int fd = 0;
    for (; fd < (int)m_max_open_file_descriptors; ++fd) {
        if (!m_fds[fd])
            break;
    }
    auto* descriptor = file_descriptor(sockfd);
    if (!descriptor)
        return -EBADF;
    if (!descriptor->is_socket())
        return -ENOTSOCK;
    auto& socket = *descriptor->socket();
    int error;
    if (!socket.connect(address, address_size, error))
        return error;
    descriptor->set_socket_role(SocketRole::Connected);
    return 0;
}

bool Process::wait_for_connect(Socket& socket, int& error)
{
    if (socket.is_connected())
        return true;
    m_blocked_connecting_socket = socket;
    block(BlockedConnect);
    Scheduler::yield();
    m_blocked_connecting_socket = nullptr;
    if (!socket.is_connected()) {
        error = -ECONNREFUSED;
        return false;
    }
    return true;
}
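
// A SharedBuffer is an anonymous VMObject shared between exactly two processes.
// Each side gets its own Region mapping of the VMObject the first time it retains the
// buffer, the mapping is torn down when that side's retain count drops to zero, and the
// buffer itself is destroyed once neither side holds a reference. The expected flow is
// roughly: the creator calls sys$create_shared_buffer(peer_pid, size, &ptr), passes the
// returned id to the peer, the peer maps it with sys$get_shared_buffer(id), and both
// sides eventually call sys$release_shared_buffer(id).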
struct SharedBuffer {
    SharedBuffer(pid_t pid1, pid_t pid2, size_t size)
        : m_pid1(pid1)
        , m_pid2(pid2)
        , m_vmo(VMObject::create_anonymous(size))
    {
        ASSERT(pid1 != pid2);
    }

    void* retain(Process& process)
    {
        if (m_pid1 == process.pid()) {
            ++m_pid1_retain_count;
            if (!m_pid1_region) {
                m_pid1_region = process.allocate_region_with_vmo(LinearAddress(), size(), m_vmo.copy_ref(), 0, "SharedBuffer", true, true);
                m_pid1_region->set_shared(true);
            }
            return m_pid1_region->laddr().as_ptr();
        } else if (m_pid2 == process.pid()) {
            ++m_pid2_retain_count;
            if (!m_pid2_region) {
                m_pid2_region = process.allocate_region_with_vmo(LinearAddress(), size(), m_vmo.copy_ref(), 0, "SharedBuffer", true, true);
                m_pid2_region->set_shared(true);
            }
            return m_pid2_region->laddr().as_ptr();
        }
        return nullptr;
    }

    void release(Process& process)
    {
        if (m_pid1 == process.pid()) {
            ASSERT(m_pid1_retain_count);
            --m_pid1_retain_count;
            if (!m_pid1_retain_count) {
                if (m_pid1_region)
                    process.deallocate_region(*m_pid1_region);
                m_pid1_region = nullptr;
            }
            destroy_if_unused();
        } else if (m_pid2 == process.pid()) {
            ASSERT(m_pid2_retain_count);
            --m_pid2_retain_count;
            if (!m_pid2_retain_count) {
                if (m_pid2_region)
                    process.deallocate_region(*m_pid2_region);
                m_pid2_region = nullptr;
            }
            destroy_if_unused();
        }
    }

    void disown(pid_t pid)
    {
        if (m_pid1 == pid) {
            m_pid1 = 0;
            m_pid1_retain_count = 0;
            destroy_if_unused();
        } else if (m_pid2 == pid) {
            m_pid2 = 0;
            m_pid2_retain_count = 0;
            destroy_if_unused();
        }
    }

    pid_t pid1() const { return m_pid1; }
    pid_t pid2() const { return m_pid2; }
    unsigned pid1_retain_count() const { return m_pid1_retain_count; }
    unsigned pid2_retain_count() const { return m_pid2_retain_count; }
    size_t size() const { return m_vmo->size(); }
    void destroy_if_unused();

    int m_shared_buffer_id { -1 };
    pid_t m_pid1;
    pid_t m_pid2;
    unsigned m_pid1_retain_count { 1 };
    unsigned m_pid2_retain_count { 0 };
    Region* m_pid1_region { nullptr };
    Region* m_pid2_region { nullptr };
    Retained<VMObject> m_vmo;
};

static int s_next_shared_buffer_id;

Lockable<HashMap<int, OwnPtr<SharedBuffer>>>& shared_buffers()
{
    static Lockable<HashMap<int, OwnPtr<SharedBuffer>>>* map;
    if (!map)
        map = new Lockable<HashMap<int, OwnPtr<SharedBuffer>>>;
    return *map;
}

void SharedBuffer::destroy_if_unused()
{
    if (!m_pid1_retain_count && !m_pid2_retain_count) {
        LOCKER(shared_buffers().lock());
#ifdef SHARED_BUFFER_DEBUG
        dbgprintf("Destroying unused SharedBuffer{%p} id: %d (pid1: %d, pid2: %d)\n", this, m_shared_buffer_id, m_pid1, m_pid2);
#endif
        size_t count_before = shared_buffers().resource().size();
        shared_buffers().resource().remove(m_shared_buffer_id);
        ASSERT(count_before != shared_buffers().resource().size());
    }
}

void Process::disown_all_shared_buffers()
{
    LOCKER(shared_buffers().lock());
    Vector<SharedBuffer*> buffers_to_disown;
    for (auto& it : shared_buffers().resource())
        buffers_to_disown.append(it.value.ptr());
    for (auto* shared_buffer : buffers_to_disown)
        shared_buffer->disown(m_pid);
}
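
// Create a shared buffer between the calling process and peer_pid. The size is rounded
// up to a whole number of pages, the creator's side is mapped immediately (starting with
// a retain count of 1), and the new buffer id is returned to the caller.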
int Process::sys$create_shared_buffer(pid_t peer_pid, size_t size, void** buffer)
{
    if (!size)
        return -EINVAL;
    size = PAGE_ROUND_UP(size);
    if (!peer_pid || peer_pid < 0 || peer_pid == m_pid)
        return -EINVAL;
    if (!validate_write_typed(buffer))
        return -EFAULT;
    {
        InterruptDisabler disabler;
        auto* peer = Process::from_pid(peer_pid);
        if (!peer)
            return -ESRCH;
    }
    LOCKER(shared_buffers().lock());
    int shared_buffer_id = ++s_next_shared_buffer_id;
    auto shared_buffer = make<SharedBuffer>(m_pid, peer_pid, size);
    shared_buffer->m_shared_buffer_id = shared_buffer_id;
    ASSERT(shared_buffer->size() >= size);
    shared_buffer->m_pid1_region = allocate_region_with_vmo(LinearAddress(), shared_buffer->size(), shared_buffer->m_vmo.copy_ref(), 0, "SharedBuffer", true, true);
    shared_buffer->m_pid1_region->set_shared(true);
    *buffer = shared_buffer->m_pid1_region->laddr().as_ptr();
#ifdef SHARED_BUFFER_DEBUG
    dbgprintf("%s(%u): Created shared buffer %d (%u bytes, vmo is %u) for sharing with %d\n", name().characters(), pid(), shared_buffer_id, size, shared_buffer->size(), peer_pid);
#endif
    shared_buffers().resource().set(shared_buffer_id, move(shared_buffer));
    return shared_buffer_id;
}

int Process::sys$release_shared_buffer(int shared_buffer_id)
{
    LOCKER(shared_buffers().lock());
    auto it = shared_buffers().resource().find(shared_buffer_id);
    if (it == shared_buffers().resource().end())
        return -EINVAL;
    auto& shared_buffer = *(*it).value;
#ifdef SHARED_BUFFER_DEBUG
    dbgprintf("%s(%u): Releasing shared buffer %d, buffer count: %u\n", name().characters(), pid(), shared_buffer_id, shared_buffers().resource().size());
#endif
    shared_buffer.release(*this);
    return 0;
}

void* Process::sys$get_shared_buffer(int shared_buffer_id)
{
    LOCKER(shared_buffers().lock());
    auto it = shared_buffers().resource().find(shared_buffer_id);
    if (it == shared_buffers().resource().end())
        return (void*)-EINVAL;
    auto& shared_buffer = *(*it).value;
    if (shared_buffer.pid1() != m_pid && shared_buffer.pid2() != m_pid)
        return (void*)-EINVAL;
#ifdef SHARED_BUFFER_DEBUG
    dbgprintf("%s(%u): Retaining shared buffer %d, buffer count: %u\n", name().characters(), pid(), shared_buffer_id, shared_buffers().resource().size());
#endif
    return shared_buffer.retain(*this);
}